diff --git a/Changelog b/Changelog index f30f398a9f..2ccd2645fc 100644 --- a/Changelog +++ b/Changelog @@ -5,8 +5,8 @@ version : - v360 filter - Intel QSV-accelerated MJPEG decoding - Intel QSV-accelerated VP9 decoding -- support for TrueHD in mp4 -- Supoort AMD AMF encoder on Linux (via Vulkan) +- Support for TrueHD in mp4 +- Support AMD AMF encoder on Linux (via Vulkan) - IMM5 video decoder - ZeroMQ protocol - support Sipro ACELP.KELVIN decoding @@ -27,6 +27,9 @@ version : - axcorrelate filter - mvdv decoder - mvha decoder +- MPEG-H 3D Audio support in mp4 +- thistogram filter +- freezeframes filter version 4.2: diff --git a/LICENSE.md b/LICENSE.md index e2ab65cf5e..613070e1b6 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -35,7 +35,6 @@ Specifically, the GPL parts of FFmpeg are: - `vf_eq.c` - `vf_find_rect.c` - `vf_fspp.c` - - `vf_geq.c` - `vf_histeq.c` - `vf_hqdn3d.c` - `vf_kerndeint.c` diff --git a/compat/avisynth/avisynth_c.h b/compat/avisynth/avisynth_c.h index 8d17125adc..9ff9321552 100644 --- a/compat/avisynth/avisynth_c.h +++ b/compat/avisynth/avisynth_c.h @@ -1096,7 +1096,7 @@ AVSC_INLINE AVS_Library * avs_load_library() { AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library)); if (library == NULL) return NULL; - library->handle = LoadLibrary("avisynth"); + library->handle = LoadLibraryA("avisynth"); if (library->handle == NULL) goto fail; diff --git a/configure b/configure index 42e7df3941..46f2038627 100755 --- a/configure +++ b/configure @@ -482,6 +482,7 @@ Developer options (useful when working on FFmpeg itself): --ignore-tests=TESTS comma-separated list (without "fate-" prefix in the name) of tests whose result is ignored --enable-linux-perf enable Linux Performance Monitor API + --disable-large-tests disable tests that use a large amount of memory NOTE: Object files are built at the place where configure is launched. 
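Aside on the avisynth_c.h hunk above (a hedged sketch, not part of the patch): LoadLibrary is a macro that resolves to LoadLibraryW when UNICODE is defined, so a narrow string literal such as "avisynth" only goes through the right entry point when the explicit ANSI variant LoadLibraryA is named. The standalone snippet below illustrates the idea in isolation.

/* Minimal standalone sketch: explicit ANSI loading, correct in both
 * UNICODE and non-UNICODE builds. Illustration only, not FFmpeg code. */
#include <windows.h>
#include <stdio.h>

int main(void)
{
    HMODULE h = LoadLibraryA("avisynth");  /* explicit ANSI entry point */
    if (!h) {
        printf("LoadLibraryA failed: error %lu\n", GetLastError());
        return 1;
    }
    FreeLibrary(h);
    return 0;
}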
EOF @@ -1931,6 +1932,7 @@ CONFIG_LIST=" $SUBSYSTEM_LIST autodetect fontconfig + large_tests linux_perf memory_poisoning neon_clobber_test @@ -2194,6 +2196,7 @@ SYSTEM_FUNCS=" getaddrinfo gethrtime getopt + GetModuleHandle GetProcessAffinityMask GetProcessMemoryInfo GetProcessTimes @@ -2223,6 +2226,7 @@ SYSTEM_FUNCS=" SecItemImport SetConsoleTextAttribute SetConsoleCtrlHandler + SetDllDirectory setmode setrlimit Sleep @@ -3499,7 +3503,6 @@ freezedetect_filter_select="scene_sad" frei0r_filter_deps="frei0r libdl" frei0r_src_filter_deps="frei0r libdl" fspp_filter_deps="gpl" -geq_filter_deps="gpl" headphone_filter_select="fft" histeq_filter_deps="gpl" hqdn3d_filter_deps="gpl" @@ -3576,6 +3579,7 @@ tinterlace_filter_deps="gpl" tinterlace_merge_test_deps="tinterlace_filter" tinterlace_pad_test_deps="tinterlace_filter" tonemap_filter_deps="const_nan" +tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping" tonemap_opencl_filter_deps="opencl const_nan" transpose_opencl_filter_deps="opencl" transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags" @@ -3724,6 +3728,7 @@ enable asm enable debug enable doc enable faan faandct faanidct +enable large_tests enable optimizations enable runtime_cpudetect enable safe_bitstream_reader @@ -6032,6 +6037,7 @@ check_func_headers mach/mach_time.h mach_absolute_time check_func_headers stdlib.h getenv check_func_headers sys/stat.h lstat +check_func_headers windows.h GetModuleHandle check_func_headers windows.h GetProcessAffinityMask check_func_headers windows.h GetProcessTimes check_func_headers windows.h GetSystemTimeAsFileTime @@ -6040,6 +6046,7 @@ check_func_headers windows.h MapViewOfFile check_func_headers windows.h PeekNamedPipe check_func_headers windows.h SetConsoleTextAttribute check_func_headers windows.h SetConsoleCtrlHandler +check_func_headers windows.h SetDllDirectory check_func_headers windows.h Sleep check_func_headers windows.h VirtualAlloc check_func_headers glob.h glob @@ -6577,6 +6584,7 @@ if enabled vaapi; then check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC" check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth + check_type "va/va.h va/va_vpp.h" "VAProcFilterParameterBufferHDRToneMapping" check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" rotation_flags check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC" check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG" @@ -7448,7 +7456,7 @@ cat > $TMPH <,streams=v". Please note that descriptor string should be a self-closing xml tag. +seg_duration, frag_duration and frag_type override the global option values for each adaptation set. +For example, -adaptation_sets "id=0,seg_duration=2,frag_duration=1,frag_type=duration,streams=v id=1,seg_duration=2,frag_type=none,streams=a" @item timeout @var{timeout} Set timeout for socket I/O operations. Applicable only for HTTP output. @item index_correction @var{index_correction} @@ -326,9 +331,26 @@ This option will also try to comply with the above open spec, till Apple's spec Applicable only when @var{streaming} and @var{hls_playlist} options are enabled. This is an experimental feature. +@item ldash @var{ldash} +Enable Low-latency Dash by constraining the presence and values of some elements. + @item master_m3u8_publish_rate @var{master_m3u8_publish_rate} Publish master playlist repeatedly every after specified number of segment intervals. +@item -write_prft @var{write_prft} +Write Producer Reference Time elements on supported streams. 
This also enables writing +prft boxes in the underlying muxer. Applicable only when the @var{utc_url} option is enabled. + +@item -mpd_profile @var{mpd_profile} +Set one or more manifest profiles. + +@item -http_opts @var{http_opts} +List of options to pass to the underlying HTTP protocol. Applicable only for HTTP output. + +@item -target_latency @var{target_latency} +Set an intended target latency in seconds (fractional value can be set) for serving. Applicable only when @var{streaming} and @var{write_prft} options are enabled. +This is an informative fields clients can use to measure the latency of the service. + @end table @anchor{framecrc} @@ -1169,6 +1191,32 @@ The pattern "img%%-%d.jpg" will specify a sequence of filenames of the form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg}, etc. +The image muxer supports the .Y.U.V image file format. This format is +special in that that each image frame consists of three files, for +each of the YUV420P components. To read or write this image file format, +specify the name of the '.Y' file. The muxer will automatically open the +'.U' and '.V' files as required. + +@subsection Options + +@table @option +@item frame_pts +If set to 1, expand the filename with pts from pkt->pts. +Default value is 0. + +@item start_number +Start the sequence from the specified number. Default value is 1. + +@item update +If set to 1, the filename will always be interpreted as just a +filename, not a pattern, and the corresponding file will be continuously +overwritten with new images. Default value is 0. + +@item strftime +If set to 1, expand the filename with date and time information from +@code{strftime()}. Default value is 0. +@end table + @subsection Examples The following example shows how to use @command{ffmpeg} for creating a @@ -1209,32 +1257,6 @@ You can set the file name with current frame's PTS: ffmpeg -f v4l2 -r 1 -i /dev/video0 -copyts -f image2 -frame_pts true %d.jpg" @end example -@subsection Options - -@table @option -@item frame_pts -If set to 1, expand the filename with pts from pkt->pts. -Default value is 0. - -@item start_number -Start the sequence from the specified number. Default value is 1. - -@item update -If set to 1, the filename will always be interpreted as just a -filename, not a pattern, and the corresponding file will be continuously -overwritten with new images. Default value is 0. - -@item strftime -If set to 1, expand the filename with date and time information from -@code{strftime()}. Default value is 0. -@end table - -The image muxer supports the .Y.U.V image file format. This format is -special in that that each image frame consists of three files, for -each of the YUV420P components. To read or write this image file format, -specify the name of the '.Y' file. The muxer will automatically open the -'.U' and '.V' files as required. - @section matroska Matroska container muxer. diff --git a/doc/outdevs.texi b/doc/outdevs.texi index c96d2d0e43..27f543fa1a 100644 --- a/doc/outdevs.texi +++ b/doc/outdevs.texi @@ -140,8 +140,8 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz. @item list_devices If set to @option{true}, print a list of devices and exit. -Defaults to @option{false}. Alternatively you can use the @code{-sinks} -option of ffmpeg to list the available output devices. +Defaults to @option{false}. This option is deprecated, please use the +@code{-sinks} option of ffmpeg to list the available output devices. 
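The image2 option descriptions moved above (frame_pts, start_number, update, strftime) describe two kinds of output-name expansion: strftime-based date/time names and printf-style frame counters. The sketch below is purely illustrative of that distinction; the helper name and behavior are hypothetical and not the muxer's actual implementation.

/* Illustrative sketch of the two filename-expansion modes described in the
 * image2 muxer options above. Hypothetical helper, not FFmpeg code. */
#include <stdio.h>
#include <time.h>

static void build_image2_name(char *buf, size_t size, const char *pattern,
                              int use_strftime, long long frame_number)
{
    if (use_strftime) {
        /* e.g. pattern = "img-%Y%m%d-%H%M%S.jpg" */
        time_t now = time(NULL);
        strftime(buf, size, pattern, localtime(&now));
    } else {
        /* e.g. pattern = "img-%05lld.jpg", counter or pts-derived number */
        snprintf(buf, size, pattern, frame_number);
    }
}

int main(void)
{
    char name[256];
    build_image2_name(name, sizeof(name), "img-%05lld.jpg", 0, 42);
    printf("%s\n", name);   /* img-00042.jpg */
    return 0;
}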
@item list_formats If set to @option{true}, print a list of supported formats and exit. @@ -168,7 +168,7 @@ Defaults to @samp{unset}. @item List output devices: @example -ffmpeg -i test.avi -f decklink -list_devices 1 dummy +ffmpeg -sinks decklink @end example @item diff --git a/fftools/cmdutils.c b/fftools/cmdutils.c index d7d17319ee..e5b89a443b 100644 --- a/fftools/cmdutils.c +++ b/fftools/cmdutils.c @@ -119,7 +119,7 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v void init_dynload(void) { -#ifdef _WIN32 +#if HAVE_SETDLLDIRECTORY && defined(_WIN32) /* Calling SetDllDirectory with the empty string (but not NULL) removes the * current working directory from the DLL search path as a security pre-caution. */ SetDllDirectory(""); @@ -182,7 +182,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags, first = 1; for (po = options; po->name; po++) { - char buf[64]; + char buf[128]; if (((po->flags & req_flags) != req_flags) || (alt_flags && !(po->flags & alt_flags)) || @@ -2039,7 +2039,7 @@ FILE *get_preset_file(char *filename, size_t filename_size, av_strlcpy(filename, preset_name, filename_size); f = fopen(filename, "r"); } else { -#ifdef _WIN32 +#if HAVE_GETMODULEHANDLE && defined(_WIN32) char datadir[MAX_PATH], *ls; base[2] = NULL; diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index 466a213651..f519425c62 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -1268,7 +1268,8 @@ static void do_video_out(OutputFile *of, ost->forced_keyframes_expr_const_values[FKF_N] += 1; } else if ( ost->forced_keyframes && !strncmp(ost->forced_keyframes, "source", 6) - && in_picture->key_frame==1) { + && in_picture->key_frame==1 + && !i) { forced_keyframe = 1; } @@ -3404,10 +3405,6 @@ static int init_output_stream_encode(OutputStream *ost) av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n" "Please consider specifying a lower framerate, a different muxer or -vsync 2\n"); } - for (j = 0; j < ost->forced_kf_count; j++) - ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j], - AV_TIME_BASE_Q, - enc_ctx->time_base); enc_ctx->width = av_buffersink_get_w(ost->filter->filter); enc_ctx->height = av_buffersink_get_h(ost->filter->filter); @@ -3599,12 +3596,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len) int i; for (i = 0; i < ist->st->nb_side_data; i++) { AVPacketSideData *sd = &ist->st->side_data[i]; - uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); - if (!dst) - return AVERROR(ENOMEM); - memcpy(dst, sd->data, sd->size); - if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) - av_display_rotation_set((uint32_t *)dst, 0); + if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) { + uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size); + if (!dst) + return AVERROR(ENOMEM); + memcpy(dst, sd->data, sd->size); + if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX) + av_display_rotation_set((uint32_t *)dst, 0); + } } } diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c index 72838de1e2..40cc4c191c 100644 --- a/fftools/ffmpeg_filter.c +++ b/fftools/ffmpeg_filter.c @@ -786,10 +786,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter, av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC); av_bprintf(&args, "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:" - "pixel_aspect=%d/%d:sws_param=flags=%d", + "pixel_aspect=%d/%d", ifilter->width, ifilter->height, ifilter->format, - tb.num, tb.den, sar.num, 
sar.den, - SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0)); + tb.num, tb.den, sar.num, sar.den); if (fr.num && fr.den) av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den); snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index, diff --git a/fftools/ffmpeg_opt.c b/fftools/ffmpeg_opt.c index e3bc419123..aa6ede5e88 100644 --- a/fftools/ffmpeg_opt.c +++ b/fftools/ffmpeg_opt.c @@ -3202,7 +3202,7 @@ void show_help_default(const char *opt, const char *arg) OPT_EXIT, 0, 0); show_help_options(options, "Global options (affect whole program " - "instead of just one file:", + "instead of just one file):", 0, per_file | OPT_EXIT | OPT_EXPERT, 0); if (show_advanced) show_help_options(options, "Advanced global options:", OPT_EXPERT, diff --git a/fftools/ffprobe.c b/fftools/ffprobe.c index a95d74346d..b619c1f34e 100644 --- a/fftools/ffprobe.c +++ b/fftools/ffprobe.c @@ -254,6 +254,7 @@ static const OptionDef *options; /* FFprobe context */ static const char *input_filename; +static const char *print_input_filename; static AVInputFormat *iformat = NULL; static struct AVHashContext *hash; @@ -2836,7 +2837,8 @@ static void show_error(WriterContext *w, int err) writer_print_section_footer(w); } -static int open_input_file(InputFile *ifile, const char *filename) +static int open_input_file(InputFile *ifile, const char *filename, + const char *print_filename) { int err, i; AVFormatContext *fmt_ctx = NULL; @@ -2858,6 +2860,10 @@ static int open_input_file(InputFile *ifile, const char *filename) print_error(filename, err); return err; } + if (print_filename) { + av_freep(&fmt_ctx->url); + fmt_ctx->url = av_strdup(print_filename); + } ifile->fmt_ctx = fmt_ctx; if (scan_all_pmts_set) av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE); @@ -2971,7 +2977,8 @@ static void close_input_file(InputFile *ifile) avformat_close_input(&ifile->fmt_ctx); } -static int probe_file(WriterContext *wctx, const char *filename) +static int probe_file(WriterContext *wctx, const char *filename, + const char *print_filename) { InputFile ifile = { 0 }; int ret, i; @@ -2980,7 +2987,7 @@ static int probe_file(WriterContext *wctx, const char *filename) do_read_frames = do_show_frames || do_count_frames; do_read_packets = do_show_packets || do_count_packets; - ret = open_input_file(&ifile, filename); + ret = open_input_file(&ifile, filename, print_filename); if (ret < 0) goto end; @@ -3286,6 +3293,12 @@ static int opt_input_file_i(void *optctx, const char *opt, const char *arg) return 0; } +static int opt_print_filename(void *optctx, const char *opt, const char *arg) +{ + print_input_filename = arg; + return 0; +} + void show_help_default(const char *opt, const char *arg) { av_log_set_callback(log_callback_help); @@ -3544,6 +3557,7 @@ static const OptionDef real_options[] = { { "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" }, { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" }, { "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"}, + { "print_filename", HAS_ARG, {.func_arg = opt_print_filename}, "override the printed input filename", "print_file"}, { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info }, "read and decode the streams to fill missing information with heuristics" }, { NULL, }, @@ -3692,7 +3706,7 @@ int main(int argc, char **argv) av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, 
even better, run 'man %s'.\n", program_name); ret = AVERROR(EINVAL); } else if (input_filename) { - ret = probe_file(wctx, input_filename); + ret = probe_file(wctx, input_filename, print_input_filename); if (ret < 0 && do_show_error) show_error(wctx, ret); } diff --git a/libavcodec/adpcm.c b/libavcodec/adpcm.c index edf7052636..7b5b3d9698 100644 --- a/libavcodec/adpcm.c +++ b/libavcodec/adpcm.c @@ -140,8 +140,8 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx) break; case AV_CODEC_ID_ADPCM_IMA_APC: if (avctx->extradata && avctx->extradata_size >= 8) { - c->status[0].predictor = AV_RL32(avctx->extradata); - c->status[1].predictor = AV_RL32(avctx->extradata + 4); + c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18); + c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18); } break; case AV_CODEC_ID_ADPCM_IMA_WS: @@ -441,7 +441,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, d = in[16+i+j*4]; t = sign_extend(d, 4); - s = ( t<>6); + s = t*(1<>6); s_2 = s_1; s_1 = av_clip_int16(s); out0[j] = s_1; @@ -468,7 +468,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1, d = in[16+i+j*4]; t = sign_extend(d >> 4, 4); - s = ( t<>6); + s = t*(1<>6); s_2 = s_1; s_1 = av_clip_int16(s); out1[j] = s_1; @@ -1233,7 +1233,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data, } for (i=0; i<=st; i++) { c->status[i].predictor = bytestream2_get_le32u(&gb); - if (FFABS(c->status[i].predictor) > (1<<16)) + if (FFABS((int64_t)c->status[i].predictor) > (1<<16)) return AVERROR_INVALIDDATA; } diff --git a/libavcodec/agm.c b/libavcodec/agm.c index 628f324913..80f4697ee5 100644 --- a/libavcodec/agm.c +++ b/libavcodec/agm.c @@ -423,8 +423,8 @@ static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size, int map = s->map[x]; if (orig_mv_x >= -32) { - if (y * 8 + mv_y < 0 || y * 8 + mv_y >= h || - x * 8 + mv_x < 0 || x * 8 + mv_x >= w) + if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 >= h || + x * 8 + mv_x < 0 || x * 8 + mv_x + 8 >= w) return AVERROR_INVALIDDATA; copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8, diff --git a/libavcodec/alac.c b/libavcodec/alac.c index d08c946249..ea5ab182f9 100644 --- a/libavcodec/alac.c +++ b/libavcodec/alac.c @@ -228,7 +228,7 @@ static void lpc_prediction(int32_t *error_buffer, uint32_t *buffer_out, sign = sign_only(val) * error_sign; lpc_coefs[j] -= sign; val *= (unsigned)sign; - error_val -= (val >> lpc_quant) * (j + 1); + error_val -= (val >> lpc_quant) * (j + 1U); } } } diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index c33edf23c9..ec7366144f 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -776,6 +776,7 @@ extern AVCodec ff_mpeg2_qsv_encoder; extern AVCodec ff_mpeg2_vaapi_encoder; extern AVCodec ff_mpeg4_cuvid_decoder; extern AVCodec ff_mpeg4_mediacodec_decoder; +extern AVCodec ff_mpeg4_omx_encoder; extern AVCodec ff_mpeg4_v4l2m2m_encoder; extern AVCodec ff_vc1_cuvid_decoder; extern AVCodec ff_vp8_cuvid_decoder; diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c index f87131ab4f..1b23e54153 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c @@ -496,6 +496,7 @@ static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, x = (overflow << rice->k) + get_bits(gb, rice->k); } else { av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k); + ctx->error = 1; return AVERROR_INVALIDDATA; } rice->ksum += x - (rice->ksum + 8 >> 4); diff --git 
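Several of the libavcodec hunks above (adpcm.c, alac.c) replace raw shifts and absolute values with overflow-safe forms: scaling via multiplication by (1 << shift) instead of left-shifting a possibly negative sample, and widening to 64 bits before taking an absolute value. A small self-contained sketch of those patterns in generic C (not FFmpeg code):

/* Generic C illustration of the overflow-safe patterns used above:
 *  - left-shifting a negative value is undefined behaviour even when the
 *    result would fit; t * (1 << shift) is well defined as long as the
 *    product fits in int (it does for a sign-extended 4-bit sample and
 *    shift <= 12);
 *  - the absolute value of INT32_MIN overflows a 32-bit int, so widen to
 *    64 bits before comparing against a limit. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int scale_sample(int t, int shift)
{
    return t * (1 << shift);            /* "t << shift" would be UB for t < 0 */
}

static int predictor_in_range(int32_t predictor)
{
    return llabs((int64_t)predictor) <= (1 << 16);
}

int main(void)
{
    printf("%d\n", scale_sample(-3, 4));            /* -48 */
    printf("%d\n", predictor_in_range(INT32_MIN));  /* 0: rejected, no UB */
    return 0;
}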
a/libavcodec/ass.c b/libavcodec/ass.c index b4f081c819..a51673fb4e 100644 --- a/libavcodec/ass.c +++ b/libavcodec/ass.c @@ -105,7 +105,7 @@ int ff_ass_add_rect(AVSubtitle *sub, const char *dialog, char *ass_str; AVSubtitleRect **rects; - rects = av_realloc_array(sub->rects, (sub->num_rects+1), sizeof(*sub->rects)); + rects = av_realloc_array(sub->rects, sub->num_rects+1, sizeof(*sub->rects)); if (!rects) return AVERROR(ENOMEM); sub->rects = rects; diff --git a/libavcodec/atrac9dec.c b/libavcodec/atrac9dec.c index 5415d1348e..075d610e75 100644 --- a/libavcodec/atrac9dec.c +++ b/libavcodec/atrac9dec.c @@ -223,8 +223,18 @@ static inline int parse_band_ext(ATRAC9Context *s, ATRAC9BlockData *b, b->channel[0].band_ext = get_bits(gb, 2); b->channel[0].band_ext = ext_band > 2 ? b->channel[0].band_ext : 4; - if (!get_bits(gb, 5)) + if (!get_bits(gb, 5)) { + for (int i = 0; i <= stereo; i++) { + ATRAC9ChannelData *c = &b->channel[i]; + const int count = at9_tab_band_ext_cnt[c->band_ext][ext_band]; + for (int j = 0; j < count; j++) { + int len = at9_tab_band_ext_lengths[c->band_ext][ext_band][j]; + c->band_ext_data[j] = av_clip_uintp2_c(c->band_ext_data[j], len); + } + } + return 0; + } for (int i = 0; i <= stereo; i++) { ATRAC9ChannelData *c = &b->channel[i]; diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 735a3c2d76..4b0e7c0853 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -656,6 +656,7 @@ enum AVCodecID { AV_CODEC_ID_ATRAC9, AV_CODEC_ID_HCOM, AV_CODEC_ID_ACELP_KELVIN, + AV_CODEC_ID_MPEGH_3D_AUDIO, /* subtitle codecs */ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. @@ -1175,6 +1176,11 @@ typedef struct AVCPBProperties { uint64_t vbv_delay; } AVCPBProperties; +typedef struct AVProducerReferenceTime { + int64_t wallclock; + int flags; +} AVProducerReferenceTime; + /** * The decoder will keep a reference to the frame and may reuse it later. */ @@ -1409,6 +1415,11 @@ enum AVPacketSideDataType { */ AV_PKT_DATA_AFD, + /** + * Producer Reference Time data corresponding to the AVProducerReferenceTime struct. + */ + AV_PKT_DATA_PRFT, + /** * The number of side data types. 
* This is not part of the public API/ABI in the sense that it may diff --git a/libavcodec/avpacket.c b/libavcodec/avpacket.c index 858f827a0a..74845efcd2 100644 --- a/libavcodec/avpacket.c +++ b/libavcodec/avpacket.c @@ -741,3 +741,25 @@ int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, i return 0; } + +int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp) +{ + AVProducerReferenceTime *prft; + uint8_t *side_data; + int side_data_size; + + side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size); + if (!side_data) { + side_data_size = sizeof(AVProducerReferenceTime); + side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_PRFT, side_data_size); + } + + if (!side_data || side_data_size < sizeof(AVProducerReferenceTime)) + return AVERROR(ENOMEM); + + prft = (AVProducerReferenceTime *)side_data; + prft->wallclock = timestamp; + prft->flags = 0; + + return 0; +} diff --git a/libavcodec/bgmc.c b/libavcodec/bgmc.c index 2d59aa37ad..361f7c52e6 100644 --- a/libavcodec/bgmc.c +++ b/libavcodec/bgmc.c @@ -493,7 +493,7 @@ int ff_bgmc_decode_init(GetBitContext *gb, unsigned int *h, *h = TOP_VALUE; *l = 0; - *v = get_bits_long(gb, VALUE_BITS); + *v = get_bits(gb, VALUE_BITS); return 0; } diff --git a/libavcodec/binkaudio.c b/libavcodec/binkaudio.c index 2384ebf312..64a08b8608 100644 --- a/libavcodec/binkaudio.c +++ b/libavcodec/binkaudio.c @@ -153,7 +153,7 @@ static av_cold int decode_init(AVCodecContext *avctx) static float get_float(GetBitContext *gb) { int power = get_bits(gb, 5); - float f = ldexpf(get_bits_long(gb, 23), power - 23); + float f = ldexpf(get_bits(gb, 23), power - 23); if (get_bits1(gb)) f = -f; return f; diff --git a/libavcodec/bsf.c b/libavcodec/bsf.c index c1653cddb0..9dbf6a636d 100644 --- a/libavcodec/bsf.c +++ b/libavcodec/bsf.c @@ -82,6 +82,7 @@ const AVClass *av_bsf_get_class(void) int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx) { AVBSFContext *ctx; + AVBSFInternal *bsfi; int ret; ctx = av_mallocz(sizeof(*ctx)); @@ -98,14 +99,15 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx) goto fail; } - ctx->internal = av_mallocz(sizeof(*ctx->internal)); - if (!ctx->internal) { + bsfi = av_mallocz(sizeof(*bsfi)); + if (!bsfi) { ret = AVERROR(ENOMEM); goto fail; } + ctx->internal = bsfi; - ctx->internal->buffer_pkt = av_packet_alloc(); - if (!ctx->internal->buffer_pkt) { + bsfi->buffer_pkt = av_packet_alloc(); + if (!bsfi->buffer_pkt) { ret = AVERROR(ENOMEM); goto fail; } @@ -175,9 +177,11 @@ int av_bsf_init(AVBSFContext *ctx) void av_bsf_flush(AVBSFContext *ctx) { - ctx->internal->eof = 0; + AVBSFInternal *bsfi = ctx->internal; - av_packet_unref(ctx->internal->buffer_pkt); + bsfi->eof = 0; + + av_packet_unref(bsfi->buffer_pkt); if (ctx->filter->flush) ctx->filter->flush(ctx); @@ -185,26 +189,27 @@ void av_bsf_flush(AVBSFContext *ctx) int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt) { + AVBSFInternal *bsfi = ctx->internal; int ret; if (!pkt || (!pkt->data && !pkt->side_data_elems)) { - ctx->internal->eof = 1; + bsfi->eof = 1; return 0; } - if (ctx->internal->eof) { + if (bsfi->eof) { av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n"); return AVERROR(EINVAL); } - if (ctx->internal->buffer_pkt->data || - ctx->internal->buffer_pkt->side_data_elems) + if (bsfi->buffer_pkt->data || + bsfi->buffer_pkt->side_data_elems) return AVERROR(EAGAIN); ret = av_packet_make_refcounted(pkt); if (ret < 0) return ret; - av_packet_move_ref(ctx->internal->buffer_pkt, pkt); + 
av_packet_move_ref(bsfi->buffer_pkt, pkt); return 0; } @@ -216,38 +221,38 @@ int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt) int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt) { - AVBSFInternal *in = ctx->internal; + AVBSFInternal *bsfi = ctx->internal; AVPacket *tmp_pkt; - if (in->eof) + if (bsfi->eof) return AVERROR_EOF; - if (!ctx->internal->buffer_pkt->data && - !ctx->internal->buffer_pkt->side_data_elems) + if (!bsfi->buffer_pkt->data && + !bsfi->buffer_pkt->side_data_elems) return AVERROR(EAGAIN); tmp_pkt = av_packet_alloc(); if (!tmp_pkt) return AVERROR(ENOMEM); - *pkt = ctx->internal->buffer_pkt; - ctx->internal->buffer_pkt = tmp_pkt; + *pkt = bsfi->buffer_pkt; + bsfi->buffer_pkt = tmp_pkt; return 0; } int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt) { - AVBSFInternal *in = ctx->internal; + AVBSFInternal *bsfi = ctx->internal; - if (in->eof) + if (bsfi->eof) return AVERROR_EOF; - if (!ctx->internal->buffer_pkt->data && - !ctx->internal->buffer_pkt->side_data_elems) + if (!bsfi->buffer_pkt->data && + !bsfi->buffer_pkt->side_data_elems) return AVERROR(EAGAIN); - av_packet_move_ref(pkt, ctx->internal->buffer_pkt); + av_packet_move_ref(pkt, bsfi->buffer_pkt); return 0; } @@ -517,8 +522,8 @@ static int bsf_parse_single(const char *str, AVBSFList *bsf_lst) ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options); - av_dict_free(&bsf_options); end: + av_dict_free(&bsf_options); av_free(buf); return ret; } diff --git a/libavcodec/cbs_av1.h b/libavcodec/cbs_av1.h index 9eaf5c4de6..fdc629b00c 100644 --- a/libavcodec/cbs_av1.h +++ b/libavcodec/cbs_av1.h @@ -105,7 +105,7 @@ typedef struct AV1RawSequenceHeader { uint8_t use_128x128_superblock; uint8_t enable_filter_intra; uint8_t enable_intra_edge_filter; - uint8_t enable_intraintra_compound; + uint8_t enable_interintra_compound; uint8_t enable_masked_compound; uint8_t enable_warped_motion; uint8_t enable_dual_filter; @@ -256,20 +256,20 @@ typedef struct AV1RawFrameHeader { uint8_t update_grain; uint8_t film_grain_params_ref_idx; uint8_t num_y_points; - uint8_t point_y_value[16]; - uint8_t point_y_scaling[16]; + uint8_t point_y_value[14]; + uint8_t point_y_scaling[14]; uint8_t chroma_scaling_from_luma; uint8_t num_cb_points; - uint8_t point_cb_value[16]; - uint8_t point_cb_scaling[16]; + uint8_t point_cb_value[10]; + uint8_t point_cb_scaling[10]; uint8_t num_cr_points; - uint8_t point_cr_value[16]; - uint8_t point_cr_scaling[16]; + uint8_t point_cr_value[10]; + uint8_t point_cr_scaling[10]; uint8_t grain_scaling_minus_8; uint8_t ar_coeff_lag; uint8_t ar_coeffs_y_plus_128[24]; - uint8_t ar_coeffs_cb_plus_128[24]; - uint8_t ar_coeffs_cr_plus_128[24]; + uint8_t ar_coeffs_cb_plus_128[25]; + uint8_t ar_coeffs_cr_plus_128[25]; uint8_t ar_coeff_shift_minus_6; uint8_t grain_scale_shift; uint8_t cb_mult; diff --git a/libavcodec/cbs_av1_syntax_template.c b/libavcodec/cbs_av1_syntax_template.c index 6c4816f964..f830fb1517 100644 --- a/libavcodec/cbs_av1_syntax_template.c +++ b/libavcodec/cbs_av1_syntax_template.c @@ -268,7 +268,7 @@ static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw, flag(enable_intra_edge_filter); if (current->reduced_still_picture_header) { - infer(enable_intraintra_compound, 0); + infer(enable_interintra_compound, 0); infer(enable_masked_compound, 0); infer(enable_warped_motion, 0); infer(enable_dual_filter, 0); @@ -281,7 +281,7 @@ static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw, infer(seq_force_integer_mv, AV1_SELECT_INTEGER_MV); } else { - 
flag(enable_intraintra_compound); + flag(enable_interintra_compound); flag(enable_masked_compound); flag(enable_warped_motion); flag(enable_dual_filter); @@ -1155,7 +1155,7 @@ static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw, return 0; } - fb(4, num_y_points); + fc(4, num_y_points, 0, 14); for (i = 0; i < current->num_y_points; i++) { fbs(8, point_y_value[i], 1, i); fbs(8, point_y_scaling[i], 1, i); @@ -1174,12 +1174,12 @@ static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw, infer(num_cb_points, 0); infer(num_cr_points, 0); } else { - fb(4, num_cb_points); + fc(4, num_cb_points, 0, 10); for (i = 0; i < current->num_cb_points; i++) { fbs(8, point_cb_value[i], 1, i); fbs(8, point_cb_scaling[i], 1, i); } - fb(4, num_cr_points); + fc(4, num_cr_points, 0, 10); for (i = 0; i < current->num_cr_points; i++) { fbs(8, point_cr_value[i], 1, i); fbs(8, point_cr_scaling[i], 1, i); diff --git a/libavcodec/cbs_h2645.c b/libavcodec/cbs_h2645.c index 5f71d80584..ad8afa6d4a 100644 --- a/libavcodec/cbs_h2645.c +++ b/libavcodec/cbs_h2645.c @@ -568,7 +568,10 @@ static int cbs_h2645_fragment_add_nals(CodedBitstreamContext *ctx, // Remove trailing zeroes. while (size > 0 && nal->data[size - 1] == 0) --size; - av_assert0(size > 0); + if (size == 0) { + av_log(ctx->log_ctx, AV_LOG_VERBOSE, "Discarding empty 0 NAL unit\n"); + continue; + } ref = (nal->data == nal->raw_data) ? frag->data_ref : packet->rbsp.rbsp_buffer_ref; diff --git a/libavcodec/cbs_h264_syntax_template.c b/libavcodec/cbs_h264_syntax_template.c index 1671a15d33..878d348b94 100644 --- a/libavcodec/cbs_h264_syntax_template.c +++ b/libavcodec/cbs_h264_syntax_template.c @@ -954,6 +954,7 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw, current->payload[k].payload_type = payload_type; current->payload[k].payload_size = payload_size; + current->payload_count++; CHECK(FUNC(sei_payload)(ctx, rw, ¤t->payload[k])); if (!cbs_h2645_read_more_rbsp_data(rw)) @@ -964,7 +965,6 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw, "SEI message: found %d.\n", k); return AVERROR_INVALIDDATA; } - current->payload_count = k + 1; #else for (k = 0; k < current->payload_count; k++) { PutBitContext start_state; diff --git a/libavcodec/cbs_h265_syntax_template.c b/libavcodec/cbs_h265_syntax_template.c index 54570929ec..15114548c6 100644 --- a/libavcodec/cbs_h265_syntax_template.c +++ b/libavcodec/cbs_h265_syntax_template.c @@ -2184,6 +2184,7 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw, current->payload[k].payload_type = payload_type; current->payload[k].payload_size = payload_size; + current->payload_count++; CHECK(FUNC(sei_payload)(ctx, rw, ¤t->payload[k], prefix)); if (!cbs_h2645_read_more_rbsp_data(rw)) @@ -2194,7 +2195,6 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw, "SEI message: found %d.\n", k); return AVERROR_INVALIDDATA; } - current->payload_count = k + 1; #else for (k = 0; k < current->payload_count; k++) { PutBitContext start_state; diff --git a/libavcodec/cbs_vp9.c b/libavcodec/cbs_vp9.c index 42e4dcf5ac..ec82f11c76 100644 --- a/libavcodec/cbs_vp9.c +++ b/libavcodec/cbs_vp9.c @@ -416,6 +416,9 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx, uint8_t superframe_header; int err; + if (frag->data_size == 0) + return AVERROR_INVALIDDATA; + // Last byte in the packet. 
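The cbs_vp9.c hunk above adds two sanity checks before the superframe index is parsed: an empty fragment has no marker byte to read, and a computed index larger than the packet cannot be valid. The compact sketch below mirrors the arithmetic shown in that hunk in generic C; it is an illustration, not the library code.

/* Sketch of the superframe-index validation added above (generic C). */
#include <stddef.h>
#include <stdint.h>

static int vp9_superframe_index_size(const uint8_t *data, size_t size)
{
    uint8_t marker;
    size_t index_size;

    if (size == 0)
        return -1;                        /* nothing to parse */

    marker = data[size - 1];              /* marker is the last byte */
    if ((marker & 0xe0) != 0xc0)
        return 0;                         /* not a superframe */

    index_size = 2 + (((marker & 0x18) >> 3) + 1) * ((marker & 0x07) + 1);
    if (index_size > size)
        return -1;                        /* index cannot extend past the data */

    return (int)index_size;
}

int main(void)
{
    const uint8_t pkt[] = { 0x00, 0x00, 0xc0 };   /* 1 frame, 1-byte sizes */
    return vp9_superframe_index_size(pkt, sizeof(pkt)) < 0;
}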
superframe_header = frag->data[frag->data_size - 1]; @@ -428,6 +431,9 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx, index_size = 2 + (((superframe_header & 0x18) >> 3) + 1) * ((superframe_header & 0x07) + 1); + if (index_size > frag->data_size) + return AVERROR_INVALIDDATA; + err = init_get_bits(&gbc, frag->data + frag->data_size - index_size, 8 * index_size); if (err < 0) diff --git a/libavcodec/codec_desc.c b/libavcodec/codec_desc.c index 570bd2f382..529b838e5b 100644 --- a/libavcodec/codec_desc.c +++ b/libavcodec/codec_desc.c @@ -1754,252 +1754,252 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s16le", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S16BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s16be", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U16LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u16le", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 16-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U16BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u16be", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 16-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S8, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s8", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U8, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u8", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 8-bit"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_MULAW, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_mulaw", .long_name = NULL_IF_CONFIG_SMALL("PCM mu-law / G.711 mu-law"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_PCM_ALAW, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_alaw", .long_name = NULL_IF_CONFIG_SMALL("PCM A-law / G.711 A-law"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_PCM_S32LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s32le", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S32BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s32be", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U32LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u32le", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 32-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U32BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u32be", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 32-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = 
AV_CODEC_ID_PCM_S24LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s24le", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S24BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s24be", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U24LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u24le", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 24-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_U24BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_u24be", .long_name = NULL_IF_CONFIG_SMALL("PCM unsigned 24-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S24DAUD, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s24daud", .long_name = NULL_IF_CONFIG_SMALL("PCM D-Cinema audio signed 24-bit"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_ZORK, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_zork", .long_name = NULL_IF_CONFIG_SMALL("PCM Zork"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_PCM_S16LE_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s16le_planar", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit little-endian planar"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_DVD, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_dvd", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 20|24-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_F32BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_f32be", .long_name = NULL_IF_CONFIG_SMALL("PCM 32-bit floating point big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_F32LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_f32le", .long_name = NULL_IF_CONFIG_SMALL("PCM 32-bit floating point little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_F64BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_f64be", .long_name = NULL_IF_CONFIG_SMALL("PCM 64-bit floating point big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_F64LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_f64le", .long_name = NULL_IF_CONFIG_SMALL("PCM 64-bit floating point little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_BLURAY, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_bluray", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_LXF, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_lxf", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 20-bit little-endian planar"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = 
AV_CODEC_ID_S302M, .type = AVMEDIA_TYPE_AUDIO, .name = "s302m", .long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S8_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s8_planar", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S24LE_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s24le_planar", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 24-bit little-endian planar"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S32LE_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s32le_planar", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 32-bit little-endian planar"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S16BE_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s16be_planar", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 16-bit big-endian planar"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S64LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s64le", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_S64BE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_s64be", .long_name = NULL_IF_CONFIG_SMALL("PCM signed 64-bit big-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_F16LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_f16le", .long_name = NULL_IF_CONFIG_SMALL("PCM 16.8 floating point little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_F24LE, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_f24le", .long_name = NULL_IF_CONFIG_SMALL("PCM 24.0 floating point little-endian"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_PCM_VIDC, .type = AVMEDIA_TYPE_AUDIO, .name = "pcm_vidc", .long_name = NULL_IF_CONFIG_SMALL("PCM Archimedes VIDC"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, /* various ADPCM codecs */ @@ -2008,294 +2008,294 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_qt", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA QuickTime"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_WAV, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_wav", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA WAV"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_DK3, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_dk3", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Duck DK3"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_DK4, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_dk4", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Duck DK4"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = 
AV_CODEC_ID_ADPCM_IMA_WS, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_ws", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Westwood"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_SMJPEG, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_smjpeg", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Loki SDL MJPEG"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_MS, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ms", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Microsoft"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_4XM, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_4xm", .long_name = NULL_IF_CONFIG_SMALL("ADPCM 4X Movie"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_XA, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_xa", .long_name = NULL_IF_CONFIG_SMALL("ADPCM CDROM XA"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_ADX, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_adx", .long_name = NULL_IF_CONFIG_SMALL("SEGA CRI ADX ADPCM"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_EA, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ea", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_G726, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_g726", .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_CT, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ct", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Creative Technology"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_SWF, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_swf", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Shockwave Flash"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_YAMAHA, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_yamaha", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Yamaha"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_SBPRO_4, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_sbpro_4", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 4-bit"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_SBPRO_3, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_sbpro_3", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 2.6-bit"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_SBPRO_2, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_sbpro_2", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Sound Blaster Pro 2-bit"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_THP, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_thp", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo THP"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = 
AV_CODEC_ID_ADPCM_IMA_AMV, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_amv", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA AMV"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_EA_R1, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ea_r1", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R1"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_EA_R3, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ea_r3", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R3"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_EA_R2, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ea_r2", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts R2"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_EA_SEAD, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_ea_sead", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Electronic Arts SEAD"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_EA_EACS, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_ea_eacs", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Electronic Arts EACS"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_EA_XAS, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ea_xas", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts XAS"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_EA_MAXIS_XA, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ea_maxis_xa", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Electronic Arts Maxis CDROM XA"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_ISS, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_iss", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Funcom ISS"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_G722, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_g722", .long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_APC, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_apc", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA CRYO APC"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_VIMA, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_vima", .long_name = NULL_IF_CONFIG_SMALL("LucasArts VIMA audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_AFC, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_afc", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo Gamecube AFC"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_OKI, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_oki", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Dialogic OKI"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_DTK, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_dtk", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo Gamecube DTK"), - .props 
= AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_RAD, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_rad", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Radical"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_G726LE, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_g726le", .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM little-endian"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_THP_LE, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_thp_le", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Nintendo THP (Little-Endian)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_PSX, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_psx", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Playstation"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_AICA, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_aica", .long_name = NULL_IF_CONFIG_SMALL("ADPCM Yamaha AICA"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_IMA_DAT4, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_ima_dat4", .long_name = NULL_IF_CONFIG_SMALL("ADPCM IMA Eurocom DAT4"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_MTAF, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_mtaf", .long_name = NULL_IF_CONFIG_SMALL("ADPCM MTAF"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ADPCM_AGM, .type = AVMEDIA_TYPE_AUDIO, .name = "adpcm_agm", .long_name = NULL_IF_CONFIG_SMALL("ADPCM AmuseGraphics Movie AGM"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, /* AMR */ @@ -2304,14 +2304,14 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "amr_nb", .long_name = NULL_IF_CONFIG_SMALL("AMR-NB (Adaptive Multi-Rate NarrowBand)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_AMR_WB, .type = AVMEDIA_TYPE_AUDIO, .name = "amr_wb", .long_name = NULL_IF_CONFIG_SMALL("AMR-WB (Adaptive Multi-Rate WideBand)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, /* RealAudio codecs*/ @@ -2320,14 +2320,14 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "ra_144", .long_name = NULL_IF_CONFIG_SMALL("RealAudio 1.0 (14.4K)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_RA_288, .type = AVMEDIA_TYPE_AUDIO, .name = "ra_288", .long_name = NULL_IF_CONFIG_SMALL("RealAudio 2.0 (28.8K)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, /* various DPCM codecs */ @@ -2336,42 +2336,42 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "roq_dpcm", .long_name = NULL_IF_CONFIG_SMALL("DPCM id RoQ"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_INTERPLAY_DPCM, .type = AVMEDIA_TYPE_AUDIO, .name = "interplay_dpcm", .long_name = NULL_IF_CONFIG_SMALL("DPCM Interplay"), - .props = AV_CODEC_PROP_LOSSY, + .props 
= AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_XAN_DPCM, .type = AVMEDIA_TYPE_AUDIO, .name = "xan_dpcm", .long_name = NULL_IF_CONFIG_SMALL("DPCM Xan"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SOL_DPCM, .type = AVMEDIA_TYPE_AUDIO, .name = "sol_dpcm", .long_name = NULL_IF_CONFIG_SMALL("DPCM Sol"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SDX2_DPCM, .type = AVMEDIA_TYPE_AUDIO, .name = "sdx2_dpcm", .long_name = NULL_IF_CONFIG_SMALL("DPCM Squareroot-Delta-Exact"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_GREMLIN_DPCM, .type = AVMEDIA_TYPE_AUDIO, .name = "gremlin_dpcm", .long_name = NULL_IF_CONFIG_SMALL("DPCM Gremlin"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, /* audio codecs */ @@ -2380,21 +2380,21 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "mp2", .long_name = NULL_IF_CONFIG_SMALL("MP2 (MPEG audio layer 2)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_MP3, .type = AVMEDIA_TYPE_AUDIO, .name = "mp3", .long_name = NULL_IF_CONFIG_SMALL("MP3 (MPEG audio layer 3)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_AAC, .type = AVMEDIA_TYPE_AUDIO, .name = "aac", .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, .profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles), }, { @@ -2402,14 +2402,14 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "ac3", .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DTS, .type = AVMEDIA_TYPE_AUDIO, .name = "dts", .long_name = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"), - .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_LOSSLESS, .profiles = NULL_IF_CONFIG_SMALL(ff_dca_profiles), }, { @@ -2417,49 +2417,49 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "vorbis", .long_name = NULL_IF_CONFIG_SMALL("Vorbis"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DVAUDIO, .type = AVMEDIA_TYPE_AUDIO, .name = "dvaudio", .long_name = NULL_IF_CONFIG_SMALL("DV audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_WMAV1, .type = AVMEDIA_TYPE_AUDIO, .name = "wmav1", .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 1"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_WMAV2, .type = AVMEDIA_TYPE_AUDIO, .name = "wmav2", .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 2"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_MACE3, .type = AVMEDIA_TYPE_AUDIO, .name = "mace3", .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 3:1"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, 
}, { .id = AV_CODEC_ID_MACE6, .type = AVMEDIA_TYPE_AUDIO, .name = "mace6", .long_name = NULL_IF_CONFIG_SMALL("MACE (Macintosh Audio Compression/Expansion) 6:1"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_VMDAUDIO, .type = AVMEDIA_TYPE_AUDIO, .name = "vmdaudio", .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_FLAC, @@ -2473,21 +2473,21 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "mp3adu", .long_name = NULL_IF_CONFIG_SMALL("ADU (Application Data Unit) MP3 (MPEG audio layer 3)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_MP3ON4, .type = AVMEDIA_TYPE_AUDIO, .name = "mp3on4", .long_name = NULL_IF_CONFIG_SMALL("MP3onMP4"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SHORTEN, .type = AVMEDIA_TYPE_AUDIO, .name = "shorten", .long_name = NULL_IF_CONFIG_SMALL("Shorten"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_ALAC, @@ -2501,35 +2501,35 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "westwood_snd1", .long_name = NULL_IF_CONFIG_SMALL("Westwood Audio (SND1)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_GSM, .type = AVMEDIA_TYPE_AUDIO, .name = "gsm", .long_name = NULL_IF_CONFIG_SMALL("GSM"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_QDM2, .type = AVMEDIA_TYPE_AUDIO, .name = "qdm2", .long_name = NULL_IF_CONFIG_SMALL("QDesign Music Codec 2"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_COOK, .type = AVMEDIA_TYPE_AUDIO, .name = "cook", .long_name = NULL_IF_CONFIG_SMALL("Cook / Cooker / Gecko (RealAudio G2)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_TRUESPEECH, .type = AVMEDIA_TYPE_AUDIO, .name = "truespeech", .long_name = NULL_IF_CONFIG_SMALL("DSP Group TrueSpeech"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_TTA, @@ -2543,14 +2543,14 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "smackaudio", .long_name = NULL_IF_CONFIG_SMALL("Smacker audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_QCELP, .type = AVMEDIA_TYPE_AUDIO, .name = "qcelp", .long_name = NULL_IF_CONFIG_SMALL("QCELP / PureVoice"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_WAVPACK, @@ -2565,126 +2565,126 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "dsicinaudio", .long_name = NULL_IF_CONFIG_SMALL("Delphine Software International CIN audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_IMC, .type = AVMEDIA_TYPE_AUDIO, .name = "imc", .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { 
.id = AV_CODEC_ID_MUSEPACK7, .type = AVMEDIA_TYPE_AUDIO, .name = "musepack7", .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_MLP, .type = AVMEDIA_TYPE_AUDIO, .name = "mlp", .long_name = NULL_IF_CONFIG_SMALL("MLP (Meridian Lossless Packing)"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_GSM_MS, .type = AVMEDIA_TYPE_AUDIO, .name = "gsm_ms", .long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ATRAC3, .type = AVMEDIA_TYPE_AUDIO, .name = "atrac3", .long_name = NULL_IF_CONFIG_SMALL("ATRAC3 (Adaptive TRansform Acoustic Coding 3)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_APE, .type = AVMEDIA_TYPE_AUDIO, .name = "ape", .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_NELLYMOSER, .type = AVMEDIA_TYPE_AUDIO, .name = "nellymoser", .long_name = NULL_IF_CONFIG_SMALL("Nellymoser Asao"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_MUSEPACK8, .type = AVMEDIA_TYPE_AUDIO, .name = "musepack8", .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SPEEX, .type = AVMEDIA_TYPE_AUDIO, .name = "speex", .long_name = NULL_IF_CONFIG_SMALL("Speex"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_WMAVOICE, .type = AVMEDIA_TYPE_AUDIO, .name = "wmavoice", .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Voice"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_WMAPRO, .type = AVMEDIA_TYPE_AUDIO, .name = "wmapro", .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio 9 Professional"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_WMALOSSLESS, .type = AVMEDIA_TYPE_AUDIO, .name = "wmalossless", .long_name = NULL_IF_CONFIG_SMALL("Windows Media Audio Lossless"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_ATRAC3P, .type = AVMEDIA_TYPE_AUDIO, .name = "atrac3p", .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_EAC3, .type = AVMEDIA_TYPE_AUDIO, .name = "eac3", .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SIPR, .type = AVMEDIA_TYPE_AUDIO, .name = "sipr", .long_name = NULL_IF_CONFIG_SMALL("RealAudio SIPR / ACELP.NET"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_MP1, .type = AVMEDIA_TYPE_AUDIO, .name = "mp1", .long_name = NULL_IF_CONFIG_SMALL("MP1 (MPEG audio layer 1)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_TWINVQ, .type = AVMEDIA_TYPE_AUDIO, .name 
= "twinvq", .long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_TRUEHD, @@ -2698,35 +2698,35 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "mp4als", .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Audio Lossless Coding (ALS)"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_ATRAC1, .type = AVMEDIA_TYPE_AUDIO, .name = "atrac1", .long_name = NULL_IF_CONFIG_SMALL("ATRAC1 (Adaptive TRansform Acoustic Coding)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_BINKAUDIO_RDFT, .type = AVMEDIA_TYPE_AUDIO, .name = "binkaudio_rdft", .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (RDFT)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_BINKAUDIO_DCT, .type = AVMEDIA_TYPE_AUDIO, .name = "binkaudio_dct", .long_name = NULL_IF_CONFIG_SMALL("Bink Audio (DCT)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_AAC_LATM, .type = AVMEDIA_TYPE_AUDIO, .name = "aac_latm", .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Coding LATM syntax)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, .profiles = NULL_IF_CONFIG_SMALL(ff_aac_profiles), }, { @@ -2734,84 +2734,84 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "qdmc", .long_name = NULL_IF_CONFIG_SMALL("QDesign Music"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_CELT, .type = AVMEDIA_TYPE_AUDIO, .name = "celt", .long_name = NULL_IF_CONFIG_SMALL("Constrained Energy Lapped Transform (CELT)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_G723_1, .type = AVMEDIA_TYPE_AUDIO, .name = "g723_1", .long_name = NULL_IF_CONFIG_SMALL("G.723.1"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_G729, .type = AVMEDIA_TYPE_AUDIO, .name = "g729", .long_name = NULL_IF_CONFIG_SMALL("G.729"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_8SVX_EXP, .type = AVMEDIA_TYPE_AUDIO, .name = "8svx_exp", .long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_8SVX_FIB, .type = AVMEDIA_TYPE_AUDIO, .name = "8svx_fib", .long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_BMV_AUDIO, .type = AVMEDIA_TYPE_AUDIO, .name = "bmv_audio", .long_name = NULL_IF_CONFIG_SMALL("Discworld II BMV audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_RALF, .type = AVMEDIA_TYPE_AUDIO, .name = "ralf", .long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_IAC, .type = AVMEDIA_TYPE_AUDIO, .name = "iac", .long_name = NULL_IF_CONFIG_SMALL("IAC (Indeo Audio Coder)"), - .props = AV_CODEC_PROP_LOSSY, + .props = 
AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ILBC, .type = AVMEDIA_TYPE_AUDIO, .name = "ilbc", .long_name = NULL_IF_CONFIG_SMALL("iLBC (Internet Low Bitrate Codec)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_OPUS, .type = AVMEDIA_TYPE_AUDIO, .name = "opus", .long_name = NULL_IF_CONFIG_SMALL("Opus (Opus Interactive Audio Codec)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_COMFORT_NOISE, .type = AVMEDIA_TYPE_AUDIO, .name = "comfortnoise", .long_name = NULL_IF_CONFIG_SMALL("RFC 3389 Comfort Noise"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_TAK, @@ -2825,192 +2825,202 @@ static const AVCodecDescriptor codec_descriptors[] = { .type = AVMEDIA_TYPE_AUDIO, .name = "metasound", .long_name = NULL_IF_CONFIG_SMALL("Voxware MetaSound"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_PAF_AUDIO, .type = AVMEDIA_TYPE_AUDIO, .name = "paf_audio", .long_name = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ON2AVC, .type = AVMEDIA_TYPE_AUDIO, .name = "avc", .long_name = NULL_IF_CONFIG_SMALL("On2 Audio for Video Codec"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DSS_SP, .type = AVMEDIA_TYPE_AUDIO, .name = "dss_sp", .long_name = NULL_IF_CONFIG_SMALL("Digital Speech Standard - Standard Play mode (DSS SP)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_CODEC2, .type = AVMEDIA_TYPE_AUDIO, .name = "codec2", .long_name = NULL_IF_CONFIG_SMALL("codec2 (very low bitrate speech codec)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_FFWAVESYNTH, .type = AVMEDIA_TYPE_AUDIO, .name = "wavesynth", .long_name = NULL_IF_CONFIG_SMALL("Wave synthesis pseudo-codec"), + .props = AV_CODEC_PROP_INTRA_ONLY, }, { .id = AV_CODEC_ID_SONIC, .type = AVMEDIA_TYPE_AUDIO, .name = "sonic", .long_name = NULL_IF_CONFIG_SMALL("Sonic"), + .props = AV_CODEC_PROP_INTRA_ONLY, }, { .id = AV_CODEC_ID_SONIC_LS, .type = AVMEDIA_TYPE_AUDIO, .name = "sonicls", .long_name = NULL_IF_CONFIG_SMALL("Sonic lossless"), + .props = AV_CODEC_PROP_INTRA_ONLY, }, { .id = AV_CODEC_ID_EVRC, .type = AVMEDIA_TYPE_AUDIO, .name = "evrc", .long_name = NULL_IF_CONFIG_SMALL("EVRC (Enhanced Variable Rate Codec)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SMV, .type = AVMEDIA_TYPE_AUDIO, .name = "smv", .long_name = NULL_IF_CONFIG_SMALL("SMV (Selectable Mode Vocoder)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DSD_LSBF, .type = AVMEDIA_TYPE_AUDIO, .name = "dsd_lsbf", .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), least significant bit first"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DSD_MSBF, .type = AVMEDIA_TYPE_AUDIO, .name = "dsd_msbf", .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), most significant bit first"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | 
AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DSD_LSBF_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "dsd_lsbf_planar", .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), least significant bit first, planar"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DSD_MSBF_PLANAR, .type = AVMEDIA_TYPE_AUDIO, .name = "dsd_msbf_planar", .long_name = NULL_IF_CONFIG_SMALL("DSD (Direct Stream Digital), most significant bit first, planar"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_4GV, .type = AVMEDIA_TYPE_AUDIO, .name = "4gv", .long_name = NULL_IF_CONFIG_SMALL("4GV (Fourth Generation Vocoder)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_INTERPLAY_ACM, .type = AVMEDIA_TYPE_AUDIO, .name = "interplayacm", .long_name = NULL_IF_CONFIG_SMALL("Interplay ACM"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_XMA1, .type = AVMEDIA_TYPE_AUDIO, .name = "xma1", .long_name = NULL_IF_CONFIG_SMALL("Xbox Media Audio 1"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_XMA2, .type = AVMEDIA_TYPE_AUDIO, .name = "xma2", .long_name = NULL_IF_CONFIG_SMALL("Xbox Media Audio 2"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_DST, .type = AVMEDIA_TYPE_AUDIO, .name = "dst", .long_name = NULL_IF_CONFIG_SMALL("DST (Direct Stream Transfer)"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_ATRAC3AL, .type = AVMEDIA_TYPE_AUDIO, .name = "atrac3al", .long_name = NULL_IF_CONFIG_SMALL("ATRAC3 AL (Adaptive TRansform Acoustic Coding 3 Advanced Lossless)"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_ATRAC3PAL, .type = AVMEDIA_TYPE_AUDIO, .name = "atrac3pal", .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ AL (Adaptive TRansform Acoustic Coding 3+ Advanced Lossless)"), - .props = AV_CODEC_PROP_LOSSLESS, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS, }, { .id = AV_CODEC_ID_DOLBY_E, .type = AVMEDIA_TYPE_AUDIO, .name = "dolby_e", .long_name = NULL_IF_CONFIG_SMALL("Dolby E"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_APTX, .type = AVMEDIA_TYPE_AUDIO, .name = "aptx", .long_name = NULL_IF_CONFIG_SMALL("aptX (Audio Processing Technology for Bluetooth)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_APTX_HD, .type = AVMEDIA_TYPE_AUDIO, .name = "aptx_hd", .long_name = NULL_IF_CONFIG_SMALL("aptX HD (Audio Processing Technology for Bluetooth)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_SBC, .type = AVMEDIA_TYPE_AUDIO, .name = "sbc", .long_name = NULL_IF_CONFIG_SMALL("SBC (low-complexity subband codec)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ATRAC9, .type = AVMEDIA_TYPE_AUDIO, .name = "atrac9", .long_name = NULL_IF_CONFIG_SMALL("ATRAC9 (Adaptive TRansform Acoustic Coding 9)"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_HCOM, 
.type = AVMEDIA_TYPE_AUDIO, .name = "hcom", .long_name = NULL_IF_CONFIG_SMALL("HCOM Audio"), - .props = AV_CODEC_PROP_LOSSY, + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, }, { .id = AV_CODEC_ID_ACELP_KELVIN, .type = AVMEDIA_TYPE_AUDIO, .name = "acelp.kelvin", .long_name = NULL_IF_CONFIG_SMALL("Sipro ACELP.KELVIN"), + .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, + }, + { + .id = AV_CODEC_ID_MPEGH_3D_AUDIO, + .type = AVMEDIA_TYPE_AUDIO, + .name = "mpegh_3d_audio", + .long_name = NULL_IF_CONFIG_SMALL("MPEG-H 3D Audio"), .props = AV_CODEC_PROP_LOSSY, }, diff --git a/libavcodec/cook.c b/libavcodec/cook.c index 248e59256f..d0b41a2431 100644 --- a/libavcodec/cook.c +++ b/libavcodec/cook.c @@ -759,7 +759,7 @@ static int decouple_info(COOKContext *q, COOKSubpacket *p, int *decouple_tab) for (i = 0; i < length; i++) decouple_tab[start + i] = get_vlc2(&q->gb, p->channel_coupling.table, - p->channel_coupling.bits, 2); + p->channel_coupling.bits, 3); else for (i = 0; i < length; i++) { int v = get_bits(&q->gb, p->js_vlc_bits); diff --git a/libavcodec/decode.c b/libavcodec/decode.c index 0883c7209c..03b9da25f9 100644 --- a/libavcodec/decode.c +++ b/libavcodec/decode.c @@ -479,32 +479,32 @@ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame) side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size); if(side && side_size>=10) { - avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier; + avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier; discard_padding = AV_RL32(side + 4); av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n", - avctx->internal->skip_samples, (int)discard_padding); + avci->skip_samples, (int)discard_padding); skip_reason = AV_RL8(side + 8); discard_reason = AV_RL8(side + 9); } if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { - avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples); + avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples); got_frame = 0; } - if (avctx->internal->skip_samples > 0 && got_frame && + if (avci->skip_samples > 0 && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { - if(frame->nb_samples <= avctx->internal->skip_samples){ + if(frame->nb_samples <= avci->skip_samples){ got_frame = 0; - avctx->internal->skip_samples -= frame->nb_samples; + avci->skip_samples -= frame->nb_samples; av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n", - avctx->internal->skip_samples); + avci->skip_samples); } else { - av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples, - frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format); + av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples, + frame->nb_samples - avci->skip_samples, avctx->channels, frame->format); if(avctx->pkt_timebase.num && avctx->sample_rate) { - int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples, + int64_t diff_ts = av_rescale_q(avci->skip_samples, (AVRational){1, avctx->sample_rate}, avctx->pkt_timebase); if(frame->pts!=AV_NOPTS_VALUE) @@ -523,9 +523,9 @@ FF_ENABLE_DEPRECATION_WARNINGS av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n"); } av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n", - avctx->internal->skip_samples, frame->nb_samples); - frame->nb_samples -= avctx->internal->skip_samples; - 
avctx->internal->skip_samples = 0; + avci->skip_samples, frame->nb_samples); + frame->nb_samples -= avci->skip_samples; + avci->skip_samples = 0; } } @@ -551,11 +551,11 @@ FF_ENABLE_DEPRECATION_WARNINGS if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) { AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10); if (fside) { - AV_WL32(fside->data, avctx->internal->skip_samples); + AV_WL32(fside->data, avci->skip_samples); AV_WL32(fside->data + 4, discard_padding); AV_WL8(fside->data + 8, skip_reason); AV_WL8(fside->data + 9, discard_reason); - avctx->internal->skip_samples = 0; + avci->skip_samples = 0; } } } @@ -580,7 +580,7 @@ FF_ENABLE_DEPRECATION_WARNINGS /* do not stop draining when actual_got_frame != 0 or ret < 0 */ /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */ - if (avctx->internal->draining && !actual_got_frame) { + if (avci->draining && !actual_got_frame) { if (ret < 0) { /* prevent infinite loop if a decoder wrongly always return error on draining */ /* reasonable nb_errors_max = maximum b frames + thread count */ @@ -1925,7 +1925,7 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) return AVERROR(EINVAL); } } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { - if (frame->nb_samples * avctx->channels > avctx->max_samples) { + if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) { av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples); return AVERROR(EINVAL); } @@ -2030,15 +2030,17 @@ static void bsfs_flush(AVCodecContext *avctx) void avcodec_flush_buffers(AVCodecContext *avctx) { - avctx->internal->draining = 0; - avctx->internal->draining_done = 0; - avctx->internal->nb_draining_errors = 0; - av_frame_unref(avctx->internal->buffer_frame); - av_frame_unref(avctx->internal->compat_decode_frame); - av_packet_unref(avctx->internal->buffer_pkt); - avctx->internal->buffer_pkt_valid = 0; + AVCodecInternal *avci = avctx->internal; - av_packet_unref(avctx->internal->ds.in_pkt); + avci->draining = 0; + avci->draining_done = 0; + avci->nb_draining_errors = 0; + av_frame_unref(avci->buffer_frame); + av_frame_unref(avci->compat_decode_frame); + av_packet_unref(avci->buffer_pkt); + avci->buffer_pkt_valid = 0; + + av_packet_unref(avci->ds.in_pkt); if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ff_thread_flush(avctx); @@ -2051,7 +2053,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx) bsfs_flush(avctx); if (!avctx->refcounted_frames) - av_frame_unref(avctx->internal->to_free); + av_frame_unref(avci->to_free); } void ff_decode_bsfs_uninit(AVCodecContext *avctx) diff --git a/libavcodec/dstdec.c b/libavcodec/dstdec.c index ae3fe428dd..880b838b0c 100644 --- a/libavcodec/dstdec.c +++ b/libavcodec/dstdec.c @@ -121,7 +121,7 @@ static int read_map(GetBitContext *gb, Table *t, unsigned int map[DST_MAX_CHANNE static av_always_inline int get_sr_golomb_dst(GetBitContext *gb, unsigned int k) { - int v = get_ur_golomb(gb, k, get_bits_left(gb), 0); + int v = get_ur_golomb_jpegls(gb, k, get_bits_left(gb), 0); if (v && get_bits1(gb)) v = -v; return v; diff --git a/libavcodec/escape124.c b/libavcodec/escape124.c index cffd3e12b1..94c2a961e6 100644 --- a/libavcodec/escape124.c +++ b/libavcodec/escape124.c @@ -252,7 +252,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, if (i == 2) { // This codebook can be cut off at places other than // powers of 2, leaving some of the entries undefined. 
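The decode.c hunks above mostly rename avctx->internal to the local avci, but they also show the 10-byte AV_PKT_DATA_SKIP_SAMPLES layout the decoder consumes (skip, discard padding, two reason bytes). A sketch of the producing side under that assumption; request_skip_samples is an illustrative name.

#include "libavcodec/avcodec.h"
#include "libavutil/intreadwrite.h"

/* Illustrative sketch (not part of the patch): attach "skip/discard samples"
 * metadata to a packet using the same 10-byte layout decode.c parses above. */
static int request_skip_samples(AVPacket *pkt, uint32_t skip, uint32_t discard_padding)
{
    uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
    if (!side)
        return AVERROR(ENOMEM);
    AV_WL32(side,     skip);            /* samples to skip at the start  */
    AV_WL32(side + 4, discard_padding); /* samples to discard at the end */
    AV_WL8 (side + 8, 0);               /* skip_reason                   */
    AV_WL8 (side + 9, 0);               /* discard_reason                */
    return 0;
}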
- cb_size = get_bits_long(&gb, 20); + cb_size = get_bits(&gb, 20); if (!cb_size) { av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n"); return AVERROR_INVALIDDATA; diff --git a/libavcodec/ffv1_template.c b/libavcodec/ffv1_template.c index f2ab93313a..c5f61b0182 100644 --- a/libavcodec/ffv1_template.c +++ b/libavcodec/ffv1_template.c @@ -37,7 +37,7 @@ static inline int RENAME(get_context)(PlaneContext *p, TYPE *src, const int RT = last[1]; const int L = src[-1]; - if (p->quant_table[3][127]) { + if (p->quant_table[3][127] || p->quant_table[4][127]) { const int TT = last2[0]; const int LL = src[-2]; return p->quant_table[0][(L - LT) & 0xFF] + diff --git a/libavcodec/ffwavesynth.c b/libavcodec/ffwavesynth.c index 349b45534d..b9c63abb8d 100644 --- a/libavcodec/ffwavesynth.c +++ b/libavcodec/ffwavesynth.c @@ -350,7 +350,8 @@ fail: static void wavesynth_synth_sample(struct wavesynth_context *ws, int64_t ts, int32_t *channels) { - int32_t amp, val, *cv; + int32_t amp, *cv; + unsigned val; struct ws_interval *in; int i, *last, pink; uint32_t c, all_ch = 0; diff --git a/libavcodec/flac.c b/libavcodec/flac.c index 5ffbf93190..7b075d4bd3 100644 --- a/libavcodec/flac.c +++ b/libavcodec/flac.c @@ -217,9 +217,9 @@ int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s, } skip_bits(&gb, 24); /* skip min frame size */ - s->max_framesize = get_bits_long(&gb, 24); + s->max_framesize = get_bits(&gb, 24); - s->samplerate = get_bits_long(&gb, 20); + s->samplerate = get_bits(&gb, 20); s->channels = get_bits(&gb, 3) + 1; s->bps = get_bits(&gb, 5) + 1; diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c index f9beb40afa..c19f07fe05 100644 --- a/libavcodec/flvdec.c +++ b/libavcodec/flvdec.c @@ -30,7 +30,7 @@ int ff_flv_decode_picture_header(MpegEncContext *s) int format, width, height; /* picture header */ - if (get_bits_long(&s->gb, 17) != 1) { + if (get_bits(&s->gb, 17) != 1) { av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n"); return AVERROR_INVALIDDATA; } diff --git a/libavcodec/g729postfilter.c b/libavcodec/g729postfilter.c index fc9a8d54cc..ab668594d2 100644 --- a/libavcodec/g729postfilter.c +++ b/libavcodec/g729postfilter.c @@ -486,14 +486,14 @@ static int16_t apply_tilt_comp(int16_t* out, int16_t* res_pst, int refl_coeff, if (refl_coeff > 0) { gt = (refl_coeff * G729_TILT_FACTOR_PLUS + 0x4000) >> 15; - fact = 0x4000; // 0.5 in (0.15) - sh_fact = 15; + fact = 0x2000; // 0.5 in (0.15) + sh_fact = 14; } else { gt = (refl_coeff * G729_TILT_FACTOR_MINUS + 0x4000) >> 15; - fact = 0x800; // 0.5 in (3.12) - sh_fact = 12; + fact = 0x400; // 0.5 in (3.12) + sh_fact = 11; } - ga = (fact << 15) / av_clip_int16(32768 - FFABS(gt)); + ga = (fact << 16) / av_clip_int16(32768 - FFABS(gt)); gt >>= 1; /* Apply tilt compensation filter to signal. 
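Several hunks in this patch (escape124, flac, flvdec, and more below) replace get_bits_long() with get_bits() for fields of at most 24 bits. The distinction being relied on: with the default bitstream reader get_bits() is specified for 1..25 bits, while get_bits_long() is required for wider reads up to 32. A small in-tree-style sketch with made-up field sizes:

#include "libavcodec/get_bits.h"

/* Sketch only: 24-bit fields fit within get_bits() (1..25 bits); only wider
 * fields still need get_bits_long(). */
static int read_two_fields(const uint8_t *buf, int size, unsigned *a, unsigned *b)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, size);
    if (ret < 0)
        return ret;
    *a = get_bits(&gb, 24);       /* fine with plain get_bits()            */
    *b = get_bits_long(&gb, 32);  /* > 25 bits still needs get_bits_long() */
    return 0;
}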
*/ @@ -503,12 +503,12 @@ static int16_t apply_tilt_comp(int16_t* out, int16_t* res_pst, int refl_coeff, tmp2 = (gt * res_pst[i-1]) * 2 + 0x4000; tmp2 = res_pst[i] + (tmp2 >> 15); - tmp2 = (tmp2 * ga * 2 + fact) >> sh_fact; + tmp2 = (tmp2 * ga + fact) >> sh_fact; out[i] = tmp2; } tmp2 = (gt * ht_prev_data) * 2 + 0x4000; tmp2 = res_pst[0] + (tmp2 >> 15); - tmp2 = (tmp2 * ga * 2 + fact) >> sh_fact; + tmp2 = (tmp2 * ga + fact) >> sh_fact; out[0] = tmp2; return tmp; diff --git a/libavcodec/golomb.h b/libavcodec/golomb.h index 5cdfa0945d..7fd46a91bd 100644 --- a/libavcodec/golomb.h +++ b/libavcodec/golomb.h @@ -313,7 +313,7 @@ static inline int get_interleaved_se_golomb(GetBitContext *gb) } else { int log; skip_bits(gb, 8); - buf |= 1 | show_bits_long(gb, 24); + buf |= 1 | show_bits(gb, 24); if ((buf & 0xAAAAAAAA) == 0) return INVALID_VLC; diff --git a/libavcodec/h264_metadata_bsf.c b/libavcodec/h264_metadata_bsf.c index 5de74be9d6..d96a50dbf7 100644 --- a/libavcodec/h264_metadata_bsf.c +++ b/libavcodec/h264_metadata_bsf.c @@ -381,7 +381,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt) } else { goto invalid_user_data; } - if (i & 1) + if (j & 1) udu->uuid_iso_iec_11578[j / 2] |= v; else udu->uuid_iso_iec_11578[j / 2] = v << 4; diff --git a/libavcodec/h264_mp4toannexb_bsf.c b/libavcodec/h264_mp4toannexb_bsf.c index fb3f24ea40..bbf124ad04 100644 --- a/libavcodec/h264_mp4toannexb_bsf.c +++ b/libavcodec/h264_mp4toannexb_bsf.c @@ -21,6 +21,7 @@ #include +#include "libavutil/avassert.h" #include "libavutil/intreadwrite.h" #include "libavutil/mem.h" @@ -68,7 +69,7 @@ static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding) { H264BSFContext *s = ctx->priv_data; uint16_t unit_size; - uint64_t total_size = 0; + uint32_t total_size = 0; uint8_t *out = NULL, unit_nb, sps_done = 0, sps_seen = 0, pps_seen = 0; const uint8_t *extradata = ctx->par_in->extradata + 4; @@ -91,12 +92,7 @@ static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding) unit_size = AV_RB16(extradata); total_size += unit_size + 4; - if (total_size > INT_MAX - padding) { - av_log(ctx, AV_LOG_ERROR, - "Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n"); - av_free(out); - return AVERROR(EINVAL); - } + av_assert1(total_size <= INT_MAX - padding); if (extradata + 2 + unit_size > ctx->par_in->extradata + ctx->par_in->extradata_size) { av_log(ctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, " "corrupted stream or invalid MP4/AVCC bitstream\n"); diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c index e8738d8502..edbaa96b64 100644 --- a/libavcodec/h264_ps.c +++ b/libavcodec/h264_ps.c @@ -186,7 +186,7 @@ static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx } if (show_bits1(gb) && get_bits_left(gb) < 10) { - av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n"); + av_log(avctx, AV_LOG_WARNING, "Truncated VUI (%d)\n", get_bits_left(gb)); return 0; } diff --git a/libavcodec/h264_sei.c b/libavcodec/h264_sei.c index d4eb9c0dab..a565feabe2 100644 --- a/libavcodec/h264_sei.c +++ b/libavcodec/h264_sei.c @@ -247,14 +247,14 @@ static int decode_unregistered_user_data(H264SEIUnregistered *h, GetBitContext * uint8_t *user_data; int e, build, i; - if (size < 16 || size >= INT_MAX - 16) + if (size < 16 || size >= INT_MAX - 1) return AVERROR_INVALIDDATA; - user_data = av_malloc(16 + size + 1); + user_data = av_malloc(size + 1); if (!user_data) return AVERROR(ENOMEM); - for (i = 0; i < size + 16; i++) + for (i = 0; i < size; i++) 
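The h264_metadata_bsf hunk above fixes which counter's parity selects the nibble: the output nibble index j, not the input character index i, since separator characters advance i without advancing j. A self-contained sketch of the corrected packing, with an illustrative function name:

#include <stdint.h>

/* Sketch, not part of the patch: pack hex digits into bytes keyed on the
 * parity of the output nibble counter j. */
static int parse_hex_uuid(const char *str, uint8_t uuid[16])
{
    int i, j = 0;
    for (i = 0; str[i] && j < 32; i++) {
        int c = str[i], v;
        if      (c >= '0' && c <= '9') v = c - '0';
        else if (c >= 'a' && c <= 'f') v = 10 + c - 'a';
        else if (c >= 'A' && c <= 'F') v = 10 + c - 'A';
        else if (c == '-')             continue; /* i advances, j does not */
        else                           return -1;
        if (j & 1)
            uuid[j / 2] |= v;       /* odd nibble: low half of the byte   */
        else
            uuid[j / 2]  = v << 4;  /* even nibble: high half of the byte */
        j++;
    }
    return j == 32 ? 0 : -1;
}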
user_data[i] = get_bits(gb, 8); user_data[i] = 0; diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h index 1d9723260d..530e2d4071 100644 --- a/libavcodec/h264dec.h +++ b/libavcodec/h264dec.h @@ -832,8 +832,6 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl); void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height); -int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl, - const H2645NAL *nal); /** * Submit a slice for decoding. * diff --git a/libavcodec/hevc_cabac.c b/libavcodec/hevc_cabac.c index 8abb780dd7..3dc0987dad 100644 --- a/libavcodec/hevc_cabac.c +++ b/libavcodec/hevc_cabac.c @@ -66,7 +66,7 @@ static const int8_t num_bins_in_se[] = { 1, // no_residual_data_flag 3, // split_transform_flag 2, // cbf_luma - 4, // cbf_cb, cbf_cr + 5, // cbf_cb, cbf_cr 2, // transform_skip_flag[][] 2, // explicit_rdpcm_flag[][] 2, // explicit_rdpcm_dir_flag[][] @@ -122,23 +122,23 @@ static const int elem_offset[sizeof(num_bins_in_se)] = { 37, // split_transform_flag 40, // cbf_luma 42, // cbf_cb, cbf_cr - 46, // transform_skip_flag[][] - 48, // explicit_rdpcm_flag[][] - 50, // explicit_rdpcm_dir_flag[][] - 52, // last_significant_coeff_x_prefix - 70, // last_significant_coeff_y_prefix - 88, // last_significant_coeff_x_suffix - 88, // last_significant_coeff_y_suffix - 88, // significant_coeff_group_flag - 92, // significant_coeff_flag - 136, // coeff_abs_level_greater1_flag - 160, // coeff_abs_level_greater2_flag - 166, // coeff_abs_level_remaining - 166, // coeff_sign_flag - 166, // log2_res_scale_abs - 174, // res_scale_sign_flag - 176, // cu_chroma_qp_offset_flag - 177, // cu_chroma_qp_offset_idx + 47, // transform_skip_flag[][] + 49, // explicit_rdpcm_flag[][] + 51, // explicit_rdpcm_dir_flag[][] + 53, // last_significant_coeff_x_prefix + 71, // last_significant_coeff_y_prefix + 89, // last_significant_coeff_x_suffix + 89, // last_significant_coeff_y_suffix + 89, // significant_coeff_group_flag + 93, // significant_coeff_flag + 137, // coeff_abs_level_greater1_flag + 161, // coeff_abs_level_greater2_flag + 167, // coeff_abs_level_remaining + 167, // coeff_sign_flag + 167, // log2_res_scale_abs + 175, // res_scale_sign_flag + 177, // cu_chroma_qp_offset_flag + 178, // cu_chroma_qp_offset_idx }; #define CNU 154 @@ -189,7 +189,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = { // cbf_luma 111, 141, // cbf_cb, cbf_cr - 94, 138, 182, 154, + 94, 138, 182, 154, 154, // transform_skip_flag 139, 139, // explicit_rdpcm_flag @@ -266,7 +266,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = { // cbf_luma 153, 111, // cbf_cb, cbf_cr - 149, 107, 167, 154, + 149, 107, 167, 154, 154, // transform_skip_flag 139, 139, // explicit_rdpcm_flag @@ -343,7 +343,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = { // cbf_luma 153, 111, // cbf_cb, cbf_cr - 149, 92, 167, 154, + 149, 92, 167, 154, 154, // transform_skip_flag 139, 139, // explicit_rdpcm_flag diff --git a/libavcodec/hevc_sei.c b/libavcodec/hevc_sei.c index c59bd4321e..562ce8b516 100644 --- a/libavcodec/hevc_sei.c +++ b/libavcodec/hevc_sei.c @@ -76,8 +76,8 @@ static int decode_nal_sei_mastering_display_info(HEVCSEIMasteringDisplay *s, Get static int decode_nal_sei_content_light_info(HEVCSEIContentLight *s, GetBitContext *gb) { // Max and average light levels - s->max_content_light_level = get_bits_long(gb, 16); - s->max_pic_average_light_level = get_bits_long(gb, 16); + s->max_content_light_level = get_bits(gb, 16); + s->max_pic_average_light_level = get_bits(gb, 
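The hevc_cabac tables above grow cbf_cb/cbf_cr from 4 to 5 context models, which is why every later elem_offset entry moves up by one: elem_offset[] is the exclusive prefix sum of num_bins_in_se[]. A throwaway sketch that recomputes the offsets, handy when reviewing this kind of renumbering:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: elem_offset[i] == sum of num_bins_in_se[0..i-1]. */
static void print_offsets(const int8_t *num_bins_in_se, int n)
{
    int off = 0;
    for (int i = 0; i < n; i++) {
        printf("elem %2d -> offset %3d\n", i, off);
        off += num_bins_in_se[i];
    }
}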
16); // As this SEI message comes before the first frame that references it, // initialize the flag to 2 and decrement on IRAP access unit so it // persists for the coded video sequence (e.g., between two IRAPs) @@ -177,7 +177,8 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB size -= 2; if (cc_count && size >= cc_count * 3) { - const uint64_t new_size = (s->a53_caption_size + cc_count + int old_size = s->buf_ref ? s->buf_ref->size : 0; + const uint64_t new_size = (old_size + cc_count * UINT64_C(3)); int i, ret; @@ -185,14 +186,14 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB return AVERROR(EINVAL); /* Allow merging of the cc data from two fields. */ - ret = av_reallocp(&s->a53_caption, new_size); + ret = av_buffer_realloc(&s->buf_ref, new_size); if (ret < 0) return ret; for (i = 0; i < cc_count; i++) { - s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8); - s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8); - s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8); + s->buf_ref->data[old_size++] = get_bits(gb, 8); + s->buf_ref->data[old_size++] = get_bits(gb, 8); + s->buf_ref->data[old_size++] = get_bits(gb, 8); } skip_bits(gb, 8); // marker_bits } @@ -363,6 +364,5 @@ int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, void ff_hevc_reset_sei(HEVCSEI *s) { - s->a53_caption.a53_caption_size = 0; - av_freep(&s->a53_caption.a53_caption); + av_buffer_unref(&s->a53_caption.buf_ref); } diff --git a/libavcodec/hevc_sei.h b/libavcodec/hevc_sei.h index f6516ae982..2769d41445 100644 --- a/libavcodec/hevc_sei.h +++ b/libavcodec/hevc_sei.h @@ -83,8 +83,7 @@ typedef struct HEVCSEIPictureTiming { } HEVCSEIPictureTiming; typedef struct HEVCSEIA53Caption { - int a53_caption_size; - uint8_t *a53_caption; + AVBufferRef *buf_ref; } HEVCSEIA53Caption; typedef struct HEVCSEIMasteringDisplay { diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c index 8f1c162ace..19b0cd815d 100644 --- a/libavcodec/hevcdec.c +++ b/libavcodec/hevcdec.c @@ -2778,14 +2778,14 @@ static int set_side_data(HEVCContext *s) metadata->MaxCLL, metadata->MaxFALL); } - if (s->sei.a53_caption.a53_caption) { - AVFrameSideData* sd = av_frame_new_side_data(out, - AV_FRAME_DATA_A53_CC, - s->sei.a53_caption.a53_caption_size); - if (sd) - memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size); - av_freep(&s->sei.a53_caption.a53_caption); - s->sei.a53_caption.a53_caption_size = 0; + if (s->sei.a53_caption.buf_ref) { + HEVCSEIA53Caption *a53 = &s->sei.a53_caption; + + AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref); + if (!sd) + av_buffer_unref(&a53->buf_ref); + a53->buf_ref = NULL; + s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } @@ -3463,6 +3463,13 @@ static int hevc_update_thread_context(AVCodecContext *dst, s->max_ra = INT_MAX; } + av_buffer_unref(&s->sei.a53_caption.buf_ref); + if (s0->sei.a53_caption.buf_ref) { + s->sei.a53_caption.buf_ref = av_buffer_ref(s0->sei.a53_caption.buf_ref); + if (!s->sei.a53_caption.buf_ref) + return AVERROR(ENOMEM); + } + s->sei.frame_packing = s0->sei.frame_packing; s->sei.display_orientation = s0->sei.display_orientation; s->sei.mastering_display = s0->sei.mastering_display; diff --git a/libavcodec/iff.c b/libavcodec/iff.c index 0656ae5509..f82141d2e7 100644 --- a/libavcodec/iff.c +++ b/libavcodec/iff.c @@ -322,6 +322,8 @@ static int extract_header(AVCodecContext *const avctx, av_log(avctx, AV_LOG_ERROR, "Invalid 
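The hevc_sei/hevcdec hunks above move the A53 caption bytes into an AVBufferRef so the decoder can hand the same buffer to the output frame without a copy. The ownership rule relied on: av_frame_new_side_data_from_buf() takes ownership of the reference on success, so the SEI state only clears its pointer, and unrefs the buffer itself on failure. A compact sketch of that transfer with an illustrative helper name:

#include "libavutil/buffer.h"
#include "libavutil/frame.h"

/* Sketch (not part of the patch): hand a refcounted caption buffer to a frame. */
static void attach_cc_buffer(AVFrame *frame, AVBufferRef **cc_buf)
{
    if (!*cc_buf)
        return;
    /* On success the frame owns *cc_buf; on failure we must drop it here. */
    if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, *cc_buf))
        av_buffer_unref(cc_buf);
    *cc_buf = NULL;
}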
number of bitplanes: %u\n", s->bpp); return AVERROR_INVALIDDATA; } + if (s->video_size && s->planesize * s->bpp * avctx->height > s->video_size) + return AVERROR_INVALIDDATA; av_freep(&s->ham_buf); av_freep(&s->ham_palbuf); @@ -1359,6 +1361,8 @@ static void decode_delta_d(uint8_t *dst, bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET); if (opcode >= 0) { uint32_t x = bytestream2_get_be32(&gb); + if (opcode && 4 + (opcode - 1LL) * pitch > bytestream2_get_bytes_left_p(&pb)) + continue; while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) { bytestream2_put_be32(&pb, x); bytestream2_skip_p(&pb, pitch - 4); diff --git a/libavcodec/indeo5.c b/libavcodec/indeo5.c index 7b9da53df4..4ccdbcaf0a 100644 --- a/libavcodec/indeo5.c +++ b/libavcodec/indeo5.c @@ -264,7 +264,7 @@ static int decode_gop_header(IVI45DecContext *ctx, AVCodecContext *avctx) } if (get_bits1(&ctx->gb)) - skip_bits_long(&ctx->gb, 24); /* skip transparency fill color */ + skip_bits(&ctx->gb, 24); /* skip transparency fill color */ } align_get_bits(&ctx->gb); @@ -348,7 +348,7 @@ static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx) if (ctx->frame_type != FRAMETYPE_NULL) { ctx->frame_flags = get_bits(&ctx->gb, 8); - ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits_long(&ctx->gb, 24) : 0; + ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits(&ctx->gb, 24) : 0; ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0; @@ -392,7 +392,7 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band, return 0; } - band->data_size = (ctx->frame_flags & 0x80) ? get_bits_long(&ctx->gb, 24) : 0; + band->data_size = (ctx->frame_flags & 0x80) ? get_bits(&ctx->gb, 24) : 0; band->inherit_mv = band_flags & 2; band->inherit_qdelta = band_flags & 8; diff --git a/libavcodec/intelh263dec.c b/libavcodec/intelh263dec.c index d321dd4dba..283fb1cb0a 100644 --- a/libavcodec/intelh263dec.c +++ b/libavcodec/intelh263dec.c @@ -33,7 +33,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s) } /* picture header */ - if (get_bits_long(&s->gb, 22) != 0x20) { + if (get_bits(&s->gb, 22) != 0x20) { av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n"); return -1; } diff --git a/libavcodec/internal.h b/libavcodec/internal.h index 9db3d36acb..5930dd0da0 100644 --- a/libavcodec/internal.h +++ b/libavcodec/internal.h @@ -392,6 +392,8 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx); int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type); +int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp); + /** * Check AVFrame for A53 side data and allocate and fill SEI message with A53 info * diff --git a/libavcodec/ivi.c b/libavcodec/ivi.c index 18192cbf23..05ec06c8bf 100644 --- a/libavcodec/ivi.c +++ b/libavcodec/ivi.c @@ -476,7 +476,7 @@ static int ivi_dec_tile_data_size(GetBitContext *gb) if (get_bits1(gb)) { len = get_bits(gb, 8); if (len == 255) - len = get_bits_long(gb, 24); + len = get_bits(gb, 24); } /* align the bitstream reader on the byte boundary */ @@ -1193,7 +1193,7 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, left = get_bits_count(&ctx->gb) & 0x18; skip_bits_long(&ctx->gb, 64 - left); if (get_bits_left(&ctx->gb) > 18 && - show_bits_long(&ctx->gb, 21) == 0xBFFF8) { // syncheader + inter type + show_bits(&ctx->gb, 21) == 0xBFFF8) { // syncheader + inter type AVPacket pkt; pkt.data = avpkt->data + (get_bits_count(&ctx->gb) >> 3); 
pkt.size = get_bits_left(&ctx->gb) >> 3; diff --git a/libavcodec/libkvazaar.c b/libavcodec/libkvazaar.c index a89ca7f749..02bcae3d5c 100644 --- a/libavcodec/libkvazaar.c +++ b/libavcodec/libkvazaar.c @@ -110,8 +110,8 @@ static av_cold int libkvazaar_init(AVCodecContext *avctx) entry->key, entry->value); } } - av_dict_free(&dict); } + av_dict_free(&dict); } ctx->encoder = enc = api->encoder_open(cfg); diff --git a/libavcodec/librav1e.c b/libavcodec/librav1e.c index a5aedd512c..b8b1b4f8f1 100644 --- a/libavcodec/librav1e.c +++ b/libavcodec/librav1e.c @@ -42,7 +42,7 @@ typedef struct librav1eContext { size_t pass_pos; int pass_size; - char *rav1e_opts; + AVDictionary *rav1e_opts; int quantizer; int speed; int tiles; @@ -244,17 +244,12 @@ static av_cold int librav1e_encode_init(AVCodecContext *avctx) } } - if (ctx->rav1e_opts) { - AVDictionary *dict = NULL; + { AVDictionaryEntry *en = NULL; - - if (!av_dict_parse_string(&dict, ctx->rav1e_opts, "=", ":", 0)) { - while (en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX)) { - int parse_ret = rav1e_config_parse(cfg, en->key, en->value); - if (parse_ret < 0) - av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value); - } - av_dict_free(&dict); + while ((en = av_dict_get(ctx->rav1e_opts, "", en, AV_DICT_IGNORE_SUFFIX))) { + int parse_ret = rav1e_config_parse(cfg, en->key, en->value); + if (parse_ret < 0) + av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value); } } @@ -538,7 +533,7 @@ static const AVOption options[] = { { "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE }, { "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE }, { "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE }, - { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, + { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE }, { NULL } }; diff --git a/libavcodec/libvpxenc.c b/libavcodec/libvpxenc.c index 721a052641..0b8a070304 100644 --- a/libavcodec/libvpxenc.c +++ b/libavcodec/libvpxenc.c @@ -100,7 +100,7 @@ typedef struct VPxEncoderContext { int rc_undershoot_pct; int rc_overshoot_pct; - char *vp8_ts_parameters; + AVDictionary *vp8_ts_parameters; // VP9-only int lossless; @@ -757,19 +757,13 @@ FF_ENABLE_DEPRECATION_WARNINGS enccfg.g_error_resilient = ctx->error_resilient || ctx->flags & VP8F_ERROR_RESILIENT; - if (CONFIG_LIBVPX_VP8_ENCODER && avctx->codec_id == AV_CODEC_ID_VP8 && ctx->vp8_ts_parameters) { - AVDictionary *dict = NULL; + if (CONFIG_LIBVPX_VP8_ENCODER && avctx->codec_id == AV_CODEC_ID_VP8) { AVDictionaryEntry* en = NULL; - - if (!av_dict_parse_string(&dict, ctx->vp8_ts_parameters, "=", ":", 0)) { - while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) { - if (vp8_ts_param_parse(&enccfg, en->key, en->value) < 0) - av_log(avctx, AV_LOG_WARNING, - "Error parsing option '%s = %s'.\n", - en->key, en->value); - } - - av_dict_free(&dict); + while ((en = av_dict_get(ctx->vp8_ts_parameters, "", en, AV_DICT_IGNORE_SUFFIX))) { + if (vp8_ts_param_parse(&enccfg, en->key, en->value) < 0) + av_log(avctx, AV_LOG_WARNING, + "Error parsing option '%s = %s'.\n", + en->key, en->value); } } @@ -1047,8 +1041,7 @@ static int 
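Starting with the libkvazaar/librav1e/libvpx hunks above (and libx264/libx265/libxavs2 below), the wrapper encoders switch their *-params options from AV_OPT_TYPE_STRING to AV_OPT_TYPE_DICT, so the option layer parses the colon-separated string and the wrapper only walks the resulting dictionary. The shared iteration idiom, as a sketch where apply_one() stands in for the library-specific parse call:

#include "libavutil/dict.h"
#include "libavutil/log.h"

/* Sketch of the idiom the wrappers now share; apply_one() is a placeholder
 * for rav1e_config_parse(), vp8_ts_param_parse(), x264_param_parse(), ... */
static void apply_params(void *logctx, const AVDictionary *params,
                         int (*apply_one)(const char *key, const char *value))
{
    AVDictionaryEntry *en = NULL;
    while ((en = av_dict_get(params, "", en, AV_DICT_IGNORE_SUFFIX))) {
        if (apply_one(en->key, en->value) < 0)
            av_log(logctx, AV_LOG_WARNING,
                   "Error parsing option '%s = %s'.\n", en->key, en->value);
    }
}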
queue_frames(AVCodecContext *avctx, AVPacket *pkt_out) if (size < 0) return size; } else { - struct FrameListData *cx_frame = - av_malloc(sizeof(struct FrameListData)); + struct FrameListData *cx_frame = av_malloc(sizeof(*cx_frame)); if (!cx_frame) { av_log(avctx, AV_LOG_ERROR, @@ -1462,7 +1455,7 @@ static const AVOption vp8_options[] = { "frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 2, VE}, { "cpu-used", "Quality/Speed ratio modifier", OFFSET(cpu_used), AV_OPT_TYPE_INT, {.i64 = 1}, -16, 16, VE}, { "ts-parameters", "Temporal scaling configuration using a " - ":-separated list of key=value parameters", OFFSET(vp8_ts_parameters), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE}, + ":-separated list of key=value parameters", OFFSET(vp8_ts_parameters), AV_OPT_TYPE_DICT, {.str=NULL}, 0, 0, VE}, LEGACY_OPTIONS { NULL } }; diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c index bfd91bb900..ca8f6c0873 100644 --- a/libavcodec/libx264.c +++ b/libavcodec/libx264.c @@ -25,6 +25,7 @@ #include "libavutil/mem.h" #include "libavutil/pixdesc.h" #include "libavutil/stereo3d.h" +#include "libavutil/time.h" #include "libavutil/intreadwrite.h" #include "avcodec.h" #include "internal.h" @@ -44,6 +45,11 @@ // blocks of pixels (with respect to the luma plane) #define MB_SIZE 16 +typedef struct X264Opaque { + int64_t reordered_opaque; + int64_t wallclock; +} X264Opaque; + typedef struct X264Context { AVClass *class; x264_param_t params; @@ -95,10 +101,10 @@ typedef struct X264Context { int scenechange_threshold; int noise_reduction; - char *x264_params; + AVDictionary *x264_params; int nb_reordered_opaque, next_reordered_opaque; - int64_t *reordered_opaque; + X264Opaque *reordered_opaque; /** * If the encoder does not support ROI then warn the first time we @@ -292,7 +298,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame, x264_picture_t pic_out = {0}; int pict_type; int bit_depth; - int64_t *out_opaque; + int64_t wallclock = 0; + X264Opaque *out_opaque; AVFrameSideData *sd; x264_picture_init( &x4->pic ); @@ -314,7 +321,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame, x4->pic.i_pts = frame->pts; - x4->reordered_opaque[x4->next_reordered_opaque] = frame->reordered_opaque; + x4->reordered_opaque[x4->next_reordered_opaque].reordered_opaque = frame->reordered_opaque; + x4->reordered_opaque[x4->next_reordered_opaque].wallclock = av_gettime(); x4->pic.opaque = &x4->reordered_opaque[x4->next_reordered_opaque]; x4->next_reordered_opaque++; x4->next_reordered_opaque %= x4->nb_reordered_opaque; @@ -443,7 +451,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame, out_opaque = pic_out.opaque; if (out_opaque >= x4->reordered_opaque && out_opaque < &x4->reordered_opaque[x4->nb_reordered_opaque]) { - ctx->reordered_opaque = *out_opaque; + ctx->reordered_opaque = out_opaque->reordered_opaque; + wallclock = out_opaque->wallclock; } else { // Unexpected opaque pointer on picture output ctx->reordered_opaque = 0; @@ -473,6 +482,8 @@ FF_ENABLE_DEPRECATION_WARNINGS pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe; if (ret) { ff_side_data_set_encoder_stats(pkt, (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA, NULL, 0, pict_type); + if (wallclock) + ff_side_data_set_prft(pkt, wallclock); #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS @@ -892,19 +903,14 @@ FF_ENABLE_DEPRECATION_WARNINGS } } - if (x4->x264_params) { - AVDictionary *dict = NULL; + + { AVDictionaryEntry *en = NULL; - - if 
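With the X264Opaque/wallclock plumbing above, libx264 packets can carry producer reference time via ff_side_data_set_prft(). A consumer-side sketch, assuming the public AV_PKT_DATA_PRFT side data and AVProducerReferenceTime struct are what the helper fills in (wallclock being the av_gettime() value in microseconds):

#include <libavcodec/avcodec.h>

/* Sketch only: read back the wallclock timestamp attached by the encoder. */
static int get_packet_wallclock(AVPacket *pkt, int64_t *wallclock)
{
    int size = 0;
    AVProducerReferenceTime *prft =
        (AVProducerReferenceTime *)av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &size);
    if (!prft || size < (int)sizeof(*prft))
        return AVERROR(ENOENT);
    *wallclock = prft->wallclock;
    return 0;
}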
(!av_dict_parse_string(&dict, x4->x264_params, "=", ":", 0)) { - while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) { - if (x264_param_parse(&x4->params, en->key, en->value) < 0) - av_log(avctx, AV_LOG_WARNING, - "Error parsing option '%s = %s'.\n", - en->key, en->value); - } - - av_dict_free(&dict); + while (en = av_dict_get(x4->x264_params, "", en, AV_DICT_IGNORE_SUFFIX)) { + if (x264_param_parse(&x4->params, en->key, en->value) < 0) + av_log(avctx, AV_LOG_WARNING, + "Error parsing option '%s = %s'.\n", + en->key, en->value); } } @@ -1116,7 +1122,7 @@ static const AVOption options[] = { { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE }, { "noise_reduction", "Noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE }, - { "x264-params", "Override the x264 configuration using a :-separated list of key=value parameters", OFFSET(x264_params), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, + { "x264-params", "Override the x264 configuration using a :-separated list of key=value parameters", OFFSET(x264_params), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE }, { NULL }, }; diff --git a/libavcodec/libx265.c b/libavcodec/libx265.c index 4e7507728f..9e4711e50b 100644 --- a/libavcodec/libx265.c +++ b/libavcodec/libx265.c @@ -42,11 +42,12 @@ typedef struct libx265Context { const x265_api *api; float crf; + int cqp; int forced_idr; char *preset; char *tune; char *profile; - char *x265_opts; + AVDictionary *x265_opts; /** * If the encoder does not support ROI then warn the first time we @@ -82,10 +83,41 @@ static av_cold int libx265_encode_close(AVCodecContext *avctx) return 0; } +static av_cold int libx265_param_parse_float(AVCodecContext *avctx, + const char *key, float value) +{ + libx265Context *ctx = avctx->priv_data; + char buf[256]; + + snprintf(buf, sizeof(buf), "%2.2f", value); + if (ctx->api->param_parse(ctx->params, key, buf) == X265_PARAM_BAD_VALUE) { + av_log(avctx, AV_LOG_ERROR, "Invalid value %2.2f for param \"%s\".\n", value, key); + return AVERROR(EINVAL); + } + + return 0; +} + +static av_cold int libx265_param_parse_int(AVCodecContext *avctx, + const char *key, int value) +{ + libx265Context *ctx = avctx->priv_data; + char buf[256]; + + snprintf(buf, sizeof(buf), "%d", value); + if (ctx->api->param_parse(ctx->params, key, buf) == X265_PARAM_BAD_VALUE) { + av_log(avctx, AV_LOG_ERROR, "Invalid value %d for param \"%s\".\n", value, key); + return AVERROR(EINVAL); + } + + return 0; +} + static av_cold int libx265_encode_init(AVCodecContext *avctx) { libx265Context *ctx = avctx->priv_data; AVCPBProperties *cpb_props = NULL; + int ret; ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth); if (!ctx->api) @@ -159,6 +191,10 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx) // x265 validates the parameters internally ctx->params->vui.colorPrimaries = avctx->color_primaries; ctx->params->vui.transferCharacteristics = avctx->color_trc; +#if X265_BUILD >= 159 + if (avctx->color_trc == AVCOL_TRC_ARIB_STD_B67) + ctx->params->preferredTransferCharacteristics = ctx->params->vui.transferCharacteristics; +#endif ctx->params->vui.matrixCoeffs = avctx->colorspace; } @@ -222,6 +258,48 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx) } else if (avctx->bit_rate > 0) { ctx->params->rc.bitrate = avctx->bit_rate / 1000; ctx->params->rc.rateControlMode = X265_RC_ABR; + } else if (ctx->cqp >= 0) { + ret = libx265_param_parse_int(avctx, "qp", 
ctx->cqp); + if (ret < 0) + return ret; + } + +#if X265_BUILD >= 89 + if (avctx->qmin >= 0) { + ret = libx265_param_parse_int(avctx, "qpmin", avctx->qmin); + if (ret < 0) + return ret; + } + if (avctx->qmax >= 0) { + ret = libx265_param_parse_int(avctx, "qpmax", avctx->qmax); + if (ret < 0) + return ret; + } +#endif + if (avctx->max_qdiff >= 0) { + ret = libx265_param_parse_int(avctx, "qpstep", avctx->max_qdiff); + if (ret < 0) + return ret; + } + if (avctx->qblur >= 0) { + ret = libx265_param_parse_float(avctx, "qblur", avctx->qblur); + if (ret < 0) + return ret; + } + if (avctx->qcompress >= 0) { + ret = libx265_param_parse_float(avctx, "qcomp", avctx->qcompress); + if (ret < 0) + return ret; + } + if (avctx->i_quant_factor >= 0) { + ret = libx265_param_parse_float(avctx, "ipratio", avctx->i_quant_factor); + if (ret < 0) + return ret; + } + if (avctx->b_quant_factor >= 0) { + ret = libx265_param_parse_float(avctx, "pbratio", avctx->b_quant_factor); + if (ret < 0) + return ret; } ctx->params->rc.vbvBufferSize = avctx->rc_buffer_size / 1000; @@ -237,28 +315,44 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx) if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) ctx->params->bRepeatHeaders = 1; - if (ctx->x265_opts) { - AVDictionary *dict = NULL; + if (avctx->gop_size >= 0) { + ret = libx265_param_parse_int(avctx, "keyint", avctx->gop_size); + if (ret < 0) + return ret; + } + if (avctx->keyint_min > 0) { + ret = libx265_param_parse_int(avctx, "min-keyint", avctx->keyint_min); + if (ret < 0) + return ret; + } + if (avctx->max_b_frames >= 0) { + ret = libx265_param_parse_int(avctx, "bframes", avctx->max_b_frames); + if (ret < 0) + return ret; + } + if (avctx->refs >= 0) { + ret = libx265_param_parse_int(avctx, "ref", avctx->refs); + if (ret < 0) + return ret; + } + + { AVDictionaryEntry *en = NULL; + while ((en = av_dict_get(ctx->x265_opts, "", en, AV_DICT_IGNORE_SUFFIX))) { + int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value); - if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) { - while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) { - int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value); - - switch (parse_ret) { - case X265_PARAM_BAD_NAME: - av_log(avctx, AV_LOG_WARNING, - "Unknown option: %s.\n", en->key); - break; - case X265_PARAM_BAD_VALUE: - av_log(avctx, AV_LOG_WARNING, - "Invalid value for %s: %s.\n", en->key, en->value); - break; - default: - break; - } + switch (parse_ret) { + case X265_PARAM_BAD_NAME: + av_log(avctx, AV_LOG_WARNING, + "Unknown option: %s.\n", en->key); + break; + case X265_PARAM_BAD_VALUE: + av_log(avctx, AV_LOG_WARNING, + "Invalid value for %s: %s.\n", en->key, en->value); + break; + default: + break; } - av_dict_free(&dict); } } @@ -383,6 +477,7 @@ static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt, x265_picture x265pic_out = { 0 }; x265_nal *nal; uint8_t *dst; + int pict_type; int payload = 0; int nnal; int ret; @@ -442,20 +537,23 @@ static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt, pkt->pts = x265pic_out.pts; pkt->dts = x265pic_out.dts; -#if FF_API_CODED_FRAME -FF_DISABLE_DEPRECATION_WARNINGS switch (x265pic_out.sliceType) { case X265_TYPE_IDR: case X265_TYPE_I: - avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; + pict_type = AV_PICTURE_TYPE_I; break; case X265_TYPE_P: - avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P; + pict_type = AV_PICTURE_TYPE_P; break; case X265_TYPE_B: - avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B; + case 
X265_TYPE_BREF: + pict_type = AV_PICTURE_TYPE_B; break; } + +#if FF_API_CODED_FRAME +FF_DISABLE_DEPRECATION_WARNINGS + avctx->coded_frame->pict_type = pict_type; FF_ENABLE_DEPRECATION_WARNINGS #endif @@ -466,6 +564,8 @@ FF_ENABLE_DEPRECATION_WARNINGS #endif pkt->flags |= AV_PKT_FLAG_DISPOSABLE; + ff_side_data_set_encoder_stats(pkt, x265pic_out.frameData.qp * FF_QP2LAMBDA, NULL, 0, pict_type); + *got_packet = 1; return 0; } @@ -535,11 +635,12 @@ static av_cold void libx265_encode_init_csp(AVCodec *codec) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { { "crf", "set the x265 crf", OFFSET(crf), AV_OPT_TYPE_FLOAT, { .dbl = -1 }, -1, FLT_MAX, VE }, + { "qp", "set the x265 qp", OFFSET(cqp), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE }, { "forced-idr", "if forcing keyframes, force them as IDR frames", OFFSET(forced_idr),AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, { "preset", "set the x265 preset", OFFSET(preset), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, { "tune", "set the x265 tune parameter", OFFSET(tune), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, { "profile", "set the x265 profile", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, - { "x265-params", "set the x265 configuration using a :-separated list of key=value parameters", OFFSET(x265_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, + { "x265-params", "set the x265 configuration using a :-separated list of key=value parameters", OFFSET(x265_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE }, { NULL } }; @@ -552,6 +653,17 @@ static const AVClass class = { static const AVCodecDefault x265_defaults[] = { { "b", "0" }, + { "bf", "-1" }, + { "g", "-1" }, + { "keyint_min", "-1" }, + { "refs", "-1" }, + { "qmin", "-1" }, + { "qmax", "-1" }, + { "qdiff", "-1" }, + { "qblur", "-1" }, + { "qcomp", "-1" }, + { "i_qfactor", "-1" }, + { "b_qfactor", "-1" }, { NULL }, }; diff --git a/libavcodec/libxavs2.c b/libavcodec/libxavs2.c index 2f8460ab8e..76b57e731e 100644 --- a/libavcodec/libxavs2.c +++ b/libavcodec/libxavs2.c @@ -48,7 +48,7 @@ typedef struct XAVS2EContext { int log_level; void *encoder; - char *xavs2_opts; + AVDictionary *xavs2_opts; xavs2_outpacket_t packet; xavs2_param_t *param; @@ -92,16 +92,10 @@ static av_cold int xavs2_init(AVCodecContext *avctx) xavs2_opt_set2("OpenGOP", "%d", !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); - if (cae->xavs2_opts) { - AVDictionary *dict = NULL; + { AVDictionaryEntry *en = NULL; - - if (!av_dict_parse_string(&dict, cae->xavs2_opts, "=", ":", 0)) { - while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) { - xavs2_opt_set2(en->key, "%s", en->value); - } - av_dict_free(&dict); - } + while ((en = av_dict_get(cae->xavs2_opts, "", en, AV_DICT_IGNORE_SUFFIX))) + xavs2_opt_set2(en->key, "%s", en->value); } /* Rate control */ @@ -267,7 +261,7 @@ static const AVOption options[] = { { "min_qp" , "min qp for rate control" , OFFSET(min_qp) , AV_OPT_TYPE_INT, {.i64 = 20 }, 0, 63, VE }, { "speed_level" , "Speed level, higher is better but slower", OFFSET(preset_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 9, VE }, { "log_level" , "log level: -1: none, 0: error, 1: warning, 2: info, 3: debug", OFFSET(log_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 3, VE }, - { "xavs2-params" , "set the xavs2 configuration using a :-separated list of key=value parameters", OFFSET(xavs2_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, + { "xavs2-params" , "set the xavs2 configuration using a :-separated list of key=value parameters", OFFSET(xavs2_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE }, { 
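Because x265-params (like the other *-params options in this patch) is now AV_OPT_TYPE_DICT, callers can still pass the familiar key=value:key=value string, for example through the options dictionary of avcodec_open2(); the option layer converts it into an AVDictionary for the wrapper. A usage sketch with example values only:

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Sketch: the option names are real, the concrete values are only examples. */
static int open_with_x265_params(AVCodecContext *avctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "preset", "medium", 0);
    av_dict_set(&opts, "x265-params", "keyint=60:min-keyint=60", 0);

    ret = avcodec_open2(avctx, codec, &opts);
    av_dict_free(&opts);   /* anything left over was not consumed */
    return ret;
}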
NULL }, }; diff --git a/libavcodec/midivid.c b/libavcodec/midivid.c index 38465c5fbc..bb5105bd57 100644 --- a/libavcodec/midivid.c +++ b/libavcodec/midivid.c @@ -63,7 +63,7 @@ static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame) if (intra_flag) { nb_blocks = (avctx->width / 2) * (avctx->height / 2); } else { - int skip_linesize; + int ret, skip_linesize; nb_blocks = bytestream2_get_le32(gb); skip_linesize = avctx->width >> 1; @@ -73,7 +73,9 @@ static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame) if (bytestream2_get_bytes_left(gb) < mask_size) return AVERROR_INVALIDDATA; - init_get_bits8(&mask, mask_start, mask_size); + ret = init_get_bits8(&mask, mask_start, mask_size); + if (ret < 0) + return ret; bytestream2_skip(gb, mask_size); skip = s->skip; diff --git a/libavcodec/mips/h264pred_init_mips.c b/libavcodec/mips/h264pred_init_mips.c index 63637b8732..e537ad8bd4 100644 --- a/libavcodec/mips/h264pred_init_mips.c +++ b/libavcodec/mips/h264pred_init_mips.c @@ -73,10 +73,7 @@ static av_cold void h264_pred_init_msa(H264PredContext *h, int codec_id, switch (codec_id) { case AV_CODEC_ID_SVQ3: - ; - break; case AV_CODEC_ID_RV40: - ; break; case AV_CODEC_ID_VP7: case AV_CODEC_ID_VP8: diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c index 8b053613fc..d5e7c21610 100644 --- a/libavcodec/mjpegdec.c +++ b/libavcodec/mjpegdec.c @@ -2049,7 +2049,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) unsigned nummarkers; id = get_bits_long(&s->gb, 32); - id2 = get_bits_long(&s->gb, 24); + id2 = get_bits(&s->gb, 24); len -= 7; if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) { av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n"); diff --git a/libavcodec/mlp_parse.c b/libavcodec/mlp_parse.c index 3a71f2c0b7..45715352c2 100644 --- a/libavcodec/mlp_parse.c +++ b/libavcodec/mlp_parse.c @@ -102,7 +102,7 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb) return AVERROR_INVALIDDATA; } - if (get_bits_long(gb, 24) != 0xf8726f) /* Sync words */ + if (get_bits(gb, 24) != 0xf8726f) /* Sync words */ return AVERROR_INVALIDDATA; mh->stream_type = get_bits(gb, 8); diff --git a/libavcodec/mlp_parser.c b/libavcodec/mlp_parser.c index b5d3ff7226..5d2ddc5a70 100644 --- a/libavcodec/mlp_parser.c +++ b/libavcodec/mlp_parser.c @@ -61,6 +61,8 @@ static int mlp_parse(AVCodecParserContext *s, int ret; int i, p = 0; + s->key_frame = 0; + *poutbuf_size = 0; if (buf_size == 0) return 0; @@ -136,6 +138,8 @@ static int mlp_parse(AVCodecParserContext *s, * access unit header and all the 2- or 4-byte substream headers. */ // Only check when this isn't a sync frame - syncs have a checksum. 
+ s->key_frame = 0; + parity_bits = 0; for (i = -1; i < mp->num_substreams; i++) { parity_bits ^= buf[p++]; @@ -159,6 +163,8 @@ static int mlp_parse(AVCodecParserContext *s, if (ff_mlp_read_major_sync(avctx, &mh, &gb) < 0) goto lost_sync; + s->key_frame = 1; + avctx->bits_per_raw_sample = mh.group1_bits; if (avctx->bits_per_raw_sample > 16) avctx->sample_fmt = AV_SAMPLE_FMT_S32; diff --git a/libavcodec/mp3_header_decompress_bsf.c b/libavcodec/mp3_header_decompress_bsf.c index 294858953c..ab3d420300 100644 --- a/libavcodec/mp3_header_decompress_bsf.c +++ b/libavcodec/mp3_header_decompress_bsf.c @@ -62,6 +62,11 @@ static int mp3_header_decompress(AVBSFContext *ctx, AVPacket *out) lsf = sample_rate < (24000+32000)/2; mpeg25 = sample_rate < (12000+16000)/2; sample_rate_index= (header>>10)&3; + if (sample_rate_index == 3) { + ret = AVERROR_INVALIDDATA; + goto fail; + } + sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off for(bitrate_index=2; bitrate_index<30; bitrate_index++){ diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c index 775579f9f0..17f9495a1d 100644 --- a/libavcodec/mpeg12dec.c +++ b/libavcodec/mpeg12dec.c @@ -1669,8 +1669,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size) return AVERROR_INVALIDDATA; } - if (s->avctx->hwaccel && - (s->avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD)) { + if (s->avctx->hwaccel) { if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) { av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode first field\n"); diff --git a/libavcodec/mpeg4audio.c b/libavcodec/mpeg4audio.c index e4d6716f7f..0d83fb8d25 100644 --- a/libavcodec/mpeg4audio.c +++ b/libavcodec/mpeg4audio.c @@ -118,8 +118,8 @@ int ff_mpeg4audio_get_config_gb(MPEG4AudioConfig *c, GetBitContext *gb, if (c->object_type == AOT_ALS) { skip_bits(gb, 5); - if (show_bits_long(gb, 24) != MKBETAG('\0','A','L','S')) - skip_bits_long(gb, 24); + if (show_bits(gb, 24) != MKBETAG('\0','A','L','S')) + skip_bits(gb, 24); specific_config_bitindex = get_bits_count(gb); diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c index 055afabc7e..cc03486646 100644 --- a/libavcodec/mpeg4videodec.c +++ b/libavcodec/mpeg4videodec.c @@ -711,7 +711,7 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx) int i; do { - if (show_bits_long(&s->gb, 19) == DC_MARKER) + if (show_bits(&s->gb, 19) == DC_MARKER) return mb_num - 1; cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2); @@ -1001,7 +1001,7 @@ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx) if (s->pict_type == AV_PICTURE_TYPE_I) { while (show_bits(&s->gb, 9) == 1) skip_bits(&s->gb, 9); - if (get_bits_long(&s->gb, 19) != DC_MARKER) { + if (get_bits(&s->gb, 19) != DC_MARKER) { av_log(s->avctx, AV_LOG_ERROR, "marker missing after first I partition at %d %d\n", s->mb_x, s->mb_y); @@ -1782,7 +1782,7 @@ static void next_start_code_studio(GetBitContext *gb) { align_get_bits(gb); - while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) { + while (get_bits_left(gb) >= 24 && show_bits(gb, 24) != 0x1) { get_bits(gb, 8); } } diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c index d977cb1160..9434a740a7 100644 --- a/libavcodec/mss2.c +++ b/libavcodec/mss2.c @@ -412,8 +412,6 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size, ff_mpeg_er_frame_start(s); - v->bits = buf_size * 8; - v->end_mb_x = (w + 15) >> 4; s->end_mb_y = (h + 15) >> 4; if (v->respic & 1) diff --git 
a/libavcodec/mvha.c b/libavcodec/mvha.c index c270063b1c..afe5e511f2 100644 --- a/libavcodec/mvha.c +++ b/libavcodec/mvha.c @@ -161,6 +161,9 @@ static int decode_frame(AVCodecContext *avctx, type = AV_RB32(avpkt->data); size = AV_RL32(avpkt->data + 4); + if (size < 1 || size >= avpkt->size) + return AVERROR_INVALIDDATA; + if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) return ret; diff --git a/libavcodec/noise_bsf.c b/libavcodec/noise_bsf.c index d79f63b777..721fd217ec 100644 --- a/libavcodec/noise_bsf.c +++ b/libavcodec/noise_bsf.c @@ -39,7 +39,7 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt) { NoiseContext *s = ctx->priv_data; int amount = s->amount > 0 ? s->amount : (s->state % 10001 + 1); - int i, ret = 0; + int i, ret; if (amount <= 0) return AVERROR(EINVAL); @@ -55,19 +55,18 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt) } ret = av_packet_make_writable(pkt); - if (ret < 0) - goto fail; + if (ret < 0) { + av_packet_unref(pkt); + return ret; + } for (i = 0; i < pkt->size; i++) { s->state += pkt->data[i] + 1; if (s->state % amount == 0) pkt->data[i] = s->state; } -fail: - if (ret < 0) - av_packet_unref(pkt); - return ret; + return 0; } #define OFFSET(x) offsetof(NoiseContext, x) diff --git a/libavcodec/nvdec_mpeg12.c b/libavcodec/nvdec_mpeg12.c index 300e1d3d88..9a9030d8d3 100644 --- a/libavcodec/nvdec_mpeg12.c +++ b/libavcodec/nvdec_mpeg12.c @@ -50,6 +50,10 @@ static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer .FrameHeightInMbs = (cur_frame->height + 15) / 16, .CurrPicIdx = cf->idx, + .field_pic_flag = s->picture_structure != PICT_FRAME, + .bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD, + .second_field = s->picture_structure != PICT_FRAME && !s->first_field, + .intra_pic_flag = s->pict_type == AV_PICTURE_TYPE_I, .ref_pic_flag = s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_P, diff --git a/libavcodec/nvenc.c b/libavcodec/nvenc.c index 310e30805d..9a96bf2bba 100644 --- a/libavcodec/nvenc.c +++ b/libavcodec/nvenc.c @@ -2262,3 +2262,8 @@ int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt, return 0; } + +av_cold void ff_nvenc_encode_flush(AVCodecContext *avctx) +{ + ff_nvenc_send_frame(avctx, NULL); +} diff --git a/libavcodec/nvenc.h b/libavcodec/nvenc.h index a269bd97bb..c44c81e675 100644 --- a/libavcodec/nvenc.h +++ b/libavcodec/nvenc.h @@ -214,6 +214,8 @@ int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt); int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet); +void ff_nvenc_encode_flush(AVCodecContext *avctx); + extern const enum AVPixelFormat ff_nvenc_pix_fmts[]; #endif /* AVCODEC_NVENC_H */ diff --git a/libavcodec/nvenc_h264.c b/libavcodec/nvenc_h264.c index d5c7370aaa..479155fe15 100644 --- a/libavcodec/nvenc_h264.c +++ b/libavcodec/nvenc_h264.c @@ -240,6 +240,7 @@ AVCodec ff_h264_nvenc_encoder = { .receive_packet = ff_nvenc_receive_packet, .encode2 = ff_nvenc_encode_frame, .close = ff_nvenc_encode_close, + .flush = ff_nvenc_encode_flush, .priv_data_size = sizeof(NvencContext), .priv_class = &h264_nvenc_class, .defaults = defaults, diff --git a/libavcodec/nvenc_hevc.c b/libavcodec/nvenc_hevc.c index c668b97f86..7c9b3848f1 100644 --- a/libavcodec/nvenc_hevc.c +++ b/libavcodec/nvenc_hevc.c @@ -198,6 +198,7 @@ AVCodec ff_hevc_nvenc_encoder = { .receive_packet = ff_nvenc_receive_packet, .encode2 = ff_nvenc_encode_frame, .close = ff_nvenc_encode_close, + .flush = ff_nvenc_encode_flush, .priv_data_size = sizeof(NvencContext), 
.priv_class = &hevc_nvenc_class, .defaults = defaults, diff --git a/libavcodec/options_table.h b/libavcodec/options_table.h index 4b9f3fff2b..d4c0cdeb48 100644 --- a/libavcodec/options_table.h +++ b/libavcodec/options_table.h @@ -141,8 +141,8 @@ static const AVOption avcodec_options[] = { {"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|D, "err_detect"}, {"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|D, "err_detect"}, {"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"}, -{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, A|V|D, "err_detect"}, -{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, A|V|D, "err_detect"}, +{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT | AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"}, +{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE | AV_EF_COMPLIANT | AV_EF_CAREFUL}, INT_MIN, INT_MAX, A|V|D, "err_detect"}, {"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX}, {"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX}, #if FF_API_PRIVATE_OPT diff --git a/libavcodec/pgssubdec.c b/libavcodec/pgssubdec.c index 8c10f6d573..7fadcb8b4b 100644 --- a/libavcodec/pgssubdec.c +++ b/libavcodec/pgssubdec.c @@ -691,8 +691,11 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size, ret = AVERROR_INVALIDDATA; break; } - if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) + if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) { + avsubtitle_free(data); + *data_size = 0; return ret; + } buf += segment_length; } diff --git a/libavcodec/pnm.c b/libavcodec/pnm.c index b4e5d3076b..b5c2881948 100644 --- a/libavcodec/pnm.c +++ b/libavcodec/pnm.c @@ -117,6 +117,9 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s) return AVERROR_INVALIDDATA; } } + if (!pnm_space(s->bytestream[-1])) + return AVERROR_INVALIDDATA; + /* check that all tags are present */ if (w <= 0 || h <= 0 || maxval <= 0 || maxval > UINT16_MAX || depth <= 0 || tuple_type[0] == '\0' || av_image_check_size(w, h, 0, avctx) || s->bytestream >= s->bytestream_end) @@ -197,6 +200,10 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s) } }else s->maxval=1; + + if (!pnm_space(s->bytestream[-1])) + return AVERROR_INVALIDDATA; + /* more check if YUV420 */ if (av_pix_fmt_desc_get(avctx->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR) { if ((avctx->width & 1) != 0) diff --git a/libavcodec/pnmdec.c b/libavcodec/pnmdec.c index 958c5e43b0..dbcaef3884 100644 --- a/libavcodec/pnmdec.c +++ b/libavcodec/pnmdec.c @@ -143,7 +143,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data, v = (*s->bytestream++)&1; } else { /* read a sequence of digits */ - for (k = 0; k < 5 && c <= 9; k += 1) { + for (k = 0; k < 6 && c <= 9; k += 1) { v = 10*v + c; c = (*s->bytestream++) - '0'; } diff --git a/libavcodec/proresenc_anatoliy.c b/libavcodec/proresenc_anatoliy.c index 0fc79fc1de..1fcb0ae913 100644 --- 
a/libavcodec/proresenc_anatoliy.c +++ b/libavcodec/proresenc_anatoliy.c @@ -224,7 +224,7 @@ static void encode_codeword(PutBitContext *pb, int val, int codebook) } #define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind])) -#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31)) +#define TO_GOLOMB(val) (((val) * 2) ^ ((val) >> 31)) #define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign)) #define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1) #define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign)) diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c index b00e427435..db98c75073 100644 --- a/libavcodec/qsv.c +++ b/libavcodec/qsv.c @@ -72,58 +72,6 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id) return AVERROR(ENOSYS); } - -static const struct { - enum AVCodecID codec_id; - int codec_profile; - int mfx_profile; -} qsv_profile_map[] = { -#define MAP(c, p, v) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, MFX_PROFILE_ ## v } - MAP(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2_SIMPLE ), - MAP(MPEG2VIDEO, MPEG2_MAIN, MPEG2_MAIN ), - MAP(MPEG2VIDEO, MPEG2_HIGH, MPEG2_HIGH ), - - MAP(H264, H264_BASELINE, AVC_BASELINE ), - MAP(H264, H264_CONSTRAINED_BASELINE, AVC_BASELINE), -#if QSV_VERSION_ATLEAST(1, 3) - MAP(H264, H264_EXTENDED, AVC_EXTENDED ), -#endif - MAP(H264, H264_MAIN, AVC_MAIN ), - MAP(H264, H264_HIGH, AVC_HIGH ), - MAP(H264, H264_HIGH_422, AVC_HIGH_422 ), - -#if QSV_VERSION_ATLEAST(1, 8) - MAP(HEVC, HEVC_MAIN, HEVC_MAIN ), - MAP(HEVC, HEVC_MAIN_10, HEVC_MAIN10 ), - MAP(HEVC, HEVC_MAIN_STILL_PICTURE, HEVC_MAINSP ), -#endif -#if QSV_VERSION_ATLEAST(1, 16) - MAP(HEVC, HEVC_REXT, HEVC_REXT ), -#endif - - MAP(VC1, VC1_SIMPLE, VC1_SIMPLE ), - MAP(VC1, VC1_MAIN, VC1_MAIN ), - MAP(VC1, VC1_COMPLEX, VC1_ADVANCED ), - MAP(VC1, VC1_ADVANCED, VC1_ADVANCED ), -#undef MAP -}; - -int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile) -{ - int i; - if (profile == FF_PROFILE_UNKNOWN) - return MFX_PROFILE_UNKNOWN; - - for (i = 0; i < FF_ARRAY_ELEMS(qsv_profile_map); i++) { - if (qsv_profile_map[i].codec_id != codec_id) - continue; - if (qsv_profile_map[i].codec_profile == profile) - return qsv_profile_map[i].mfx_profile; - } - - return MFX_PROFILE_UNKNOWN; -} - int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level) { if (level == FF_LEVEL_UNKNOWN) diff --git a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h index 37559270e5..6489836a67 100644 --- a/libavcodec/qsv_internal.h +++ b/libavcodec/qsv_internal.h @@ -116,7 +116,6 @@ int ff_qsv_print_warning(void *log_ctx, mfxStatus err, const char *warning_string); int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id); -int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile); int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level); enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc); diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c index 0d34021b42..fc25dc73e5 100644 --- a/libavcodec/qsvdec.c +++ b/libavcodec/qsvdec.c @@ -74,7 +74,7 @@ static int ff_qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, A break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n"); - return AVERROR(ENOMEM); + return AVERROR(EINVAL); } frame->linesize[1] = frame->linesize[0]; @@ -99,9 +99,11 @@ static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession ses int ret; if (q->gpu_copy == MFX_GPUCOPY_ON && - !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) + !(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) { av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy " - "only works in MFX_IOPATTERN_OUT_SYSTEM_MEMORY.\n"); + 
"only works in system memory mode.\n"); + q->gpu_copy = MFX_GPUCOPY_OFF; + } if (session) { q->session = session; } else if (hw_frames_ref) { diff --git a/libavcodec/qsvenc.c b/libavcodec/qsvenc.c index 4f103b9ff6..9e416500e9 100644 --- a/libavcodec/qsvenc.c +++ b/libavcodec/qsvenc.c @@ -139,6 +139,9 @@ static void dump_video_param(AVCodecContext *avctx, QSVEncContext *q, #if QSV_HAVE_CO3 mfxExtCodingOption3 *co3 = (mfxExtCodingOption3*)coding_opts[2]; #endif +#if QSV_HAVE_EXT_HEVC_TILES + mfxExtHEVCTiles *exthevctiles = (mfxExtHEVCTiles *)coding_opts[3 + QSV_HAVE_CO_VPS]; +#endif av_log(avctx, AV_LOG_VERBOSE, "profile: %s; level: %"PRIu16"\n", print_profile(info->CodecProfile), info->CodecLevel); @@ -204,6 +207,12 @@ static void dump_video_param(AVCodecContext *avctx, QSVEncContext *q, av_log(avctx, AV_LOG_VERBOSE, "RateDistortionOpt: %s\n", print_threestate(co->RateDistortionOpt)); +#if QSV_HAVE_EXT_HEVC_TILES + if (avctx->codec_id == AV_CODEC_ID_HEVC) + av_log(avctx, AV_LOG_VERBOSE, "NumTileColumns: %"PRIu16"; NumTileRows: %"PRIu16"\n", + exthevctiles->NumTileColumns, exthevctiles->NumTileRows); +#endif + #if QSV_HAVE_CO2 av_log(avctx, AV_LOG_VERBOSE, "RecoveryPointSEI: %s IntRefType: %"PRIu16"; IntRefCycleSize: %"PRIu16"; IntRefQPDelta: %"PRId16"\n", @@ -771,6 +780,16 @@ FF_ENABLE_DEPRECATION_WARNINGS } #endif +#if QSV_HAVE_EXT_HEVC_TILES + if (avctx->codec_id == AV_CODEC_ID_HEVC) { + q->exthevctiles.Header.BufferId = MFX_EXTBUFF_HEVC_TILES; + q->exthevctiles.Header.BufferSz = sizeof(q->exthevctiles); + q->exthevctiles.NumTileColumns = q->tile_cols; + q->exthevctiles.NumTileRows = q->tile_rows; + q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->exthevctiles; + } +#endif + if (!check_enc_param(avctx,q)) { av_log(avctx, AV_LOG_ERROR, "some encoding parameters are not supported by the QSV " @@ -889,7 +908,14 @@ static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q) }; #endif - mfxExtBuffer *ext_buffers[2 + QSV_HAVE_CO2 + QSV_HAVE_CO3 + QSV_HAVE_CO_VPS]; +#if QSV_HAVE_EXT_HEVC_TILES + mfxExtHEVCTiles hevc_tile_buf = { + .Header.BufferId = MFX_EXTBUFF_HEVC_TILES, + .Header.BufferSz = sizeof(hevc_tile_buf), + }; +#endif + + mfxExtBuffer *ext_buffers[2 + QSV_HAVE_CO2 + QSV_HAVE_CO3 + QSV_HAVE_CO_VPS + QSV_HAVE_EXT_HEVC_TILES]; int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO; int ret, ext_buf_num = 0, extradata_offset = 0; @@ -907,6 +933,10 @@ static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q) if (q->hevc_vps) ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&extradata_vps; #endif +#if QSV_HAVE_EXT_HEVC_TILES + if (avctx->codec_id == AV_CODEC_ID_HEVC) + ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&hevc_tile_buf; +#endif q->param.ExtParam = ext_buffers; q->param.NumExtParam = ext_buf_num; diff --git a/libavcodec/qsvenc.h b/libavcodec/qsvenc.h index ee35582075..6609171af3 100644 --- a/libavcodec/qsvenc.h +++ b/libavcodec/qsvenc.h @@ -38,6 +38,7 @@ #define QSV_HAVE_CO3 QSV_VERSION_ATLEAST(1, 11) #define QSV_HAVE_CO_VPS QSV_VERSION_ATLEAST(1, 17) +#define QSV_HAVE_EXT_HEVC_TILES QSV_VERSION_ATLEAST(1, 13) #define QSV_HAVE_EXT_VP9_PARAM QSV_VERSION_ATLEAST(1, 26) #define QSV_HAVE_TRELLIS QSV_VERSION_ATLEAST(1, 8) @@ -124,6 +125,9 @@ typedef struct QSVEncContext { mfxExtMultiFrameParam extmfp; mfxExtMultiFrameControl extmfc; #endif +#if QSV_HAVE_EXT_HEVC_TILES + mfxExtHEVCTiles exthevctiles; +#endif #if QSV_HAVE_EXT_VP9_PARAM mfxExtVP9Param extvp9param; #endif @@ -161,6 +165,9 @@ typedef struct QSVEncContext { int max_frame_size; 
int max_slice_size; + int tile_cols; + int tile_rows; + int aud; int single_sei_nal_unit; diff --git a/libavcodec/qsvenc_hevc.c b/libavcodec/qsvenc_hevc.c index da64b4c21b..27e2232a9f 100644 --- a/libavcodec/qsvenc_hevc.c +++ b/libavcodec/qsvenc_hevc.c @@ -243,6 +243,9 @@ static const AVOption options[] = { { "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE}, + { "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE }, + { "tile_rows", "Number of rows for tiled encoding", OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE }, + { NULL }, }; diff --git a/libavcodec/simple_idct_template.c b/libavcodec/simple_idct_template.c index d8fcfd7c53..5ddd0b45a2 100644 --- a/libavcodec/simple_idct_template.c +++ b/libavcodec/simple_idct_template.c @@ -121,7 +121,7 @@ static inline void FUNC6(idctRowCondDC)(idctin *row, int extra_shift) // TODO: Add DC-only support for int32_t input #if IN_IDCT_DEPTH == 16 #if HAVE_FAST_64BIT -#define ROW0_MASK (0xffffLL << 48 * HAVE_BIGENDIAN) +#define ROW0_MASK (0xffffULL << 48 * HAVE_BIGENDIAN) if (((AV_RN64A(row) & ~ROW0_MASK) | AV_RN64A(row+4)) == 0) { uint64_t temp; if (DC_SHIFT - extra_shift >= 0) { diff --git a/libavcodec/sonic.c b/libavcodec/sonic.c index 219412eb77..c975774b04 100644 --- a/libavcodec/sonic.c +++ b/libavcodec/sonic.c @@ -144,6 +144,8 @@ static inline av_flatten int get_symbol(RangeCoder *c, uint8_t *state, int is_si e= 0; while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10 e++; + if (e > 31) + return AVERROR_INVALIDDATA; } a= 1; diff --git a/libavcodec/targa.c b/libavcodec/targa.c index 93e0ef7905..a61fef1d7b 100644 --- a/libavcodec/targa.c +++ b/libavcodec/targa.c @@ -132,12 +132,6 @@ static int decode_frame(AVCodecContext *avctx, h = bytestream2_get_le16(&s->gb); bpp = bytestream2_get_byte(&s->gb); - if (bytestream2_get_bytes_left(&s->gb) <= idlen) { - av_log(avctx, AV_LOG_ERROR, - "Not enough data to read header\n"); - return AVERROR_INVALIDDATA; - } - flags = bytestream2_get_byte(&s->gb); if (!pal && (first_clr || colors || csize)) { @@ -146,6 +140,12 @@ static int decode_frame(AVCodecContext *avctx, first_clr = colors = csize = 0; } + if (bytestream2_get_bytes_left(&s->gb) < idlen + 2*colors) { + av_log(avctx, AV_LOG_ERROR, + "Not enough data to read header\n"); + return AVERROR_INVALIDDATA; + } + // skip identifier if any bytestream2_skip(&s->gb, idlen); diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c index 636614aa28..e8357114de 100644 --- a/libavcodec/tiff.c +++ b/libavcodec/tiff.c @@ -1218,6 +1218,8 @@ static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den) static int tiff_decode_tag(TiffContext *s, AVFrame *frame) { + AVFrameSideData *sd; + GetByteContext gb_temp; unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. 
to 1 int i, start; int pos; @@ -1643,6 +1645,22 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame) } } break; + case TIFF_ICC_PROFILE: + if (type != TIFF_UNDEFINED) + return AVERROR_INVALIDDATA; + + gb_temp = s->gb; + bytestream2_seek(&gb_temp, off, SEEK_SET); + + if (bytestream2_get_bytes_left(&gb_temp) < count) + return AVERROR_INVALIDDATA; + + sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count); + if (!sd) + return AVERROR(ENOMEM); + + bytestream2_get_bufferu(&gb_temp, sd->data, count); + break; case TIFF_ARTIST: ADD_METADATA(count, "artist", NULL); break; diff --git a/libavcodec/tiff.h b/libavcodec/tiff.h index 2184c2c829..c07a5d4fa9 100644 --- a/libavcodec/tiff.h +++ b/libavcodec/tiff.h @@ -92,6 +92,7 @@ enum TiffTags { TIFF_MODEL_TIEPOINT = 0x8482, TIFF_MODEL_PIXEL_SCALE = 0x830E, TIFF_MODEL_TRANSFORMATION= 0x8480, + TIFF_ICC_PROFILE = 0x8773, TIFF_GEO_KEY_DIRECTORY = 0x87AF, TIFF_GEO_DOUBLE_PARAMS = 0x87B0, TIFF_GEO_ASCII_PARAMS = 0x87B1, diff --git a/libavcodec/truemotion2.c b/libavcodec/truemotion2.c index 7d70746339..d90a8baff3 100644 --- a/libavcodec/truemotion2.c +++ b/libavcodec/truemotion2.c @@ -155,7 +155,7 @@ static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code) huff.val_bits = get_bits(&ctx->gb, 5); huff.max_bits = get_bits(&ctx->gb, 5); huff.min_bits = get_bits(&ctx->gb, 5); - huff.nodes = get_bits_long(&ctx->gb, 17); + huff.nodes = get_bits(&ctx->gb, 17); huff.num = 0; /* check for correct codes parameters */ diff --git a/libavcodec/twinvqdec.c b/libavcodec/twinvqdec.c index c2353f51b5..c00ebb2ad5 100644 --- a/libavcodec/twinvqdec.c +++ b/libavcodec/twinvqdec.c @@ -404,7 +404,7 @@ static av_cold int twinvq_decode_init(AVCodecContext *avctx) tctx->frame_size = avctx->bit_rate * tctx->mtab->size / avctx->sample_rate + 8; tctx->is_6kbps = 0; - if (avctx->block_align && avctx->block_align * 8 / tctx->frame_size > 1) { + if (avctx->block_align && avctx->block_align * 8LL / tctx->frame_size > 1) { av_log(avctx, AV_LOG_ERROR, "VQF TwinVQ should have only one frame per packet\n"); return AVERROR_INVALIDDATA; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 75e7035b8a..ab48754a64 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -551,6 +551,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code int codec_init_ok = 0; AVDictionary *tmp = NULL; const AVPixFmtDescriptor *pixdesc; + AVCodecInternal *avci; if (avcodec_is_open(avctx)) return 0; @@ -575,55 +576,56 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code ff_lock_avcodec(avctx, codec); - avctx->internal = av_mallocz(sizeof(*avctx->internal)); - if (!avctx->internal) { + avci = av_mallocz(sizeof(*avci)); + if (!avci) { ret = AVERROR(ENOMEM); goto end; } + avctx->internal = avci; - avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool)); - if (!avctx->internal->pool) { + avci->pool = av_mallocz(sizeof(*avci->pool)); + if (!avci->pool) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->to_free = av_frame_alloc(); - if (!avctx->internal->to_free) { + avci->to_free = av_frame_alloc(); + if (!avci->to_free) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->compat_decode_frame = av_frame_alloc(); - if (!avctx->internal->compat_decode_frame) { + avci->compat_decode_frame = av_frame_alloc(); + if (!avci->compat_decode_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->buffer_frame = av_frame_alloc(); - if (!avctx->internal->buffer_frame) { +
avci->buffer_frame = av_frame_alloc(); + if (!avci->buffer_frame) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->buffer_pkt = av_packet_alloc(); - if (!avctx->internal->buffer_pkt) { + avci->buffer_pkt = av_packet_alloc(); + if (!avci->buffer_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->ds.in_pkt = av_packet_alloc(); - if (!avctx->internal->ds.in_pkt) { + avci->ds.in_pkt = av_packet_alloc(); + if (!avci->ds.in_pkt) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->last_pkt_props = av_packet_alloc(); - if (!avctx->internal->last_pkt_props) { + avci->last_pkt_props = av_packet_alloc(); + if (!avci->last_pkt_props) { ret = AVERROR(ENOMEM); goto free_and_end; } - avctx->internal->skip_samples_multiplier = 1; + avci->skip_samples_multiplier = 1; if (codec->priv_data_size > 0) { if (!avctx->priv_data) { @@ -654,12 +656,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code // only call ff_set_dimensions() for non H.264/VP6F/DXV codecs so as not to overwrite previously setup dimensions if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height && (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) { - if (avctx->coded_width && avctx->coded_height) - ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); - else if (avctx->width && avctx->height) - ret = ff_set_dimensions(avctx, avctx->width, avctx->height); - if (ret < 0) - goto free_and_end; + if (avctx->coded_width && avctx->coded_height) + ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height); + else if (avctx->width && avctx->height) + ret = ff_set_dimensions(avctx, avctx->width, avctx->height); + if (ret < 0) + goto free_and_end; } if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height) @@ -755,7 +757,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } if (HAVE_THREADS - && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { + && !(avci->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) { ret = ff_thread_init(avctx); if (ret < 0) { goto free_and_end; @@ -947,7 +949,7 @@ FF_ENABLE_DEPRECATION_WARNINGS "gray decoding requested but not enabled at configuration time\n"); if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME) - || avctx->internal->frame_thread_encoder)) { + || avci->frame_thread_encoder)) { ret = avctx->codec->init(avctx); if (ret < 0) { goto free_and_end; @@ -1045,7 +1047,7 @@ free_and_end: (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))) avctx->codec->close(avctx); - if (HAVE_THREADS && avctx->internal->thread_ctx) + if (HAVE_THREADS && avci->thread_ctx) ff_thread_free(avctx); if (codec->priv_class && codec->priv_data_size) @@ -1061,19 +1063,20 @@ FF_ENABLE_DEPRECATION_WARNINGS av_dict_free(&tmp); av_freep(&avctx->priv_data); av_freep(&avctx->subtitle_header); - if (avctx->internal) { - av_frame_free(&avctx->internal->to_free); - av_frame_free(&avctx->internal->compat_decode_frame); - av_frame_free(&avctx->internal->buffer_frame); - av_packet_free(&avctx->internal->buffer_pkt); - av_packet_free(&avctx->internal->last_pkt_props); + if (avci) { + av_frame_free(&avci->to_free); + av_frame_free(&avci->compat_decode_frame); + av_frame_free(&avci->buffer_frame); + av_packet_free(&avci->buffer_pkt); + av_packet_free(&avci->last_pkt_props); - 
av_packet_free(&avctx->internal->ds.in_pkt); + av_packet_free(&avci->ds.in_pkt); ff_decode_bsfs_uninit(avctx); - av_freep(&avctx->internal->pool); + av_freep(&avci->pool); } - av_freep(&avctx->internal); + av_freep(&avci); + avctx->internal = NULL; avctx->codec = NULL; goto end; } @@ -1444,7 +1447,7 @@ const char *avcodec_configuration(void) const char *avcodec_license(void) { #define LICENSE_PREFIX "libavcodec license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } int av_get_exact_bits_per_sample(enum AVCodecID codec_id) @@ -1510,7 +1513,7 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id) enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be) { - static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = { + static const enum AVCodecID map[][2] = { [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, @@ -1523,7 +1526,7 @@ enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be) [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, [AV_SAMPLE_FMT_DBLP] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, }; - if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB) + if (fmt < 0 || fmt >= FF_ARRAY_ELEMS(map)) return AV_CODEC_ID_NONE; if (be < 0 || be > 1) be = AV_NE(1, 0); diff --git a/libavcodec/v210dec.c b/libavcodec/v210dec.c index 8483023815..044d35338b 100644 --- a/libavcodec/v210dec.c +++ b/libavcodec/v210dec.c @@ -153,7 +153,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, int aligned_width = ((avctx->width + 47) / 48) * 48; stride = aligned_width * 8 / 3; } - td.stride = stride; if (avpkt->size < stride * avctx->height) { if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) { @@ -166,6 +165,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, return AVERROR_INVALIDDATA; } } + td.stride = stride; if ( avctx->codec_tag == MKTAG('C', '2', '1', '0') && avpkt->size > 64 && AV_RN32(psrc) == AV_RN32("INFO") diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c index bdc2b84153..1bfa8b2cec 100644 --- a/libavcodec/v4l2_context.c +++ b/libavcodec/v4l2_context.c @@ -207,7 +207,7 @@ static int v4l2_handle_event(V4L2Context *ctx) ret = ff_v4l2_m2m_codec_full_reinit(s); if (ret) { av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit\n"); - return -EINVAL; + return AVERROR(EINVAL); } goto reinit_run; } @@ -221,7 +221,7 @@ static int v4l2_handle_event(V4L2Context *ctx) ret = ff_v4l2_m2m_codec_reinit(s); if (ret) { av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n"); - return -EINVAL; + return AVERROR(EINVAL); } goto reinit_run; } diff --git a/libavcodec/v4l2_m2m_enc.c b/libavcodec/v4l2_m2m_enc.c index 474e6bef89..8059e3bb48 100644 --- a/libavcodec/v4l2_m2m_enc.c +++ b/libavcodec/v4l2_m2m_enc.c @@ -30,6 +30,7 @@ #include "libavutil/opt.h" #include "v4l2_context.h" #include "v4l2_m2m.h" +#include "v4l2_fmt.h" #define MPEG_CID(x) V4L2_CID_MPEG_VIDEO_##x #define MPEG_VIDEO(x) V4L2_MPEG_VIDEO_##x @@ -288,6 +289,8 @@ static av_cold int v4l2_encode_init(AVCodecContext *avctx) V4L2Context *capture, *output; V4L2m2mContext *s; V4L2m2mPriv *priv = avctx->priv_data; + enum AVPixelFormat pix_fmt_output; + uint32_t v4l2_fmt_output; int ret; ret = ff_v4l2_m2m_create_context(priv, &s); @@ -316,6 +319,18 @@ static av_cold int 
v4l2_encode_init(AVCodecContext *avctx) } s->avctx = avctx; + if (V4L2_TYPE_IS_MULTIPLANAR(output->type)) + v4l2_fmt_output = output->format.fmt.pix_mp.pixelformat; + else + v4l2_fmt_output = output->format.fmt.pix.pixelformat; + + pix_fmt_output = ff_v4l2_format_v4l2_to_avfmt(v4l2_fmt_output, AV_CODEC_ID_RAWVIDEO); + if (pix_fmt_output != avctx->pix_fmt) { + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt_output); + av_log(avctx, AV_LOG_ERROR, "Encoder requires %s pixel format.\n", desc->name); + return AVERROR(EINVAL); + } + return v4l2_prepare_encoder(s); } diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c index 3be9159d37..b0235114df 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c @@ -579,6 +579,8 @@ static int vaapi_encode_output(AVCodecContext *avctx, VAAPIEncodeContext *ctx = avctx->priv_data; VACodedBufferSegment *buf_list, *buf; VAStatus vas; + int total_size = 0; + uint8_t *ptr; int err; err = vaapi_encode_wait(avctx, pic); @@ -595,15 +597,21 @@ static int vaapi_encode_output(AVCodecContext *avctx, goto fail; } + for (buf = buf_list; buf; buf = buf->next) + total_size += buf->size; + + err = av_new_packet(pkt, total_size); + ptr = pkt->data; + + if (err < 0) + goto fail_mapped; + for (buf = buf_list; buf; buf = buf->next) { av_log(avctx, AV_LOG_DEBUG, "Output buffer: %u bytes " "(status %08x).\n", buf->size, buf->status); - err = av_new_packet(pkt, buf->size); - if (err < 0) - goto fail_mapped; - - memcpy(pkt->data, buf->buf, buf->size); + memcpy(ptr, buf->buf, buf->size); + ptr += buf->size; } if (pic->type == PICTURE_TYPE_IDR) diff --git a/libavcodec/vc1.h b/libavcodec/vc1.h index 69f6ca9e4d..4559a06cb6 100644 --- a/libavcodec/vc1.h +++ b/libavcodec/vc1.h @@ -176,8 +176,6 @@ typedef struct VC1Context{ H264ChromaContext h264chroma; VC1DSPContext vc1dsp; - int bits; - /** Simple/Main Profile sequence header */ //@{ int res_sprite; ///< reserved, sprite mode diff --git a/libavcodec/vc1_block.c b/libavcodec/vc1_block.c index 0ccaf6b022..16542dba3a 100644 --- a/libavcodec/vc1_block.c +++ b/libavcodec/vc1_block.c @@ -2632,10 +2632,10 @@ static void vc1_decode_i_blocks(VC1Context *v) if (v->s.loop_filter) ff_vc1_i_loop_filter(v); - if (get_bits_count(&s->gb) > v->bits) { + if (get_bits_left(&s->gb) < 0) { ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR); av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", - get_bits_count(&s->gb), v->bits); + get_bits_count(&s->gb), s->gb.size_in_bits); return; } @@ -2778,11 +2778,11 @@ static int vc1_decode_i_blocks_adv(VC1Context *v) if (v->s.loop_filter) ff_vc1_i_loop_filter(v); - if (get_bits_count(&s->gb) > v->bits) { + if (get_bits_left(&s->gb) < 0) { // TODO: may need modification to handle slice coding ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR); av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", - get_bits_count(&s->gb), v->bits); + get_bits_count(&s->gb), s->gb.size_in_bits); return 0; } inc_blk_idx(v->topleft_blk_idx); @@ -2862,11 +2862,11 @@ static void vc1_decode_p_blocks(VC1Context *v) if (apply_loop_filter) ff_vc1_p_loop_filter(v); } - if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) { + if (get_bits_left(&s->gb) < 0 || get_bits_count(&s->gb) < 0) { // TODO: may need modification to handle slice coding ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR); av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", - get_bits_count(&s->gb), v->bits, s->mb_x, 
s->mb_y); + get_bits_count(&s->gb), s->gb.size_in_bits, s->mb_x, s->mb_y); return; } inc_blk_idx(v->topleft_blk_idx); @@ -2951,11 +2951,11 @@ static void vc1_decode_b_blocks(VC1Context *v) if (v->s.loop_filter) ff_vc1_i_loop_filter(v); } - if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) { + if (get_bits_left(&s->gb) < 0 || get_bits_count(&s->gb) < 0) { // TODO: may need modification to handle slice coding ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR); av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", - get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y); + get_bits_count(&s->gb), s->gb.size_in_bits, s->mb_x, s->mb_y); return; } } diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index ac3198e4fd..c526b200b5 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -431,7 +431,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx) v->output_height = avctx->height; if (!avctx->extradata_size || !avctx->extradata) - return -1; + return AVERROR_INVALIDDATA; v->s.avctx = avctx; if ((ret = ff_vc1_init_common(v)) < 0) @@ -472,7 +472,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx) if (avctx->extradata_size < 16) { av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size); - return -1; + return AVERROR_INVALIDDATA; } buf2 = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); @@ -508,7 +508,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx) av_free(buf2); if (!seq_initialized || !ep_initialized) { av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n"); - return -1; + return AVERROR_INVALIDDATA; } v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE); } @@ -576,14 +576,21 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx) if (v->sprite_width > 1 << 14 || v->sprite_height > 1 << 14 || v->output_width > 1 << 14 || - v->output_height > 1 << 14) return -1; + v->output_height > 1 << 14) { + ret = AVERROR_INVALIDDATA; + goto error; + } if ((v->sprite_width&1) || (v->sprite_height&1)) { avpriv_request_sample(avctx, "odd sprites support"); - return AVERROR_PATCHWELCOME; + ret = AVERROR_PATCHWELCOME; + goto error; } } return 0; +error: + av_frame_free(&v->sprite_output_frame); + return ret; } /** Close a VC1/WMV3 decoder @@ -688,7 +695,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, int buf_size3; if (avctx->hwaccel) buf_start_second_field = start; - tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1)); + tmp = av_realloc_array(slices, sizeof(*slices), n_slices+1); if (!tmp) { ret = AVERROR(ENOMEM); goto err; @@ -717,7 +724,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, break; case VC1_CODE_SLICE: { int buf_size3; - tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1)); + tmp = av_realloc_array(slices, sizeof(*slices), n_slices+1); if (!tmp) { ret = AVERROR(ENOMEM); goto err; @@ -752,7 +759,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, } else { // found field marker, unescape second field if (avctx->hwaccel) buf_start_second_field = divider; - tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1)); + tmp = av_realloc_array(slices, sizeof(*slices), n_slices+1); if (!tmp) { ret = AVERROR(ENOMEM); goto err; @@ -847,7 +854,12 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, ret = AVERROR_INVALIDDATA; goto err; } - + if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) + && v->field_mode) { + 
av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected Frames not Fields\n"); + ret = AVERROR_INVALIDDATA; + goto err; + } if ((s->mb_height >> v->field_mode) == 0) { av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n"); ret = AVERROR_INVALIDDATA; @@ -1026,7 +1038,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, ff_mpeg_er_frame_start(s); - v->bits = buf_size * 8; v->end_mb_x = s->mb_width; if (v->field_mode) { s->current_picture.f->linesize[0] <<= 1; @@ -1100,8 +1111,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, continue; } ff_vc1_decode_blocks(v); - if (i != n_slices) + if (i != n_slices) { s->gb = slices[i].gb; + } } if (v->field_mode) { v->second_field = 0; diff --git a/libavcodec/vc2enc.c b/libavcodec/vc2enc.c index d0101e01e4..ba5a03e4ec 100644 --- a/libavcodec/vc2enc.c +++ b/libavcodec/vc2enc.c @@ -867,6 +867,7 @@ static int dwt_plane(AVCodecContext *avctx, void *arg) for (x = 0; x < p->width; x++) { buf[x] = pix[x] - s->diff_offset; } + memset(&buf[x], 0, (p->coef_stride - p->width)*sizeof(dwtcoef)); buf += p->coef_stride; pix += pix_stride; } @@ -876,6 +877,7 @@ static int dwt_plane(AVCodecContext *avctx, void *arg) for (x = 0; x < p->width; x++) { buf[x] = pix[x] - s->diff_offset; } + memset(&buf[x], 0, (p->coef_stride - p->width)*sizeof(dwtcoef)); buf += p->coef_stride; pix += pix_stride; } diff --git a/libavcodec/vdpau_vp9.c b/libavcodec/vdpau_vp9.c index f1ee4ac5e0..54e060fad5 100644 --- a/libavcodec/vdpau_vp9.c +++ b/libavcodec/vdpau_vp9.c @@ -35,16 +35,15 @@ static int vdpau_vp9_start_frame(AVCodecContext *avctx, { VP9Context *s = avctx->priv_data; VP9SharedContext *h = &(s->s); - const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); - if (!pixdesc) { - return AV_PIX_FMT_NONE; - } - VP9Frame pic = h->frames[CUR_FRAME]; struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private; int i; VdpPictureInfoVP9 *info = &pic_ctx->info.vp9; + const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); + if (!pixdesc) { + return AV_PIX_FMT_NONE; + } info->width = avctx->width; info->height = avctx->height; diff --git a/libavcodec/version.h b/libavcodec/version.h index 8b9c27378c..6cf333eeb6 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -28,8 +28,8 @@ #include "libavutil/version.h" #define LIBAVCODEC_VERSION_MAJOR 58 -#define LIBAVCODEC_VERSION_MINOR 64 -#define LIBAVCODEC_VERSION_MICRO 101 +#define LIBAVCODEC_VERSION_MINOR 66 +#define LIBAVCODEC_VERSION_MICRO 100 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ diff --git a/libavcodec/vmdaudio.c b/libavcodec/vmdaudio.c index c7826fa3ce..dfbd49fd84 100644 --- a/libavcodec/vmdaudio.c +++ b/libavcodec/vmdaudio.c @@ -76,7 +76,9 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n"); return AVERROR(EINVAL); } - if (avctx->block_align < 1 || avctx->block_align % avctx->channels) { + if (avctx->block_align < 1 || avctx->block_align % avctx->channels || + avctx->block_align > INT_MAX - avctx->channels + ) { av_log(avctx, AV_LOG_ERROR, "invalid block align\n"); return AVERROR(EINVAL); } diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index 9a3821a8b9..903871e93d 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -2967,7 +2967,7 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) AVRational fps, aspect; s->theora_header = 0; - s->theora = get_bits_long(gb, 24); + s->theora = 
get_bits(gb, 24); av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora); if (!s->theora) { s->theora = 1; @@ -2988,8 +2988,8 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) s->height = get_bits(gb, 16) << 4; if (s->theora >= 0x030200) { - visible_width = get_bits_long(gb, 24); - visible_height = get_bits_long(gb, 24); + visible_width = get_bits(gb, 24); + visible_height = get_bits(gb, 24); offset_x = get_bits(gb, 8); /* offset x */ offset_y = get_bits(gb, 8); /* offset y, from bottom */ @@ -3017,8 +3017,8 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) fps.den, fps.num, 1 << 30); } - aspect.num = get_bits_long(gb, 24); - aspect.den = get_bits_long(gb, 24); + aspect.num = get_bits(gb, 24); + aspect.den = get_bits(gb, 24); if (aspect.num && aspect.den) { av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den, diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c index 0fd15efed3..7aaae9b792 100644 --- a/libavcodec/vp9.c +++ b/libavcodec/vp9.c @@ -514,7 +514,7 @@ static int decode_frame_header(AVCodecContext *avctx, s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible; if (s->s.h.keyframe) { - if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode + if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n"); return AVERROR_INVALIDDATA; } @@ -530,7 +530,7 @@ static int decode_frame_header(AVCodecContext *avctx, s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0; s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2); if (s->s.h.intraonly) { - if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode + if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n"); return AVERROR_INVALIDDATA; } diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c index d0242809fe..edc0f7911d 100644 --- a/libavcodec/wavpack.c +++ b/libavcodec/wavpack.c @@ -1114,9 +1114,7 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data, avctx->bits_per_raw_sample = ((frame_flags & 0x03) + 1) << 3; } - while (buf_size > 0) { - if (buf_size <= WV_HEADER_SIZE) - break; + while (buf_size > WV_HEADER_SIZE) { frame_size = AV_RL32(buf + 4) - 12; buf += 20; buf_size -= 20; diff --git a/libavcodec/wma.h b/libavcodec/wma.h index 8344cb5b93..c7fcf5047c 100644 --- a/libavcodec/wma.h +++ b/libavcodec/wma.h @@ -123,7 +123,7 @@ typedef struct WMACodecContext { uint8_t last_superframe[MAX_CODED_SUPERFRAME_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; /* padding added */ int last_bitoffset; int last_superframe_len; - int exponents_initialized; + int exponents_initialized[MAX_CHANNELS]; float noise_table[NOISE_TAB_SIZE]; int noise_index; float noise_mult; /* XXX: suppress that and integrate it in the noise array */ diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c index bb9bc8d236..6365fe7f47 100644 --- a/libavcodec/wmadec.c +++ b/libavcodec/wmadec.c @@ -585,11 +585,14 @@ static int wma_decode_block(WMACodecContext *s) decode_exp_lsp(s, ch); } s->exponents_bsize[ch] = bsize; + s->exponents_initialized[ch] = 1; } } - s->exponents_initialized = 1; - }else if (!s->exponents_initialized) { - return AVERROR_INVALIDDATA; + } + + for (ch = 0; ch < s->avctx->channels; ch++) { + if (s->channel_coded[ch] && !s->exponents_initialized[ch]) + return AVERROR_INVALIDDATA; } /* parse spectral coefficients : just RLE encoding */ diff --git a/libavcodec/wmalosslessdec.c b/libavcodec/wmalosslessdec.c index 7ed6ce3135..b15f812d81 
100644 --- a/libavcodec/wmalosslessdec.c +++ b/libavcodec/wmalosslessdec.c @@ -189,6 +189,13 @@ static av_cold int decode_init(AVCodecContext *avctx) return AVERROR(EINVAL); } + av_assert0(avctx->channels >= 0); + if (avctx->channels > WMALL_MAX_CHANNELS) { + avpriv_request_sample(avctx, + "More than " AV_STRINGIFY(WMALL_MAX_CHANNELS) " channels"); + return AVERROR_PATCHWELCOME; + } + s->max_frame_size = MAX_FRAMESIZE * avctx->channels; s->frame_data = av_mallocz(s->max_frame_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!s->frame_data) @@ -267,16 +274,6 @@ static av_cold int decode_init(AVCodecContext *avctx) ++s->lfe_channel; } - if (s->num_channels < 0) { - av_log(avctx, AV_LOG_ERROR, "invalid number of channels %"PRId8"\n", - s->num_channels); - return AVERROR_INVALIDDATA; - } else if (s->num_channels > WMALL_MAX_CHANNELS) { - avpriv_request_sample(avctx, - "More than %d channels", WMALL_MAX_CHANNELS); - return AVERROR_PATCHWELCOME; - } - s->frame = av_frame_alloc(); if (!s->frame) return AVERROR(ENOMEM); @@ -628,7 +625,7 @@ static void mclms_update(WmallDecodeCtx *s, int icoef, int *pred) int range = 1 << (s->bits_per_sample - 1); for (ich = 0; ich < num_channels; ich++) { - pred_error = s->channel_residues[ich][icoef] - pred[ich]; + pred_error = s->channel_residues[ich][icoef] - (unsigned)pred[ich]; if (pred_error > 0) { for (i = 0; i < order * num_channels; i++) s->mclms_coeffs[i + ich * order * num_channels] += @@ -678,9 +675,9 @@ static void mclms_predict(WmallDecodeCtx *s, int icoef, int *pred) for (i = 0; i < ich; i++) pred[ich] += (uint32_t)s->channel_residues[i][icoef] * s->mclms_coeffs_cur[i + num_channels * ich]; - pred[ich] += 1 << s->mclms_scaling - 1; + pred[ich] += (1 << s->mclms_scaling) >> 1; pred[ich] >>= s->mclms_scaling; - s->channel_residues[ich][icoef] += pred[ich]; + s->channel_residues[ich][icoef] += (unsigned)pred[ich]; } } @@ -811,19 +808,19 @@ static void revert_acfilter(WmallDecodeCtx *s, int tile_size) pred = 0; for (j = 0; j < order; j++) { if (i <= j) - pred += filter_coeffs[j] * prevvalues[j - i]; + pred += (uint32_t)filter_coeffs[j] * prevvalues[j - i]; else - pred += s->channel_residues[ich][i - j - 1] * filter_coeffs[j]; + pred += (uint32_t)s->channel_residues[ich][i - j - 1] * filter_coeffs[j]; } pred >>= scaling; - s->channel_residues[ich][i] += pred; + s->channel_residues[ich][i] += (unsigned)pred; } for (i = order; i < tile_size; i++) { pred = 0; for (j = 0; j < order; j++) pred += (uint32_t)s->channel_residues[ich][i - j - 1] * filter_coeffs[j]; pred >>= scaling; - s->channel_residues[ich][i] += pred; + s->channel_residues[ich][i] += (unsigned)pred; } for (j = 0; j < order; j++) prevvalues[j] = s->channel_residues[ich][tile_size - j - 1]; @@ -950,6 +947,8 @@ static int decode_subframe(WmallDecodeCtx *s) for (j = 0; j < subframe_len; j++) s->channel_residues[i][j] = get_sbits_long(&s->gb, bits); } else { + if (s->bits_per_sample < padding_zeroes) + return AVERROR_INVALIDDATA; for (i = 0; i < s->num_channels; i++) { if (s->is_channel_coded[i]) { decode_channel_residues(s, i, subframe_len); @@ -977,7 +976,7 @@ static int decode_subframe(WmallDecodeCtx *s) if (s->quant_stepsize != 1) for (i = 0; i < s->num_channels; i++) for (j = 0; j < subframe_len; j++) - s->channel_residues[i][j] *= s->quant_stepsize; + s->channel_residues[i][j] *= (unsigned)s->quant_stepsize; } /* Write to proper output buffer depending on bit-depth */ diff --git a/libavcodec/wmaprodec.c b/libavcodec/wmaprodec.c index 91a47aeb41..cbf5fa7fd5 100644 --- a/libavcodec/wmaprodec.c +++ 
b/libavcodec/wmaprodec.c @@ -544,7 +544,7 @@ static av_cold int decode_init(WMAProDecodeCtx *s, AVCodecContext *avctx, int nu for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) ff_mdct_init(&s->mdct_ctx[i], WMAPRO_BLOCK_MIN_BITS+1+i, 1, 1.0 / (1 << (WMAPRO_BLOCK_MIN_BITS + i - 1)) - / (1 << (s->bits_per_sample - 1))); + / (1ll << (s->bits_per_sample - 1))); /** init MDCT windows: simple sine window */ for (i = 0; i < WMAPRO_BLOCK_SIZES; i++) { diff --git a/libavcodec/wmavoice.c b/libavcodec/wmavoice.c index 5dd9b3dbb7..85c80ecca8 100644 --- a/libavcodec/wmavoice.c +++ b/libavcodec/wmavoice.c @@ -1843,6 +1843,9 @@ static int parse_packet_header(WMAVoiceContext *s) skip_bits(gb, 4); // packet sequence number s->has_residual_lsps = get_bits1(gb); do { + if (get_bits_left(gb) < 6 + s->spillover_bitsize) + return AVERROR_INVALIDDATA; + res = get_bits(gb, 6); // number of superframes per packet // (minus first one if there is spillover) n_superframes += res; @@ -2001,5 +2004,6 @@ AVCodec ff_wmavoice_decoder = { .close = wmavoice_decode_end, .decode = wmavoice_decode_packet, .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY, + .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, .flush = wmavoice_flush, }; diff --git a/libavdevice/avdevice.c b/libavdevice/avdevice.c index 72e1b67887..3d03d89f04 100644 --- a/libavdevice/avdevice.c +++ b/libavdevice/avdevice.c @@ -75,7 +75,7 @@ const char * avdevice_configuration(void) const char * avdevice_license(void) { #define LICENSE_PREFIX "libavdevice license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } static void *device_next(void *prev, int output, diff --git a/libavdevice/decklink_common.cpp b/libavdevice/decklink_common.cpp index 659aa9be3f..04c0f99edc 100644 --- a/libavdevice/decklink_common.cpp +++ b/libavdevice/decklink_common.cpp @@ -200,7 +200,7 @@ int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, enum AVFieldOrder field_order, - decklink_direction_t direction, int num) + decklink_direction_t direction) { struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data; struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx; @@ -214,8 +214,8 @@ int ff_decklink_set_format(AVFormatContext *avctx, int i = 1; HRESULT res; - av_log(avctx, AV_LOG_DEBUG, "Trying to find mode for frame size %dx%d, frame timing %d/%d, field order %d, direction %d, mode number %d, format code %s\n", - width, height, tb_num, tb_den, field_order, direction, num, (cctx->format_code) ? cctx->format_code : "(unset)"); + av_log(avctx, AV_LOG_DEBUG, "Trying to find mode for frame size %dx%d, frame timing %d/%d, field order %d, direction %d, format code %s\n", + width, height, tb_num, tb_den, field_order, direction, cctx->format_code ? 
cctx->format_code : "(unset)"); if (direction == DIRECTION_IN) { res = ctx->dli->GetDisplayModeIterator (&itermode); @@ -248,7 +248,6 @@ int ff_decklink_set_format(AVFormatContext *avctx, bmd_height == height && !av_cmp_q(mode_tb, target_tb) && field_order_eq(field_order, bmd_field_dominance)) - || i == num || target_mode == bmd_mode) { ctx->bmd_mode = bmd_mode; ctx->bmd_width = bmd_width; @@ -314,8 +313,8 @@ int ff_decklink_set_format(AVFormatContext *avctx, return -1; } -int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction, int num) { - return ff_decklink_set_format(avctx, 0, 0, 0, 0, AV_FIELD_UNKNOWN, direction, num); +int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction) { + return ff_decklink_set_format(avctx, 0, 0, 0, 0, AV_FIELD_UNKNOWN, direction); } int ff_decklink_list_devices(AVFormatContext *avctx, diff --git a/libavdevice/decklink_common.h b/libavdevice/decklink_common.h index 35422a300b..8b3dbce2fb 100644 --- a/libavdevice/decklink_common.h +++ b/libavdevice/decklink_common.h @@ -197,8 +197,8 @@ static const BMDTimecodeFormat decklink_timecode_format_map[] = { }; int ff_decklink_set_configs(AVFormatContext *avctx, decklink_direction_t direction); -int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, enum AVFieldOrder field_order, decklink_direction_t direction = DIRECTION_OUT, int num = 0); -int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction, int num); +int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, enum AVFieldOrder field_order, decklink_direction_t direction = DIRECTION_OUT); +int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction); int ff_decklink_list_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list, int show_inputs, int show_outputs); void ff_decklink_list_devices_legacy(AVFormatContext *avctx, int show_inputs, int show_outputs); int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction = DIRECTION_OUT); diff --git a/libavdevice/decklink_common_c.h b/libavdevice/decklink_common_c.h index b78630b5fc..88b1eae18d 100644 --- a/libavdevice/decklink_common_c.h +++ b/libavdevice/decklink_common_c.h @@ -42,7 +42,6 @@ struct decklink_cctx { int list_formats; int64_t teletext_lines; double preroll; - int v210; int audio_channels; int audio_depth; int duplex_mode; diff --git a/libavdevice/decklink_dec.cpp b/libavdevice/decklink_dec.cpp index ab7f28112e..1fd5adf515 100644 --- a/libavdevice/decklink_dec.cpp +++ b/libavdevice/decklink_dec.cpp @@ -1005,9 +1005,6 @@ av_cold int ff_decklink_read_header(AVFormatContext *avctx) class decklink_input_callback *input_callback; AVStream *st; HRESULT result; - char fname[1024]; - char *tmp; - int mode_num = 0; int ret; ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx)); @@ -1053,24 +1050,12 @@ av_cold int ff_decklink_read_header(AVFormatContext *avctx) /* List available devices. */ if (ctx->list_devices) { + av_log(avctx, AV_LOG_WARNING, "The -list_devices option is deprecated and will be removed. Please use ffmpeg -sources decklink instead.\n"); ff_decklink_list_devices_legacy(avctx, 1, 0); return AVERROR_EXIT; } - if (cctx->v210) { - av_log(avctx, AV_LOG_WARNING, "The bm_v210 option is deprecated and will be removed. 
Please use the -raw_format yuv422p10.\n"); - cctx->raw_format = MKBETAG('v','2','1','0'); - } - - av_strlcpy(fname, avctx->url, sizeof(fname)); - tmp=strchr (fname, '@'); - if (tmp != NULL) { - av_log(avctx, AV_LOG_WARNING, "The @mode syntax is deprecated and will be removed. Please use the -format_code option.\n"); - mode_num = atoi (tmp+1); - *tmp = 0; - } - - ret = ff_decklink_init_device(avctx, fname); + ret = ff_decklink_init_device(avctx, avctx->url); if (ret < 0) return ret; @@ -1111,7 +1096,7 @@ av_cold int ff_decklink_read_header(AVFormatContext *avctx) goto error; } - if (mode_num == 0 && !cctx->format_code) { + if (!cctx->format_code) { if (decklink_autodetect(cctx) < 0) { av_log(avctx, AV_LOG_ERROR, "Cannot Autodetect input stream or No signal\n"); ret = AVERROR(EIO); @@ -1119,9 +1104,9 @@ av_cold int ff_decklink_read_header(AVFormatContext *avctx) } av_log(avctx, AV_LOG_INFO, "Autodetected the input mode\n"); } - if (ff_decklink_set_format(avctx, DIRECTION_IN, mode_num) < 0) { - av_log(avctx, AV_LOG_ERROR, "Could not set mode number %d or format code %s for %s\n", - mode_num, (cctx->format_code) ? cctx->format_code : "(unset)", fname); + if (ff_decklink_set_format(avctx, DIRECTION_IN) < 0) { + av_log(avctx, AV_LOG_ERROR, "Could not set format code %s for %s\n", + cctx->format_code ? cctx->format_code : "(unset)", avctx->url); ret = AVERROR(EIO); goto error; } diff --git a/libavdevice/decklink_dec_c.c b/libavdevice/decklink_dec_c.c index 99439f91ae..b59876994a 100644 --- a/libavdevice/decklink_dec_c.c +++ b/libavdevice/decklink_dec_c.c @@ -33,7 +33,6 @@ static const AVOption options[] = { { "list_devices", "list available devices" , OFFSET(list_devices), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC }, { "list_formats", "list supported formats" , OFFSET(list_formats), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC }, { "format_code", "set format by fourcc" , OFFSET(format_code), AV_OPT_TYPE_STRING, { .str = NULL}, 0, 0, DEC }, - { "bm_v210", "v210 10 bit per channel" , OFFSET(v210), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC }, { "raw_format", "pixel format to be returned by the card when capturing" , OFFSET(raw_format), AV_OPT_TYPE_INT, { .i64 = MKBETAG('2','v','u','y')}, 0, UINT_MAX, DEC, "raw_format" }, { "uyvy422", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MKBETAG('2','v','u','y') }, 0, 0, DEC, "raw_format"}, { "yuv422p10", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MKBETAG('v','2','1','0') }, 0, 0, DEC, "raw_format"}, diff --git a/libavdevice/decklink_enc.cpp b/libavdevice/decklink_enc.cpp index 04b06aee3a..883fdeadfb 100644 --- a/libavdevice/decklink_enc.cpp +++ b/libavdevice/decklink_enc.cpp @@ -568,6 +568,7 @@ av_cold int ff_decklink_write_header(AVFormatContext *avctx) /* List available devices and exit. */ if (ctx->list_devices) { + av_log(avctx, AV_LOG_WARNING, "The -list_devices option is deprecated and will be removed. 
Please use ffmpeg -sinks decklink instead.\n"); ff_decklink_list_devices_legacy(avctx, 0, 1); return AVERROR_EXIT; } diff --git a/libavdevice/version.h b/libavdevice/version.h index 68302908cf..10717564e9 100644 --- a/libavdevice/version.h +++ b/libavdevice/version.h @@ -29,7 +29,7 @@ #define LIBAVDEVICE_VERSION_MAJOR 58 #define LIBAVDEVICE_VERSION_MINOR 9 -#define LIBAVDEVICE_VERSION_MICRO 101 +#define LIBAVDEVICE_VERSION_MICRO 103 #define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ LIBAVDEVICE_VERSION_MINOR, \ diff --git a/libavdevice/xcbgrab.c b/libavdevice/xcbgrab.c index 158f381c04..113cce71a5 100644 --- a/libavdevice/xcbgrab.c +++ b/libavdevice/xcbgrab.c @@ -49,16 +49,15 @@ typedef struct XCBGrabContext { const AVClass *class; - uint8_t *buffer; - xcb_connection_t *conn; xcb_screen_t *screen; xcb_window_t window; #if CONFIG_LIBXCB_SHM - xcb_shm_seg_t segment; + AVBufferPool *shm_pool; #endif int64_t time_frame; AVRational time_base; + int64_t frame_duration; int x, y; int width, height; @@ -71,7 +70,6 @@ typedef struct XCBGrabContext { int region_border; int centered; - const char *video_size; const char *framerate; int has_shm; @@ -86,7 +84,7 @@ static const AVOption options[] = { { "y", "Initial y coordinate.", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D }, { "grab_x", "Initial x coordinate.", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D }, { "grab_y", "Initial y coordinate.", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D }, - { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "vga" }, 0, 0, D }, + { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, D }, { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc" }, 0, 0, D }, { "draw_mouse", "Draw the mouse pointer.", OFFSET(draw_mouse), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, D }, { "follow_mouse", "Move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region.", @@ -197,13 +195,12 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt) return 0; } -static void wait_frame(AVFormatContext *s, AVPacket *pkt) +static int64_t wait_frame(AVFormatContext *s, AVPacket *pkt) { XCBGrabContext *c = s->priv_data; int64_t curtime, delay; - int64_t frame_time = av_rescale_q(1, c->time_base, AV_TIME_BASE_Q); - c->time_frame += frame_time; + c->time_frame += c->frame_duration; for (;;) { curtime = av_gettime(); @@ -213,7 +210,7 @@ static void wait_frame(AVFormatContext *s, AVPacket *pkt) av_usleep(delay); } - pkt->pts = curtime; + return curtime; } #if CONFIG_LIBXCB_SHM @@ -231,31 +228,35 @@ static int check_shm(xcb_connection_t *conn) return 0; } -static int allocate_shm(AVFormatContext *s) +static void free_shm_buffer(void *opaque, uint8_t *data) { - XCBGrabContext *c = s->priv_data; - int size = c->frame_size + AV_INPUT_BUFFER_PADDING_SIZE; + shmdt(data); +} + +static AVBufferRef *allocate_shm_buffer(void *opaque, int size) +{ + xcb_connection_t *conn = opaque; + xcb_shm_seg_t segment; + AVBufferRef *ref; uint8_t *data; int id; - if (c->buffer) - return 0; id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0777); - if (id == -1) { - char errbuf[1024]; - int err = AVERROR(errno); - av_strerror(err, errbuf, sizeof(errbuf)); - av_log(s, AV_LOG_ERROR, "Cannot get %d bytes of shared memory: %s.\n", - size, errbuf); - return err; - } - xcb_shm_attach(c->conn, c->segment, id, 
0); + if (id == -1) + return NULL; + + segment = xcb_generate_id(conn); + xcb_shm_attach(conn, segment, id, 0); data = shmat(id, NULL, 0); shmctl(id, IPC_RMID, 0); if ((intptr_t)data == -1 || !data) - return AVERROR(errno); - c->buffer = data; - return 0; + return NULL; + + ref = av_buffer_create(data, size, free_shm_buffer, (void *)(ptrdiff_t)segment, 0); + if (!ref) + shmdt(data); + + return ref; } static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt) @@ -265,15 +266,19 @@ static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt) xcb_shm_get_image_reply_t *img; xcb_drawable_t drawable = c->screen->root; xcb_generic_error_t *e = NULL; - int ret; + AVBufferRef *buf; + xcb_shm_seg_t segment; - ret = allocate_shm(s); - if (ret < 0) - return ret; + buf = av_buffer_pool_get(c->shm_pool); + if (!buf) { + av_log(s, AV_LOG_ERROR, "Could not get shared memory buffer.\n"); + return AVERROR(ENOMEM); + } + segment = (xcb_shm_seg_t)av_buffer_pool_buffer_get_opaque(buf); iq = xcb_shm_get_image(c->conn, drawable, c->x, c->y, c->width, c->height, ~0, - XCB_IMAGE_FORMAT_Z_PIXMAP, c->segment, 0); + XCB_IMAGE_FORMAT_Z_PIXMAP, segment, 0); img = xcb_shm_get_image_reply(c->conn, iq, &e); xcb_flush(c->conn); @@ -287,12 +292,16 @@ static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt) e->sequence, e->resource_id, e->minor_code, e->major_code); free(e); + av_buffer_unref(&buf); return AVERROR(EACCES); } free(img); - pkt->data = c->buffer; + av_init_packet(pkt); + + pkt->buf = buf; + pkt->data = buf->data; pkt->size = c->frame_size; return 0; @@ -408,8 +417,9 @@ static int xcbgrab_read_packet(AVFormatContext *s, AVPacket *pkt) xcb_query_pointer_reply_t *p = NULL; xcb_get_geometry_reply_t *geo = NULL; int ret = 0; + int64_t pts; - wait_frame(s, pkt); + pts = wait_frame(s, pkt); if (c->follow_mouse || c->draw_mouse) { pc = xcb_query_pointer(c->conn, c->screen->root); @@ -425,11 +435,15 @@ static int xcbgrab_read_packet(AVFormatContext *s, AVPacket *pkt) xcbgrab_update_region(s); #if CONFIG_LIBXCB_SHM - if (c->has_shm && xcbgrab_frame_shm(s, pkt) < 0) + if (c->has_shm && xcbgrab_frame_shm(s, pkt) < 0) { + av_log(s, AV_LOG_WARNING, "Continuing without shared memory.\n"); c->has_shm = 0; + } #endif if (!c->has_shm) ret = xcbgrab_frame(s, pkt); + pkt->dts = pkt->pts = pts; + pkt->duration = c->frame_duration; #if CONFIG_LIBXCB_XFIXES if (ret >= 0 && c->draw_mouse && p->same_screen) @@ -447,9 +461,7 @@ static av_cold int xcbgrab_read_close(AVFormatContext *s) XCBGrabContext *ctx = s->priv_data; #if CONFIG_LIBXCB_SHM - if (ctx->buffer) { - shmdt(ctx->buffer); - } + av_buffer_pool_uninit(&ctx->shm_pool); #endif xcb_disconnect(ctx->conn); @@ -515,6 +527,12 @@ static int pixfmt_from_pixmap_format(AVFormatContext *s, int depth, if (*pix_fmt) { c->bpp = fmt->bits_per_pixel; c->frame_size = c->width * c->height * fmt->bits_per_pixel / 8; +#if CONFIG_LIBXCB_SHM + c->shm_pool = av_buffer_pool_init2(c->frame_size + AV_INPUT_BUFFER_PADDING_SIZE, + c->conn, allocate_shm_buffer, NULL); + if (!c->shm_pool) + return AVERROR(ENOMEM); +#endif return 0; } @@ -536,10 +554,6 @@ static int create_stream(AVFormatContext *s) if (!st) return AVERROR(ENOMEM); - ret = av_parse_video_size(&c->width, &c->height, c->video_size); - if (ret < 0) - return ret; - ret = av_parse_video_rate(&st->avg_frame_rate, c->framerate); if (ret < 0) return ret; @@ -551,6 +565,11 @@ static int create_stream(AVFormatContext *s) if (!geo) return AVERROR_EXTERNAL; + if (!c->width || !c->height) { + c->width = geo->width; + c->height = 
geo->height; + } + if (c->x + c->width > geo->width || c->y + c->height > geo->height) { av_log(s, AV_LOG_ERROR, @@ -565,6 +584,7 @@ static int create_stream(AVFormatContext *s) c->time_base = (AVRational){ st->avg_frame_rate.den, st->avg_frame_rate.num }; + c->frame_duration = av_rescale_q(1, c->time_base, AV_TIME_BASE_Q); c->time_frame = av_gettime(); st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; @@ -680,8 +700,7 @@ static av_cold int xcbgrab_read_header(AVFormatContext *s) } #if CONFIG_LIBXCB_SHM - if ((c->has_shm = check_shm(c->conn))) - c->segment = xcb_generate_id(c->conn); + c->has_shm = check_shm(c->conn); #endif #if CONFIG_LIBXCB_XFIXES diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 446c802b98..58b3077dec 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -254,6 +254,7 @@ OBJS-$(CONFIG_FRAMEPACK_FILTER) += vf_framepack.o OBJS-$(CONFIG_FRAMERATE_FILTER) += vf_framerate.o OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o OBJS-$(CONFIG_FREEZEDETECT_FILTER) += vf_freezedetect.o +OBJS-$(CONFIG_FREEZEFRAMES_FILTER) += vf_freezeframes.o OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o OBJS-$(CONFIG_FSPP_FILTER) += vf_fspp.o OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o @@ -401,6 +402,7 @@ OBJS-$(CONFIG_SWAPRECT_FILTER) += vf_swaprect.o OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o framesync.o OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o +OBJS-$(CONFIG_THISTOGRAM_FILTER) += vf_histogram.o OBJS-$(CONFIG_THRESHOLD_FILTER) += vf_threshold.o framesync.o OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o OBJS-$(CONFIG_THUMBNAIL_CUDA_FILTER) += vf_thumbnail_cuda.o vf_thumbnail_cuda.ptx.o @@ -411,6 +413,7 @@ OBJS-$(CONFIG_TMIX_FILTER) += vf_mix.o framesync.o OBJS-$(CONFIG_TONEMAP_FILTER) += vf_tonemap.o colorspace.o OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o colorspace.o opencl.o \ opencl/tonemap.o opencl/colorspace_common.o +OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER) += vf_tonemap_vaapi.o vaapi_vpp.o OBJS-$(CONFIG_TPAD_FILTER) += vf_tpad.o OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER) += vf_transpose_npp.o diff --git a/libavfilter/af_adeclick.c b/libavfilter/af_adeclick.c index 092f290888..e86a1f7bef 100644 --- a/libavfilter/af_adeclick.c +++ b/libavfilter/af_adeclick.c @@ -63,6 +63,7 @@ typedef struct AudioDeclickContext { int hop_size; int overlap_skip; + AVFrame *enabled; AVFrame *in; AVFrame *out; AVFrame *buffer; @@ -77,6 +78,7 @@ typedef struct AudioDeclickContext { int samples_left; int eof; + AVAudioFifo *efifo; AVAudioFifo *fifo; double *window_func_lut; @@ -159,13 +161,17 @@ static int config_input(AVFilterLink *inlink) av_frame_free(&s->out); av_frame_free(&s->buffer); av_frame_free(&s->is); + s->enabled = ff_get_audio_buffer(inlink, s->window_size); s->in = ff_get_audio_buffer(inlink, s->window_size); s->out = ff_get_audio_buffer(inlink, s->window_size); s->buffer = ff_get_audio_buffer(inlink, s->window_size * 2); s->is = ff_get_audio_buffer(inlink, s->window_size); - if (!s->in || !s->out || !s->buffer || !s->is) + if (!s->in || !s->out || !s->buffer || !s->is || !s->enabled) return AVERROR(ENOMEM); + s->efifo = av_audio_fifo_alloc(inlink->format, 1, s->window_size); + if (!s->efifo) + return AVERROR(ENOMEM); s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->window_size); if (!s->fifo) return AVERROR(ENOMEM); @@ -513,14 +519,20 @@ static int filter_channel(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) nb_errors = s->detector(s, c, sigmae, 
c->detection, c->acoefficients, c->click, index, src, dst); if (nb_errors > 0) { + double *enabled = (double *)s->enabled->extended_data[0]; + ret = interpolation(c, src, s->ar_order, c->acoefficients, index, nb_errors, c->auxiliary, interpolated); if (ret < 0) return ret; + av_audio_fifo_peek(s->efifo, (void**)s->enabled->extended_data, s->window_size); + for (j = 0; j < nb_errors; j++) { - dst[index[j]] = interpolated[j]; - is[index[j]] = 1; + if (enabled[index[j]]) { + dst[index[j]] = interpolated[j]; + is[index[j]] = 1; + } } } } else { @@ -580,6 +592,7 @@ static int filter_frame(AVFilterLink *inlink) } av_audio_fifo_drain(s->fifo, s->hop_size); + av_audio_fifo_drain(s->efifo, s->hop_size); if (s->samples_left > 0) out->nb_samples = FFMIN(s->hop_size, s->samples_left); @@ -621,11 +634,17 @@ static int activate(AVFilterContext *ctx) if (ret < 0) return ret; if (ret > 0) { + double *e = (double *)s->enabled->extended_data[0]; + if (s->pts == AV_NOPTS_VALUE) s->pts = in->pts; ret = av_audio_fifo_write(s->fifo, (void **)in->extended_data, in->nb_samples); + for (int i = 0; i < in->nb_samples; i++) + e[i] = !ctx->is_disabled; + + av_audio_fifo_write(s->efifo, (void**)s->enabled->extended_data, in->nb_samples); av_frame_free(&in); if (ret < 0) return ret; @@ -684,7 +703,9 @@ static av_cold void uninit(AVFilterContext *ctx) s->nb_samples, 100. * s->detected_errors / s->nb_samples); av_audio_fifo_free(s->fifo); + av_audio_fifo_free(s->efifo); av_freep(&s->window_func_lut); + av_frame_free(&s->enabled); av_frame_free(&s->in); av_frame_free(&s->out); av_frame_free(&s->buffer); @@ -744,7 +765,7 @@ AVFilter ff_af_adeclick = { .uninit = uninit, .inputs = inputs, .outputs = outputs, - .flags = AVFILTER_FLAG_SLICE_THREADS, + .flags = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, }; static const AVOption adeclip_options[] = { @@ -772,5 +793,5 @@ AVFilter ff_af_adeclip = { .uninit = uninit, .inputs = inputs, .outputs = outputs, - .flags = AVFILTER_FLAG_SLICE_THREADS, + .flags = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, }; diff --git a/libavfilter/af_afir.c b/libavfilter/af_afir.c index 31919f62e9..7c7e8458d4 100644 --- a/libavfilter/af_afir.c +++ b/libavfilter/af_afir.c @@ -25,6 +25,7 @@ #include +#include "libavutil/avstring.h" #include "libavutil/common.h" #include "libavutil/float_dsp.h" #include "libavutil/intreadwrite.h" @@ -56,10 +57,17 @@ static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t le sum[2 * n] += t[2 * n] * c[2 * n]; } +static void direct(const float *in, const FFTComplex *ir, int len, float *out) +{ + for (int n = 0; n < len; n++) + for (int m = 0; m <= n; m++) + out[n] += ir[m].re * in[n - m]; +} + static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset) { AudioFIRContext *s = ctx->priv; - const float *in = (const float *)s->in[0]->extended_data[ch] + offset; + const float *in = (const float *)s->in->extended_data[ch] + offset; float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset; const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset); int n, i, j; @@ -70,8 +78,13 @@ static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset) float *dst = (float *)seg->output->extended_data[ch]; float *sum = (float *)seg->sum->extended_data[ch]; - s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(nb_samples, 4)); - emms_c(); + if (s->min_part_size >= 8) { + s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, 
s->dry_gain, FFALIGN(nb_samples, 4)); + emms_c(); + } else { + for (n = 0; n < nb_samples; n++) + src[seg->input_offset + n] = in[n] * s->dry_gain; + } seg->output_offset[ch] += s->min_part_size; if (seg->output_offset[ch] == seg->part_size) { @@ -86,6 +99,32 @@ static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset) continue; } + if (seg->part_size < 8) { + memset(dst, 0, sizeof(*dst) * seg->part_size * seg->nb_partitions); + + j = seg->part_index[ch]; + + for (i = 0; i < seg->nb_partitions; i++) { + const int coffset = j * seg->coeff_size; + const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset; + + direct(src, coeff, nb_samples, dst); + + if (j == 0) + j = seg->nb_partitions; + j--; + } + + seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions; + + memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src)); + + for (n = 0; n < nb_samples; n++) { + ptr[n] += dst[n]; + } + continue; + } + memset(sum, 0, sizeof(*sum) * seg->fft_length); block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size; memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size)); @@ -132,8 +171,13 @@ static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset) } } - s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4)); - emms_c(); + if (s->min_part_size >= 8) { + s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4)); + emms_c(); + } else { + for (n = 0; n < nb_samples; n++) + ptr[n] *= s->wet_gain; + } return 0; } @@ -175,7 +219,7 @@ static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink) if (s->pts == AV_NOPTS_VALUE) s->pts = in->pts; - s->in[0] = in; + s->in = in; ctx->internal->execute(ctx, fir_channels, out, NULL, FFMIN(outlink->channels, ff_filter_get_nb_threads(ctx))); @@ -184,7 +228,7 @@ static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink) s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base); av_frame_free(&in); - s->in[0] = NULL; + s->in = NULL; return ff_filter_frame(outlink, out); } @@ -255,9 +299,9 @@ static void draw_response(AVFilterContext *ctx, AVFrame *out) if (!mag || !phase || !delay) goto end; - channel = av_clip(s->ir_channel, 0, s->in[1]->channels - 1); + channel = av_clip(s->ir_channel, 0, s->ir[s->selir]->channels - 1); for (i = 0; i < s->w; i++) { - const float *src = (const float *)s->in[1]->extended_data[channel]; + const float *src = (const float *)s->ir[s->selir]->extended_data[channel]; double w = i * M_PI / (s->w - 1); double div, real_num = 0., imag_num = 0., real = 0., imag = 0.; @@ -350,7 +394,7 @@ static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, if (!seg->part_index || !seg->output_offset) return AVERROR(ENOMEM); - for (int ch = 0; ch < ctx->inputs[0]->channels; ch++) { + for (int ch = 0; ch < ctx->inputs[0]->channels && part_size >= 8; ch++) { seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C); seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R); if (!seg->rdft[ch] || !seg->irdft[ch]) @@ -360,7 +404,7 @@ static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length); seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size); seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size); - seg->coeff = 
ff_get_audio_buffer(ctx->inputs[1], seg->nb_partitions * seg->coeff_size * 2); + seg->coeff = ff_get_audio_buffer(ctx->inputs[1 + s->selir], seg->nb_partitions * seg->coeff_size * 2); seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size); seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size); if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output) @@ -369,79 +413,116 @@ static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, return 0; } +static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg) +{ + AudioFIRContext *s = ctx->priv; + + if (seg->rdft) { + for (int ch = 0; ch < s->nb_channels; ch++) { + av_rdft_end(seg->rdft[ch]); + } + } + av_freep(&seg->rdft); + + if (seg->irdft) { + for (int ch = 0; ch < s->nb_channels; ch++) { + av_rdft_end(seg->irdft[ch]); + } + } + av_freep(&seg->irdft); + + av_freep(&seg->output_offset); + av_freep(&seg->part_index); + + av_frame_free(&seg->block); + av_frame_free(&seg->sum); + av_frame_free(&seg->buffer); + av_frame_free(&seg->coeff); + av_frame_free(&seg->input); + av_frame_free(&seg->output); + seg->input_size = 0; +} + static int convert_coeffs(AVFilterContext *ctx) { AudioFIRContext *s = ctx->priv; - int left, offset = 0, part_size, max_part_size; - int ret, i, ch, n; + int ret, i, ch, n, cur_nb_taps; float power = 0; - s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1]); - if (s->nb_taps <= 0) - return AVERROR(EINVAL); + if (!s->nb_taps) { + int part_size, max_part_size; + int left, offset = 0; - if (s->minp > s->maxp) { - s->maxp = s->minp; + s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1 + s->selir]); + if (s->nb_taps <= 0) + return AVERROR(EINVAL); + + if (s->minp > s->maxp) { + s->maxp = s->minp; + } + + left = s->nb_taps; + part_size = 1 << av_log2(s->minp); + max_part_size = 1 << av_log2(s->maxp); + + s->min_part_size = part_size; + + for (i = 0; left > 0; i++) { + int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0); + int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size); + + s->nb_segments = i + 1; + ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size); + if (ret < 0) + return ret; + offset += nb_partitions * part_size; + left -= nb_partitions * part_size; + part_size *= 2; + part_size = FFMIN(part_size, max_part_size); + } } - left = s->nb_taps; - part_size = 1 << av_log2(s->minp); - max_part_size = 1 << av_log2(s->maxp); - - s->min_part_size = part_size; - - for (i = 0; left > 0; i++) { - int step = part_size == max_part_size ? 
INT_MAX : 1 + (i == 0); - int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size); - - s->nb_segments = i + 1; - ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size); + if (!s->ir[s->selir]) { + ret = ff_inlink_consume_samples(ctx->inputs[1 + s->selir], s->nb_taps, s->nb_taps, &s->ir[s->selir]); if (ret < 0) return ret; - offset += nb_partitions * part_size; - left -= nb_partitions * part_size; - part_size *= 2; - part_size = FFMIN(part_size, max_part_size); + if (ret == 0) + return AVERROR_BUG; } - ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_taps, s->nb_taps, &s->in[1]); - if (ret < 0) - return ret; - if (ret == 0) - return AVERROR_BUG; - if (s->response) draw_response(ctx, s->video); s->gain = 1; + cur_nb_taps = s->ir[s->selir]->nb_samples; switch (s->gtype) { case -1: /* nothing to do */ break; case 0: - for (ch = 0; ch < ctx->inputs[1]->channels; ch++) { - float *time = (float *)s->in[1]->extended_data[!s->one2many * ch]; + for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) { + float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch]; - for (i = 0; i < s->nb_taps; i++) + for (i = 0; i < cur_nb_taps; i++) power += FFABS(time[i]); } - s->gain = ctx->inputs[1]->channels / power; + s->gain = ctx->inputs[1 + s->selir]->channels / power; break; case 1: - for (ch = 0; ch < ctx->inputs[1]->channels; ch++) { - float *time = (float *)s->in[1]->extended_data[!s->one2many * ch]; + for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) { + float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch]; - for (i = 0; i < s->nb_taps; i++) + for (i = 0; i < cur_nb_taps; i++) power += time[i]; } - s->gain = ctx->inputs[1]->channels / power; + s->gain = ctx->inputs[1 + s->selir]->channels / power; break; case 2: - for (ch = 0; ch < ctx->inputs[1]->channels; ch++) { - float *time = (float *)s->in[1]->extended_data[!s->one2many * ch]; + for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) { + float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch]; - for (i = 0; i < s->nb_taps; i++) + for (i = 0; i < cur_nb_taps; i++) power += time[i] * time[i]; } s->gain = sqrtf(ch / power); @@ -452,17 +533,17 @@ static int convert_coeffs(AVFilterContext *ctx) s->gain = FFMIN(s->gain * s->ir_gain, 1.f); av_log(ctx, AV_LOG_DEBUG, "power %f, gain %f\n", power, s->gain); - for (ch = 0; ch < ctx->inputs[1]->channels; ch++) { - float *time = (float *)s->in[1]->extended_data[!s->one2many * ch]; + for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) { + float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch]; - s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(s->nb_taps, 4)); + s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(cur_nb_taps, 4)); } - av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", s->nb_taps); + av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps); av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments); - for (ch = 0; ch < ctx->inputs[1]->channels; ch++) { - float *time = (float *)s->in[1]->extended_data[!s->one2many * ch]; + for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) { + float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch]; int toffset = 0; for (i = FFMAX(1, s->length * s->nb_taps); i < s->nb_taps; i++) @@ -483,6 +564,14 @@ static int convert_coeffs(AVFilterContext *ctx) const int remaining = s->nb_taps - toffset; const int size = remaining >= seg->part_size ? 
seg->part_size : remaining; + if (size < 8) { + for (n = 0; n < size; n++) + coeff[coffset + n].re = time[toffset + n]; + + toffset += size; + continue; + } + memset(block, 0, sizeof(*block) * seg->fft_length); memcpy(block, time + toffset, size * sizeof(*block)); @@ -510,7 +599,6 @@ static int convert_coeffs(AVFilterContext *ctx) } } - av_frame_free(&s->in[1]); s->have_coeffs = 1; return 0; @@ -543,26 +631,26 @@ static int activate(AVFilterContext *ctx) FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx); if (s->response) FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx); - if (!s->eof_coeffs) { + if (!s->eof_coeffs[s->selir]) { AVFrame *ir = NULL; - ret = check_ir(ctx->inputs[1], ir); + ret = check_ir(ctx->inputs[1 + s->selir], ir); if (ret < 0) return ret; - if (ff_outlink_get_status(ctx->inputs[1]) == AVERROR_EOF) - s->eof_coeffs = 1; + if (ff_outlink_get_status(ctx->inputs[1 + s->selir]) == AVERROR_EOF) + s->eof_coeffs[s->selir] = 1; - if (!s->eof_coeffs) { + if (!s->eof_coeffs[s->selir]) { if (ff_outlink_frame_wanted(ctx->outputs[0])) - ff_inlink_request_frame(ctx->inputs[1]); + ff_inlink_request_frame(ctx->inputs[1 + s->selir]); else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1])) - ff_inlink_request_frame(ctx->inputs[1]); + ff_inlink_request_frame(ctx->inputs[1 + s->selir]); return 0; } } - if (!s->have_coeffs && s->eof_coeffs) { + if (!s->have_coeffs && s->eof_coeffs[s->selir]) { ret = convert_coeffs(ctx); if (ret < 0) return ret; @@ -582,8 +670,12 @@ static int activate(AVFilterContext *ctx) int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base); if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) { + AVFrame *clone; s->video->pts = new_pts; - return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video)); + clone = av_frame_clone(s->video); + if (!clone) + return AVERROR(ENOMEM); + return ff_filter_frame(ctx->outputs[1], clone); } } @@ -658,8 +750,10 @@ static int query_formats(AVFilterContext *ctx) return ret; if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0) return ret; - if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[1]->out_channel_layouts)) < 0) - return ret; + for (int i = 1; i < ctx->nb_inputs; i++) { + if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[i]->out_channel_layouts)) < 0) + return ret; + } } formats = ff_make_format_list(sample_fmts); @@ -675,49 +769,19 @@ static int config_output(AVFilterLink *outlink) AVFilterContext *ctx = outlink->src; AudioFIRContext *s = ctx->priv; - s->one2many = ctx->inputs[1]->channels == 1; + s->one2many = ctx->inputs[1 + s->selir]->channels == 1; outlink->sample_rate = ctx->inputs[0]->sample_rate; outlink->time_base = ctx->inputs[0]->time_base; outlink->channel_layout = ctx->inputs[0]->channel_layout; outlink->channels = ctx->inputs[0]->channels; s->nb_channels = outlink->channels; - s->nb_coef_channels = ctx->inputs[1]->channels; + s->nb_coef_channels = ctx->inputs[1 + s->selir]->channels; s->pts = AV_NOPTS_VALUE; return 0; } -static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg) -{ - AudioFIRContext *s = ctx->priv; - - if (seg->rdft) { - for (int ch = 0; ch < s->nb_channels; ch++) { - av_rdft_end(seg->rdft[ch]); - } - } - av_freep(&seg->rdft); - - if (seg->irdft) { - for (int ch = 0; ch < s->nb_channels; ch++) { - av_rdft_end(seg->irdft[ch]); - } - } - av_freep(&seg->irdft); - - av_freep(&seg->output_offset); - av_freep(&seg->part_index); - - av_frame_free(&seg->block); - 
av_frame_free(&seg->sum); - av_frame_free(&seg->buffer); - av_frame_free(&seg->coeff); - av_frame_free(&seg->input); - av_frame_free(&seg->output); - seg->input_size = 0; -} - static av_cold void uninit(AVFilterContext *ctx) { AudioFIRContext *s = ctx->priv; @@ -727,7 +791,13 @@ static av_cold void uninit(AVFilterContext *ctx) } av_freep(&s->fdsp); - av_frame_free(&s->in[1]); + + for (int i = 0; i < s->nb_irs; i++) { + av_frame_free(&s->ir[i]); + } + + for (int i = 0; i < ctx->nb_inputs; i++) + av_freep(&ctx->input_pads[i].name); for (int i = 0; i < ctx->nb_outputs; i++) av_freep(&ctx->output_pads[i].name); @@ -767,7 +837,37 @@ static av_cold int init(AVFilterContext *ctx) AVFilterPad pad, vpad; int ret; - pad = (AVFilterPad){ + pad = (AVFilterPad) { + .name = av_strdup("main"), + .type = AVMEDIA_TYPE_AUDIO, + }; + + if (!pad.name) + return AVERROR(ENOMEM); + + ret = ff_insert_inpad(ctx, 0, &pad); + if (ret < 0) { + av_freep(&pad.name); + return ret; + } + + for (int n = 0; n < s->nb_irs; n++) { + pad = (AVFilterPad) { + .name = av_asprintf("ir%d", n), + .type = AVMEDIA_TYPE_AUDIO, + }; + + if (!pad.name) + return AVERROR(ENOMEM); + + ret = ff_insert_inpad(ctx, n + 1, &pad); + if (ret < 0) { + av_freep(&pad.name); + return ret; + } + } + + pad = (AVFilterPad) { .name = av_strdup("default"), .type = AVMEDIA_TYPE_AUDIO, .config_props = config_output, @@ -809,18 +909,31 @@ static av_cold int init(AVFilterContext *ctx) return 0; } -static const AVFilterPad afir_inputs[] = { - { - .name = "main", - .type = AVMEDIA_TYPE_AUDIO, - },{ - .name = "ir", - .type = AVMEDIA_TYPE_AUDIO, - }, - { NULL } -}; +static int process_command(AVFilterContext *ctx, + const char *cmd, + const char *arg, + char *res, + int res_len, + int flags) +{ + AudioFIRContext *s = ctx->priv; + int prev_ir = s->selir; + int ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags); + + if (ret < 0) + return ret; + + s->selir = FFMIN(s->nb_irs - 1, s->selir); + + if (prev_ir != s->selir) { + s->have_coeffs = 0; + } + + return 0; +} #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM #define OFFSET(x) offsetof(AudioFIRContext, x) @@ -842,8 +955,10 @@ static const AVOption afir_options[] = { { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF }, { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF }, { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF }, - { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 8, 32768, AF }, + { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 1, 32768, AF }, { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 8, 32768, AF }, + { "nbirs", "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT, {.i64=1}, 1, 32, AF }, + { "ir", "select IR", OFFSET(selir), AV_OPT_TYPE_INT, {.i64=0}, 0, 31, AFR }, { NULL } }; @@ -851,14 +966,15 @@ AVFILTER_DEFINE_CLASS(afir); AVFilter ff_af_afir = { .name = "afir", - .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in 2nd stream."), + .description = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."), .priv_size = sizeof(AudioFIRContext), 
.priv_class = &afir_class, .query_formats = query_formats, .init = init, .activate = activate, .uninit = uninit, - .inputs = afir_inputs, - .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS | + .process_command = process_command, + .flags = AVFILTER_FLAG_DYNAMIC_INPUTS | + AVFILTER_FLAG_DYNAMIC_OUTPUTS | AVFILTER_FLAG_SLICE_THREADS, }; diff --git a/libavfilter/af_afir.h b/libavfilter/af_afir.h index f665c0ef80..4f44675848 100644 --- a/libavfilter/af_afir.h +++ b/libavfilter/af_afir.h @@ -74,10 +74,12 @@ typedef struct AudioFIRContext { int ir_channel; int minp; int maxp; + int nb_irs; + int selir; float gain; - int eof_coeffs; + int eof_coeffs[32]; int have_coeffs; int nb_taps; int nb_channels; @@ -87,7 +89,8 @@ typedef struct AudioFIRContext { AudioFIRSegment seg[1024]; int nb_segments; - AVFrame *in[2]; + AVFrame *in; + AVFrame *ir[32]; AVFrame *video; int min_part_size; int64_t pts; diff --git a/libavfilter/af_aformat.c b/libavfilter/af_aformat.c index e43149561a..1a702778c3 100644 --- a/libavfilter/af_aformat.c +++ b/libavfilter/af_aformat.c @@ -50,8 +50,11 @@ typedef struct AFormatContext { #define F AV_OPT_FLAG_FILTERING_PARAM static const AVOption aformat_options[] = { { "sample_fmts", "A '|'-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "f", "A '|'-separated list of sample formats.", OFFSET(formats_str), AV_OPT_TYPE_STRING, .flags = A|F }, { "sample_rates", "A '|'-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "r", "A '|'-separated list of sample rates.", OFFSET(sample_rates_str), AV_OPT_TYPE_STRING, .flags = A|F }, { "channel_layouts", "A '|'-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F }, + { "cl", "A '|'-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F }, { NULL } }; diff --git a/libavfilter/af_aiir.c b/libavfilter/af_aiir.c index 717388f450..89c8936c2f 100644 --- a/libavfilter/af_aiir.c +++ b/libavfilter/af_aiir.c @@ -1037,8 +1037,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base); if (new_pts > old_pts) { + AVFrame *clone; + s->video->pts = new_pts; - ret = ff_filter_frame(outlink, av_frame_clone(s->video)); + clone = av_frame_clone(s->video); + if (!clone) + return AVERROR(ENOMEM); + ret = ff_filter_frame(outlink, clone); if (ret < 0) return ret; } diff --git a/libavfilter/af_amix.c b/libavfilter/af_amix.c index 89a1b0568f..af8ad58262 100644 --- a/libavfilter/af_amix.c +++ b/libavfilter/af_amix.c @@ -182,7 +182,7 @@ typedef struct MixContext { #define F AV_OPT_FLAG_FILTERING_PARAM static const AVOption amix_options[] = { { "inputs", "Number of inputs.", - OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 1024, A|F }, + OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT16_MAX, A|F }, { "duration", "How to determine the end-of-stream.", OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" }, { "longest", "Duration of longest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST }, 0, 0, A|F, "duration" }, diff --git a/libavfilter/af_anequalizer.c b/libavfilter/af_anequalizer.c index ee82474083..c974fd5abc 100644 --- a/libavfilter/af_anequalizer.c +++ b/libavfilter/af_anequalizer.c @@ -733,13 +733,18 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf) } if (s->draw_curves) { + AVFrame *clone; + const int64_t pts = buf->pts + 
av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate }, outlink->time_base); int ret; s->video->pts = pts; - ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video)); + clone = av_frame_clone(s->video); + if (!clone) + return AVERROR(ENOMEM); + ret = ff_filter_frame(ctx->outputs[1], clone); if (ret < 0) return ret; } diff --git a/libavfilter/af_crystalizer.c b/libavfilter/af_crystalizer.c index 5b27e1fb79..4df6a4b609 100644 --- a/libavfilter/af_crystalizer.c +++ b/libavfilter/af_crystalizer.c @@ -34,7 +34,7 @@ typedef struct CrystalizerContext { } CrystalizerContext; #define OFFSET(x) offsetof(CrystalizerContext, x) -#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption crystalizer_options[] = { { "i", "set intensity", OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.0}, 0, 10, A }, @@ -212,7 +212,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) } s->filter((void **)out->extended_data, (void **)s->prev->extended_data, (const void **)in->extended_data, - in->nb_samples, in->channels, s->mult, s->clip); + in->nb_samples, in->channels, ctx->is_disabled ? 0.f : s->mult, s->clip); if (out != in) av_frame_free(&in); @@ -254,4 +254,6 @@ AVFilter ff_af_crystalizer = { .uninit = uninit, .inputs = inputs, .outputs = outputs, + .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, + .process_command = ff_filter_process_command, }; diff --git a/libavfilter/af_dynaudnorm.c b/libavfilter/af_dynaudnorm.c index 639503384e..365453d60d 100644 --- a/libavfilter/af_dynaudnorm.c +++ b/libavfilter/af_dynaudnorm.c @@ -29,7 +29,10 @@ #include "libavutil/avassert.h" #include "libavutil/opt.h" -#define FF_BUFQUEUE_SIZE 302 +#define MIN_FILTER_SIZE 3 +#define MAX_FILTER_SIZE 301 + +#define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1) #include "libavfilter/bufferqueue.h" #include "audio.h" @@ -37,11 +40,16 @@ #include "filters.h" #include "internal.h" +typedef struct local_gain { + double max_gain; + double threshold; +} local_gain; + typedef struct cqueue { double *elements; int size; + int max_size; int nb_elements; - int first; } cqueue; typedef struct DynamicAudioNormalizerContext { @@ -60,26 +68,26 @@ typedef struct DynamicAudioNormalizerContext { double max_amplification; double target_rms; double compress_factor; + double threshold; double *prev_amplification_factor; double *dc_correction_value; double *compress_threshold; - double *fade_factors[2]; double *weights; int channels; - int delay; int eof; int64_t pts; cqueue **gain_history_original; cqueue **gain_history_minimum; cqueue **gain_history_smoothed; + cqueue **threshold_history; cqueue *is_enabled; } DynamicAudioNormalizerContext; #define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x) -#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption dynaudnorm_options[] = { { "framelen", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS }, @@ -100,6 +108,8 @@ static const AVOption dynaudnorm_options[] = { { "b", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS }, { "compress", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS }, { "s", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS }, + 
{ "threshold", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS }, + { "t", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS }, { NULL } }; @@ -110,8 +120,8 @@ static av_cold int init(AVFilterContext *ctx) DynamicAudioNormalizerContext *s = ctx->priv; if (!(s->filter_size & 1)) { - av_log(ctx, AV_LOG_ERROR, "filter size %d is invalid. Must be an odd value.\n", s->filter_size); - return AVERROR(EINVAL); + av_log(ctx, AV_LOG_WARNING, "filter size %d is invalid. Changing to an odd value.\n", s->filter_size); + s->filter_size |= 1; } return 0; @@ -153,30 +163,22 @@ static inline int frame_size(int sample_rate, int frame_len_msec) return frame_size + (frame_size % 2); } -static void precalculate_fade_factors(double *fade_factors[2], int frame_len) -{ - const double step_size = 1.0 / frame_len; - int pos; - - for (pos = 0; pos < frame_len; pos++) { - fade_factors[0][pos] = 1.0 - (step_size * (pos + 1.0)); - fade_factors[1][pos] = 1.0 - fade_factors[0][pos]; - } -} - -static cqueue *cqueue_create(int size) +static cqueue *cqueue_create(int size, int max_size) { cqueue *q; + if (max_size < size) + return NULL; + q = av_malloc(sizeof(cqueue)); if (!q) return NULL; + q->max_size = max_size; q->size = size; q->nb_elements = 0; - q->first = 0; - q->elements = av_malloc_array(size, sizeof(double)); + q->elements = av_malloc_array(max_size, sizeof(double)); if (!q->elements) { av_free(q); return NULL; @@ -199,17 +201,14 @@ static int cqueue_size(cqueue *q) static int cqueue_empty(cqueue *q) { - return !q->nb_elements; + return q->nb_elements <= 0; } static int cqueue_enqueue(cqueue *q, double element) { - int i; + av_assert2(q->nb_elements < q->max_size); - av_assert2(q->nb_elements != q->size); - - i = (q->first + q->nb_elements) % q->size; - q->elements[i] = element; + q->elements[q->nb_elements] = element; q->nb_elements++; return 0; @@ -218,15 +217,15 @@ static int cqueue_enqueue(cqueue *q, double element) static double cqueue_peek(cqueue *q, int index) { av_assert2(index < q->nb_elements); - return q->elements[(q->first + index) % q->size]; + return q->elements[index]; } static int cqueue_dequeue(cqueue *q, double *element) { av_assert2(!cqueue_empty(q)); - *element = q->elements[q->first]; - q->first = (q->first + 1) % q->size; + *element = q->elements[0]; + memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double)); q->nb_elements--; return 0; @@ -236,12 +235,34 @@ static int cqueue_pop(cqueue *q) { av_assert2(!cqueue_empty(q)); - q->first = (q->first + 1) % q->size; + memmove(&q->elements[0], &q->elements[1], (q->nb_elements - 1) * sizeof(double)); q->nb_elements--; return 0; } +static void cqueue_resize(cqueue *q, int new_size) +{ + av_assert2(q->max_size >= new_size); + av_assert2(MIN_FILTER_SIZE <= new_size); + + if (new_size > q->nb_elements) { + const int side = (new_size - q->nb_elements) / 2; + + memmove(q->elements + side, q->elements, sizeof(double) * q->nb_elements); + for (int i = 0; i < side; i++) + q->elements[i] = q->elements[side]; + q->nb_elements = new_size - 1 - side; + } else { + int count = (q->size - new_size + 1) / 2; + + while (count-- > 0) + cqueue_pop(q); + } + + q->size = new_size; +} + static void init_gaussian_filter(DynamicAudioNormalizerContext *s) { double total_weight = 0.0; @@ -277,8 +298,6 @@ static av_cold void uninit(AVFilterContext *ctx) av_freep(&s->prev_amplification_factor); av_freep(&s->dc_correction_value); av_freep(&s->compress_threshold); 
- av_freep(&s->fade_factors[0]); - av_freep(&s->fade_factors[1]); for (c = 0; c < s->channels; c++) { if (s->gain_history_original) @@ -287,11 +306,14 @@ static av_cold void uninit(AVFilterContext *ctx) cqueue_free(s->gain_history_minimum[c]); if (s->gain_history_smoothed) cqueue_free(s->gain_history_smoothed[c]); + if (s->threshold_history) + cqueue_free(s->threshold_history[c]); } av_freep(&s->gain_history_original); av_freep(&s->gain_history_minimum); av_freep(&s->gain_history_smoothed); + av_freep(&s->threshold_history); cqueue_free(s->is_enabled); s->is_enabled = NULL; @@ -309,51 +331,50 @@ static int config_input(AVFilterLink *inlink) uninit(ctx); + s->channels = inlink->channels; s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec); av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len); - s->fade_factors[0] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[0])); - s->fade_factors[1] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[1])); - s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor)); s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value)); s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold)); s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original)); s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum)); s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed)); - s->weights = av_malloc_array(s->filter_size, sizeof(*s->weights)); - s->is_enabled = cqueue_create(s->filter_size); + s->threshold_history = av_calloc(inlink->channels, sizeof(*s->threshold_history)); + s->weights = av_malloc_array(MAX_FILTER_SIZE, sizeof(*s->weights)); + s->is_enabled = cqueue_create(s->filter_size, MAX_FILTER_SIZE); if (!s->prev_amplification_factor || !s->dc_correction_value || - !s->compress_threshold || !s->fade_factors[0] || !s->fade_factors[1] || + !s->compress_threshold || !s->gain_history_original || !s->gain_history_minimum || - !s->gain_history_smoothed || !s->is_enabled || !s->weights) + !s->gain_history_smoothed || !s->threshold_history || + !s->is_enabled || !s->weights) return AVERROR(ENOMEM); for (c = 0; c < inlink->channels; c++) { s->prev_amplification_factor[c] = 1.0; - s->gain_history_original[c] = cqueue_create(s->filter_size); - s->gain_history_minimum[c] = cqueue_create(s->filter_size); - s->gain_history_smoothed[c] = cqueue_create(s->filter_size); + s->gain_history_original[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE); + s->gain_history_minimum[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE); + s->gain_history_smoothed[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE); + s->threshold_history[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE); if (!s->gain_history_original[c] || !s->gain_history_minimum[c] || - !s->gain_history_smoothed[c]) + !s->gain_history_smoothed[c] || !s->threshold_history[c]) return AVERROR(ENOMEM); } - precalculate_fade_factors(s->fade_factors, s->frame_len); init_gaussian_filter(s); - s->channels = inlink->channels; - s->delay = s->filter_size; - return 0; } -static inline double fade(double prev, double next, int pos, - double *fade_factors[2]) +static inline double fade(double prev, double next, int pos, int length) { - return fade_factors[0][pos] * prev + fade_factors[1][pos] * next; + const double step_size = 1.0 / length; + const double f0 = 1.0 - (step_size * (pos + 1.0)); + const double f1 = 
1.0 - f0; + return f0 * prev + f1 * next; } static inline double pow_2(const double value) @@ -416,12 +437,18 @@ static double compute_frame_rms(AVFrame *frame, int channel) return FFMAX(sqrt(rms_value), DBL_EPSILON); } -static double get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame, - int channel) +static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame, + int channel) { - const double maximum_gain = s->peak_value / find_peak_magnitude(frame, channel); + const double peak_magnitude = find_peak_magnitude(frame, channel); + const double maximum_gain = s->peak_value / peak_magnitude; const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX; - return bound(s->max_amplification, FFMIN(maximum_gain, rms_gain)); + local_gain gain; + + gain.threshold = peak_magnitude > s->threshold; + gain.max_gain = bound(s->max_amplification, FFMIN(maximum_gain, rms_gain)); + + return gain; } static double minimum_filter(cqueue *q) @@ -436,38 +463,41 @@ static double minimum_filter(cqueue *q) return min; } -static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q) +static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq) { - double result = 0.0; + double result = 0.0, tsum = 0.0; int i; for (i = 0; i < cqueue_size(q); i++) { - result += cqueue_peek(q, i) * s->weights[i]; + tsum += cqueue_peek(tq, i) * s->weights[i]; + result += cqueue_peek(q, i) * s->weights[i] * cqueue_peek(tq, i); } + if (tsum == 0.0) + result = 1.0; + return result; } static void update_gain_history(DynamicAudioNormalizerContext *s, int channel, - double current_gain_factor) + local_gain gain) { - if (cqueue_empty(s->gain_history_original[channel]) || - cqueue_empty(s->gain_history_minimum[channel])) { + if (cqueue_empty(s->gain_history_original[channel])) { const int pre_fill_size = s->filter_size / 2; - const double initial_value = s->alt_boundary_mode ? current_gain_factor : 1.0; + const double initial_value = s->alt_boundary_mode ? 
gain.max_gain : s->peak_value; s->prev_amplification_factor[channel] = initial_value; while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) { cqueue_enqueue(s->gain_history_original[channel], initial_value); + cqueue_enqueue(s->threshold_history[channel], gain.threshold); } } - cqueue_enqueue(s->gain_history_original[channel], current_gain_factor); + cqueue_enqueue(s->gain_history_original[channel], gain.max_gain); while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) { double minimum; - av_assert0(cqueue_size(s->gain_history_original[channel]) == s->filter_size); if (cqueue_empty(s->gain_history_minimum[channel])) { const int pre_fill_size = s->filter_size / 2; @@ -485,17 +515,22 @@ static void update_gain_history(DynamicAudioNormalizerContext *s, int channel, cqueue_enqueue(s->gain_history_minimum[channel], minimum); + cqueue_enqueue(s->threshold_history[channel], gain.threshold); + cqueue_pop(s->gain_history_original[channel]); } while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) { - double smoothed; - av_assert0(cqueue_size(s->gain_history_minimum[channel]) == s->filter_size); - smoothed = gaussian_filter(s, s->gain_history_minimum[channel]); + double smoothed, limit; + + smoothed = gaussian_filter(s, s->gain_history_minimum[channel], s->threshold_history[channel]); + limit = cqueue_peek(s->gain_history_original[channel], 0); + smoothed = FFMIN(smoothed, limit); cqueue_enqueue(s->gain_history_smoothed[channel], smoothed); cqueue_pop(s->gain_history_minimum[channel]); + cqueue_pop(s->threshold_history[channel]); } } @@ -523,7 +558,7 @@ static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *fra s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1); for (i = 0; i < frame->nb_samples; i++) { - dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, s->fade_factors); + dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, frame->nb_samples); } } } @@ -596,7 +631,7 @@ static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame for (c = 0; c < s->channels; c++) { double *const dst_ptr = (double *)frame->extended_data[c]; for (i = 0; i < frame->nb_samples; i++) { - const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors); + const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples); dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]); } } @@ -615,7 +650,7 @@ static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame dst_ptr = (double *)frame->extended_data[c]; for (i = 0; i < frame->nb_samples; i++) { - const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors); + const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples); dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]); } } @@ -633,11 +668,11 @@ static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame) } if (s->channels_coupled) { - const double current_gain_factor = get_max_local_gain(s, frame, -1); + const local_gain gain = get_max_local_gain(s, frame, -1); int c; for (c = 0; c < s->channels; c++) - update_gain_history(s, c, current_gain_factor); + update_gain_history(s, c, gain); } else { int c; @@ -659,12 +694,9 @@ static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame, int for (i = 0; i < frame->nb_samples && 
enabled; i++) { const double amplification_factor = fade(s->prev_amplification_factor[c], current_amplification_factor, i, - s->fade_factors); + frame->nb_samples); dst_ptr[i] *= amplification_factor; - - if (fabs(dst_ptr[i]) > s->peak_value) - dst_ptr[i] = copysign(s->peak_value, dst_ptr[i]); } s->prev_amplification_factor[c] = current_amplification_factor; @@ -675,12 +707,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; DynamicAudioNormalizerContext *s = ctx->priv; - AVFilterLink *outlink = inlink->dst->outputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; int ret = 1; - if (!cqueue_empty(s->gain_history_smoothed[0])) { - double is_enabled; + while (((s->queue.available >= s->filter_size) || + (s->eof && s->queue.available)) && + !cqueue_empty(s->gain_history_smoothed[0])) { AVFrame *out = ff_bufqueue_get(&s->queue); + double is_enabled; cqueue_dequeue(s->is_enabled, &is_enabled); @@ -689,9 +723,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) } av_frame_make_writable(in); - cqueue_enqueue(s->is_enabled, !ctx->is_disabled); analyze_frame(s, in); - ff_bufqueue_add(ctx, &s->queue, in); + if (!s->eof) { + ff_bufqueue_add(ctx, &s->queue, in); + cqueue_enqueue(s->is_enabled, !ctx->is_disabled); + } else { + av_frame_free(&in); + } return ret; } @@ -717,7 +755,6 @@ static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink, } } - s->delay--; return filter_frame(inlink, out); } @@ -734,7 +771,6 @@ static int flush(AVFilterLink *outlink) s->pts = out->pts; ret = ff_filter_frame(outlink, out); - s->delay = s->queue.available; } return ret; @@ -772,10 +808,10 @@ static int activate(AVFilterContext *ctx) s->eof = 1; } - if (s->eof && s->delay > 0) + if (s->eof && s->queue.available) return flush(outlink); - if (s->eof && s->delay <= 0) { + if (s->eof && !s->queue.available) { ff_outlink_set_status(outlink, AVERROR_EOF, s->pts); return 0; } @@ -786,6 +822,34 @@ static int activate(AVFilterContext *ctx) return FFERROR_NOT_READY; } +static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, + char *res, int res_len, int flags) +{ + DynamicAudioNormalizerContext *s = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + int prev_filter_size = s->filter_size; + int ret; + + ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags); + if (ret < 0) + return ret; + + s->filter_size |= 1; + if (prev_filter_size != s->filter_size) { + init_gaussian_filter(s); + + for (int c = 0; c < s->channels; c++) { + cqueue_resize(s->gain_history_original[c], s->filter_size); + cqueue_resize(s->gain_history_minimum[c], s->filter_size); + cqueue_resize(s->threshold_history[c], s->filter_size); + } + } + + s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec); + + return 0; +} + static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = { { .name = "default", @@ -815,4 +879,5 @@ AVFilter ff_af_dynaudnorm = { .outputs = avfilter_af_dynaudnorm_outputs, .priv_class = &dynaudnorm_class, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, + .process_command = process_command, }; diff --git a/libavfilter/af_extrastereo.c b/libavfilter/af_extrastereo.c index 83eba47410..d8e4da9a93 100644 --- a/libavfilter/af_extrastereo.c +++ b/libavfilter/af_extrastereo.c @@ -31,7 +31,7 @@ typedef struct ExtraStereoContext { } ExtraStereoContext; #define OFFSET(x) offsetof(ExtraStereoContext, x) -#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define A 
AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption extrastereo_options[] = { { "m", "set the difference coefficient", OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.5}, -10, 10, A }, @@ -129,4 +129,5 @@ AVFilter ff_af_extrastereo = { .inputs = inputs, .outputs = outputs, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, + .process_command = ff_filter_process_command, }; diff --git a/libavfilter/af_firequalizer.c b/libavfilter/af_firequalizer.c index 00ddc87341..f4513a1c46 100644 --- a/libavfilter/af_firequalizer.c +++ b/libavfilter/af_firequalizer.c @@ -112,10 +112,11 @@ typedef struct FIREqualizerContext { #define OFFSET(x) offsetof(FIREqualizerContext, x) #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption firequalizer_options[] = { - { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, FLAGS }, - { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, + { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, TFLAGS }, + { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, TFLAGS }, { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS }, { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS }, { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" }, diff --git a/libavfilter/af_sidechaincompress.c b/libavfilter/af_sidechaincompress.c index 8199ec6b44..e79c04d40e 100644 --- a/libavfilter/af_sidechaincompress.c +++ b/libavfilter/af_sidechaincompress.c @@ -70,26 +70,27 @@ typedef struct SidechainCompressContext { #define OFFSET(x) offsetof(SidechainCompressContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM #define F AV_OPT_FLAG_FILTERING_PARAM +#define R AV_OPT_FLAG_RUNTIME_PARAM static const AVOption options[] = { - { "level_in", "set input gain", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A|F }, - { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A|F, "mode" }, - { "downward",0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F, "mode" }, - { "upward", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F, "mode" }, - { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125}, 0.000976563, 1, A|F }, - { "ratio", "set ratio", OFFSET(ratio), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 1, 20, A|F }, - { "attack", "set attack", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 0.01, 2000, A|F }, - { "release", "set release", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=250}, 0.01, 9000, A|F }, - { "makeup", "set make up gain", OFFSET(makeup), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 64, A|F }, - { "knee", "set knee", OFFSET(knee), AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1, 8, A|F }, - { "link", "set link type", OFFSET(link), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A|F, "link" }, - { "average", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F, "link" }, - { "maximum", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F, "link" }, - { "detection", "set detection", OFFSET(detection), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, A|F, "detection" }, - { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F, "detection" }, - { "rms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F, "detection" }, - { "level_sc", "set 
sidechain gain", OFFSET(level_sc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A|F }, - { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, A|F }, + { "level_in", "set input gain", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A|F|R }, + { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A|F|R, "mode" }, + { "downward",0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F|R, "mode" }, + { "upward", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F|R, "mode" }, + { "threshold", "set threshold", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125}, 0.000976563, 1, A|F|R }, + { "ratio", "set ratio", OFFSET(ratio), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 1, 20, A|F|R }, + { "attack", "set attack", OFFSET(attack), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 0.01, 2000, A|F|R }, + { "release", "set release", OFFSET(release), AV_OPT_TYPE_DOUBLE, {.dbl=250}, 0.01, 9000, A|F|R }, + { "makeup", "set make up gain", OFFSET(makeup), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 64, A|F|R }, + { "knee", "set knee", OFFSET(knee), AV_OPT_TYPE_DOUBLE, {.dbl=2.82843}, 1, 8, A|F|R }, + { "link", "set link type", OFFSET(link), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A|F|R, "link" }, + { "average", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F|R, "link" }, + { "maximum", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F|R, "link" }, + { "detection", "set detection", OFFSET(detection), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, A|F|R, "detection" }, + { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A|F|R, "detection" }, + { "rms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A|F|R, "detection" }, + { "level_sc", "set sidechain gain", OFFSET(level_sc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A|F|R }, + { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, A|F|R }, { NULL } }; @@ -214,6 +215,20 @@ static void compressor(SidechainCompressContext *s, } } +static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, + char *res, int res_len, int flags) +{ + int ret; + + ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags); + if (ret < 0) + return ret; + + compressor_config_output(ctx->outputs[0]); + + return 0; +} + #if CONFIG_SIDECHAINCOMPRESS_FILTER static int activate(AVFilterContext *ctx) { @@ -382,6 +397,7 @@ AVFilter ff_af_sidechaincompress = { .uninit = uninit, .inputs = sidechaincompress_inputs, .outputs = sidechaincompress_outputs, + .process_command = process_command, }; #endif /* CONFIG_SIDECHAINCOMPRESS_FILTER */ @@ -475,5 +491,6 @@ AVFilter ff_af_acompressor = { .query_formats = acompressor_query_formats, .inputs = acompressor_inputs, .outputs = acompressor_outputs, + .process_command = process_command, }; #endif /* CONFIG_ACOMPRESSOR_FILTER */ diff --git a/libavfilter/af_stereowiden.c b/libavfilter/af_stereowiden.c index d23c8dba75..251f08438e 100644 --- a/libavfilter/af_stereowiden.c +++ b/libavfilter/af_stereowiden.c @@ -39,13 +39,14 @@ typedef struct StereoWidenContext { } StereoWidenContext; #define OFFSET(x) offsetof(StereoWidenContext, x) -#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption stereowiden_options[] = { { "delay", "set delay time", OFFSET(delay), AV_OPT_TYPE_FLOAT, {.dbl=20}, 1, 100, A }, - { "feedback", "set feedback gain", OFFSET(feedback), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.9, A }, - { "crossfeed", "set cross feed", OFFSET(crossfeed), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 
0, 0.8, A }, - { "drymix", "set dry-mix", OFFSET(drymix), AV_OPT_TYPE_FLOAT, {.dbl=.8}, 0, 1.0, A }, + { "feedback", "set feedback gain", OFFSET(feedback), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.9, AT }, + { "crossfeed", "set cross feed", OFFSET(crossfeed), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.8, AT }, + { "drymix", "set dry-mix", OFFSET(drymix), AV_OPT_TYPE_FLOAT, {.dbl=.8}, 0, 1.0, AT }, { NULL } }; @@ -165,4 +166,5 @@ AVFilter ff_af_stereowiden = { .inputs = inputs, .outputs = outputs, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, + .process_command = ff_filter_process_command, }; diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c index b106ed8cf4..213c57195a 100644 --- a/libavfilter/af_volume.c +++ b/libavfilter/af_volume.c @@ -62,10 +62,11 @@ static const char *const var_names[] = { #define OFFSET(x) offsetof(VolumeContext, x) #define A AV_OPT_FLAG_AUDIO_PARAM #define F AV_OPT_FLAG_FILTERING_PARAM +#define T AV_OPT_FLAG_RUNTIME_PARAM static const AVOption volume_options[] = { { "volume", "set volume adjustment expression", - OFFSET(volume_expr), AV_OPT_TYPE_STRING, { .str = "1.0" }, .flags = A|F }, + OFFSET(volume_expr), AV_OPT_TYPE_STRING, { .str = "1.0" }, .flags = A|F|T }, { "precision", "select mathematical precision", OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" }, { "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" }, diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 69953832da..6270c18ae2 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -239,6 +239,7 @@ extern AVFilter ff_vf_framepack; extern AVFilter ff_vf_framerate; extern AVFilter ff_vf_framestep; extern AVFilter ff_vf_freezedetect; +extern AVFilter ff_vf_freezeframes; extern AVFilter ff_vf_frei0r; extern AVFilter ff_vf_fspp; extern AVFilter ff_vf_gblur; @@ -382,6 +383,7 @@ extern AVFilter ff_vf_swaprect; extern AVFilter ff_vf_swapuv; extern AVFilter ff_vf_tblend; extern AVFilter ff_vf_telecine; +extern AVFilter ff_vf_thistogram; extern AVFilter ff_vf_threshold; extern AVFilter ff_vf_thumbnail; extern AVFilter ff_vf_thumbnail_cuda; @@ -391,6 +393,7 @@ extern AVFilter ff_vf_tlut2; extern AVFilter ff_vf_tmix; extern AVFilter ff_vf_tonemap; extern AVFilter ff_vf_tonemap_opencl; +extern AVFilter ff_vf_tonemap_vaapi; extern AVFilter ff_vf_tpad; extern AVFilter ff_vf_transpose; extern AVFilter ff_vf_transpose_npp; diff --git a/libavfilter/asrc_anullsrc.c b/libavfilter/asrc_anullsrc.c index cb676947d8..52db61685d 100644 --- a/libavfilter/asrc_anullsrc.c +++ b/libavfilter/asrc_anullsrc.c @@ -114,11 +114,8 @@ static int request_frame(AVFilterLink *outlink) return AVERROR(ENOMEM); samplesref->pts = null->pts; - samplesref->channel_layout = null->channel_layout; - samplesref->sample_rate = outlink->sample_rate; - ret = ff_filter_frame(outlink, av_frame_clone(samplesref)); - av_frame_free(&samplesref); + ret = ff_filter_frame(outlink, samplesref); if (ret < 0) return ret; diff --git a/libavfilter/avf_ahistogram.c b/libavfilter/avf_ahistogram.c index a0931bfa58..92cda46756 100644 --- a/libavfilter/avf_ahistogram.c +++ b/libavfilter/avf_ahistogram.c @@ -163,6 +163,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) const int w = s->w; int c, y, n, p, bin; uint64_t acmax = 1; + AVFrame *clone; if (!s->out || s->out->width != outlink->w || s->out->height != outlink->h) { @@ -363,7 +364,11 @@ static int filter_frame(AVFilterLink 
*inlink, AVFrame *in) s->ypos = H; } - return ff_filter_frame(outlink, av_frame_clone(s->out)); + clone = av_frame_clone(s->out); + if (!clone) + return AVERROR(ENOMEM); + + return ff_filter_frame(outlink, clone); } static int activate(AVFilterContext *ctx) diff --git a/libavfilter/avf_aphasemeter.c b/libavfilter/avf_aphasemeter.c index f497bc9969..be0b2fb70f 100644 --- a/libavfilter/avf_aphasemeter.c +++ b/libavfilter/avf_aphasemeter.c @@ -213,8 +213,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) } if (s->do_video) { + AVFrame *clone; + s->out->pts = in->pts; - ff_filter_frame(outlink, av_frame_clone(s->out)); + clone = av_frame_clone(s->out); + if (!clone) + return AVERROR(ENOMEM); + ff_filter_frame(outlink, clone); } return ff_filter_frame(aoutlink, in); } diff --git a/libavfilter/avf_avectorscope.c b/libavfilter/avf_avectorscope.c index 0f53b36dfb..b288ff63ff 100644 --- a/libavfilter/avf_avectorscope.c +++ b/libavfilter/avf_avectorscope.c @@ -238,6 +238,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) AudioVectorScopeContext *s = ctx->priv; const int hw = s->hw; const int hh = s->hh; + AVFrame *clone; unsigned x, y; unsigned prev_x = s->prev_x, prev_y = s->prev_y; double zoom = s->zoom; @@ -360,7 +361,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) s->prev_x = x, s->prev_y = y; av_frame_free(&insamples); - return ff_filter_frame(outlink, av_frame_clone(s->outpicref)); + clone = av_frame_clone(s->outpicref); + if (!clone) + return AVERROR(ENOMEM); + + return ff_filter_frame(outlink, clone); } static int activate(AVFilterContext *ctx) diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c index 3a3ef7eb31..a4dd7b7879 100644 --- a/libavfilter/avf_showspectrum.c +++ b/libavfilter/avf_showspectrum.c @@ -1365,6 +1365,8 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples) s->xpos = 0; if (!s->single_pic && (s->sliding != FULLFRAME || s->xpos == 0)) { if (s->old_pts < outpicref->pts) { + AVFrame *clone; + if (s->legend) { char *units = get_time(ctx, insamples->pts /(float)inlink->sample_rate, x); if (!units) @@ -1393,7 +1395,10 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples) av_free(units); } s->old_pts = outpicref->pts; - ret = ff_filter_frame(outlink, av_frame_clone(s->outpicref)); + clone = av_frame_clone(s->outpicref); + if (!clone) + return AVERROR(ENOMEM); + ret = ff_filter_frame(outlink, clone); if (ret < 0) return ret; return 0; diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c index 1004a6ee1d..394811916d 100644 --- a/libavfilter/avfilter.c +++ b/libavfilter/avfilter.c @@ -88,7 +88,7 @@ const char *avfilter_configuration(void) const char *avfilter_license(void) { #define LICENSE_PREFIX "libavfilter license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } void ff_command_queue_pop(AVFilterContext *filter) @@ -467,24 +467,6 @@ static int ff_request_frame_to_filter(AVFilterLink *link) return ret; } -int ff_poll_frame(AVFilterLink *link) -{ - int i, min = INT_MAX; - - if (link->srcpad->poll_frame) - return link->srcpad->poll_frame(link); - - for (i = 0; i < link->src->nb_inputs; i++) { - int val; - if (!link->src->inputs[i]) - return AVERROR(EINVAL); - val = ff_poll_frame(link->src->inputs[i]); - min = FFMIN(min, val); - } - - return min; -} - static const char *const var_names[] = { "t", "n", diff --git a/libavfilter/avfilter.h 
b/libavfilter/avfilter.h index 3eaa8a4089..49b4f7a939 100644 --- a/libavfilter/avfilter.h +++ b/libavfilter/avfilter.h @@ -947,7 +947,7 @@ AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *nam /** * Create and add a filter instance into an existing graph. * The filter instance is created from the filter filt and inited - * with the parameters args and opaque. + * with the parameter args. opaque is currently ignored. * * In case of success put in *filt_ctx the pointer to the created * filter instance, otherwise set *filt_ctx to NULL. diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c index 25b3f4ab6b..76a46f6678 100644 --- a/libavfilter/buffersink.c +++ b/libavfilter/buffersink.c @@ -61,8 +61,6 @@ typedef struct BufferSinkContext { } BufferSinkContext; #define NB_ITEMS(list) (list ## _size / sizeof(*list)) -#define FIFO_INIT_SIZE 8 -#define FIFO_INIT_ELEMENT_SIZE sizeof(void *) int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame) { @@ -127,6 +125,7 @@ int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, return get_frame_internal(ctx, frame, 0, nb_samples); } +#if FF_API_NEXT AVBufferSinkParams *av_buffersink_params_alloc(void) { static const int pixel_fmts[] = { AV_PIX_FMT_NONE }; @@ -146,6 +145,7 @@ AVABufferSinkParams *av_abuffersink_params_alloc(void) return NULL; return params; } +#endif static av_cold int common_init(AVFilterContext *ctx) { @@ -201,20 +201,6 @@ MAKE_AVFILTERLINK_ACCESSOR(int , sample_rate ) MAKE_AVFILTERLINK_ACCESSOR(AVBufferRef * , hw_frames_ctx ) -static av_cold int vsink_init(AVFilterContext *ctx, void *opaque) -{ - BufferSinkContext *buf = ctx->priv; - AVBufferSinkParams *params = opaque; - int ret; - - if (params) { - if ((ret = av_opt_set_int_list(buf, "pix_fmts", params->pixel_fmts, AV_PIX_FMT_NONE, 0)) < 0) - return ret; - } - - return common_init(ctx); -} - #define CHECK_LIST_SIZE(field) \ if (buf->field ## _size % sizeof(*buf->field)) { \ av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \ @@ -244,23 +230,6 @@ static int vsink_query_formats(AVFilterContext *ctx) return 0; } -static av_cold int asink_init(AVFilterContext *ctx, void *opaque) -{ - BufferSinkContext *buf = ctx->priv; - AVABufferSinkParams *params = opaque; - int ret; - - if (params) { - if ((ret = av_opt_set_int_list(buf, "sample_fmts", params->sample_fmts, AV_SAMPLE_FMT_NONE, 0)) < 0 || - (ret = av_opt_set_int_list(buf, "sample_rates", params->sample_rates, -1, 0)) < 0 || - (ret = av_opt_set_int_list(buf, "channel_layouts", params->channel_layouts, -1, 0)) < 0 || - (ret = av_opt_set_int_list(buf, "channel_counts", params->channel_counts, -1, 0)) < 0 || - (ret = av_opt_set_int(buf, "all_channel_counts", params->all_channel_counts, 0)) < 0) - return ret; - } - return common_init(ctx); -} - static int asink_query_formats(AVFilterContext *ctx) { BufferSinkContext *buf = ctx->priv; @@ -347,7 +316,7 @@ AVFilter ff_vsink_buffer = { .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."), .priv_size = sizeof(BufferSinkContext), .priv_class = &buffersink_class, - .init_opaque = vsink_init, + .init = common_init, .query_formats = vsink_query_formats, .activate = activate, .inputs = avfilter_vsink_buffer_inputs, @@ -367,7 +336,7 @@ AVFilter ff_asink_abuffer = { .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."), .priv_class = &abuffersink_class, .priv_size = 
sizeof(BufferSinkContext), - .init_opaque = asink_init, + .init = common_init, .query_formats = asink_query_formats, .activate = activate, .inputs = avfilter_asink_abuffer_inputs, diff --git a/libavfilter/buffersink.h b/libavfilter/buffersink.h index 3c846bb527..2ec821c685 100644 --- a/libavfilter/buffersink.h +++ b/libavfilter/buffersink.h @@ -59,6 +59,7 @@ int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flag */ #define AV_BUFFERSINK_FLAG_NO_REQUEST 2 +#if FF_API_NEXT /** * Struct to use for initializing a buffersink context. */ @@ -71,6 +72,7 @@ typedef struct AVBufferSinkParams { * * Must be freed with av_free(). */ +attribute_deprecated AVBufferSinkParams *av_buffersink_params_alloc(void); /** @@ -89,7 +91,9 @@ typedef struct AVABufferSinkParams { * * Must be freed with av_free(). */ +attribute_deprecated AVABufferSinkParams *av_abuffersink_params_alloc(void); +#endif /** * Set the frame size for an audio buffer sink. diff --git a/libavfilter/buffersrc.c b/libavfilter/buffersrc.c index bae7d86695..bf30f54177 100644 --- a/libavfilter/buffersrc.c +++ b/libavfilter/buffersrc.c @@ -25,9 +25,9 @@ #include +#include "libavutil/avassert.h" #include "libavutil/channel_layout.h" #include "libavutil/common.h" -#include "libavutil/fifo.h" #include "libavutil/frame.h" #include "libavutil/imgutils.h" #include "libavutil/internal.h" @@ -43,7 +43,6 @@ typedef struct BufferSourceContext { const AVClass *class; - AVFifoBuffer *fifo; AVRational time_base; ///< time_base to set in the output link AVRational frame_rate; ///< frame_rate to set in the output link unsigned nb_failed_requests; @@ -52,7 +51,9 @@ typedef struct BufferSourceContext { int w, h; enum AVPixelFormat pix_fmt; AVRational pixel_aspect; +#if FF_API_SWS_PARAM_OPTION char *sws_param; +#endif AVBufferRef *hw_frames_ctx; @@ -63,7 +64,6 @@ typedef struct BufferSourceContext { uint64_t channel_layout; char *channel_layout_str; - int got_format_from_params; int eof; } BufferSourceContext; @@ -105,7 +105,6 @@ int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *par switch (ctx->filter->outputs[0].type) { case AVMEDIA_TYPE_VIDEO: if (param->format != AV_PIX_FMT_NONE) { - s->got_format_from_params = 1; s->pix_fmt = param->format; } if (param->width > 0) @@ -125,7 +124,6 @@ int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *par break; case AVMEDIA_TYPE_AUDIO: if (param->format != AV_SAMPLE_FMT_NONE) { - s->got_format_from_params = 1; s->sample_fmt = param->format; } if (param->sample_rate > 0) @@ -228,11 +226,6 @@ static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, } - if (!av_fifo_space(s->fifo) && - (ret = av_fifo_realloc2(s->fifo, av_fifo_size(s->fifo) + - sizeof(copy))) < 0) - return ret; - if (!(copy = av_frame_alloc())) return AVERROR(ENOMEM); @@ -246,14 +239,8 @@ static int av_buffersrc_add_frame_internal(AVFilterContext *ctx, } } - if ((ret = av_fifo_generic_write(s->fifo, &copy, sizeof(copy), NULL)) < 0) { - if (refcounted) - av_frame_move_ref(frame, copy); - av_frame_free(&copy); - return ret; - } - - if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0) + ret = ff_filter_frame(ctx->outputs[0], copy); + if (ret < 0) return ret; if ((flags & AV_BUFFERSRC_FLAG_PUSH)) { @@ -278,19 +265,22 @@ static av_cold int init_video(AVFilterContext *ctx) { BufferSourceContext *c = ctx->priv; - if (!(c->pix_fmt != AV_PIX_FMT_NONE || c->got_format_from_params) || !c->w || !c->h || + if (c->pix_fmt == AV_PIX_FMT_NONE || !c->w || !c->h || av_q2d(c->time_base)
<= 0) { av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n"); return AVERROR(EINVAL); } - if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*)))) - return AVERROR(ENOMEM); - - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n", + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt), c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den, - c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, "")); + c->pixel_aspect.num, c->pixel_aspect.den); + +#if FF_API_SWS_PARAM_OPTION + if (c->sws_param) + av_log(ctx, AV_LOG_WARNING, "sws_param option is deprecated and ignored\n"); +#endif + return 0; } @@ -312,7 +302,9 @@ static const AVOption buffer_options[] = { { "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V }, { "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V }, { "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V }, +#if FF_API_SWS_PARAM_OPTION { "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V }, +#endif { NULL }, }; @@ -334,7 +326,7 @@ static av_cold int init_audio(AVFilterContext *ctx) BufferSourceContext *s = ctx->priv; int ret = 0; - if (!(s->sample_fmt != AV_SAMPLE_FMT_NONE || s->got_format_from_params)) { + if (s->sample_fmt == AV_SAMPLE_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "Sample format was not set or was invalid\n"); return AVERROR(EINVAL); } @@ -367,9 +359,6 @@ static av_cold int init_audio(AVFilterContext *ctx) return AVERROR(EINVAL); } - if (!(s->fifo = av_fifo_alloc(sizeof(AVFrame*)))) - return AVERROR(ENOMEM); - if (!s->time_base.num) s->time_base = (AVRational){1, s->sample_rate}; @@ -384,13 +373,7 @@ static av_cold int init_audio(AVFilterContext *ctx) static av_cold void uninit(AVFilterContext *ctx) { BufferSourceContext *s = ctx->priv; - while (s->fifo && av_fifo_size(s->fifo)) { - AVFrame *frame; - av_fifo_generic_read(s->fifo, &frame, sizeof(frame), NULL); - av_frame_free(&frame); - } av_buffer_unref(&s->hw_frames_ctx); - av_fifo_freep(&s->fifo); } static int query_formats(AVFilterContext *ctx) @@ -460,29 +443,11 @@ static int config_props(AVFilterLink *link) static int request_frame(AVFilterLink *link) { BufferSourceContext *c = link->src->priv; - AVFrame *frame; - int ret; - if (!av_fifo_size(c->fifo)) { - if (c->eof) - return AVERROR_EOF; - c->nb_failed_requests++; - return AVERROR(EAGAIN); - } - av_fifo_generic_read(c->fifo, &frame, sizeof(frame), NULL); - - ret = ff_filter_frame(link, frame); - - return ret; -} - -static int poll_frame(AVFilterLink *link) -{ - BufferSourceContext *c = link->src->priv; - int size = av_fifo_size(c->fifo); - if (!size && c->eof) + if (c->eof) return AVERROR_EOF; - return size/sizeof(AVFrame*); + c->nb_failed_requests++; + return AVERROR(EAGAIN); } static const AVFilterPad avfilter_vsrc_buffer_outputs[] = { @@ -490,7 +455,6 @@ static const AVFilterPad avfilter_vsrc_buffer_outputs[] = { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .request_frame = request_frame, - .poll_frame = poll_frame, .config_props = config_props, }, { NULL } @@ -515,7 +479,6 @@ static const AVFilterPad avfilter_asrc_abuffer_outputs[] = { .name = "default", .type = AVMEDIA_TYPE_AUDIO, .request_frame = request_frame, - .poll_frame = poll_frame, .config_props = config_props, }, { NULL } diff --git a/libavfilter/f_drawgraph.c b/libavfilter/f_drawgraph.c 
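Note on the buffersink and buffersrc hunks above: AVBufferSinkParams/AVABufferSinkParams are deprecated behind FF_API_NEXT, and buffersrc no longer keeps an internal FIFO, so a frame pushed with av_buffersrc_add_frame() now travels straight through ff_filter_frame() and buffersrc's request_frame() only reports EAGAIN or EOF. Callers are expected to configure sinks through AVOptions and to drain the sink after each push. The sketch below is illustrative only and not part of this patch; graph, src_ctx, sink_ctx, decoded and filtered are placeholder names, and graph wiring plus most error handling are omitted.

    #include <libavfilter/avfilter.h>
    #include <libavfilter/buffersink.h>
    #include <libavfilter/buffersrc.h>
    #include <libavutil/opt.h>

    /* Option-based sink setup that replaces av_abuffersink_params_alloc(). */
    static AVFilterContext *open_audio_sink(AVFilterGraph *graph)
    {
        static const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
        AVFilterContext *sink_ctx = avfilter_graph_alloc_filter(graph,
                                        avfilter_get_by_name("abuffersink"), "sink");
        if (!sink_ctx)
            return NULL;
        av_opt_set_int_list(sink_ctx, "sample_fmts", fmts, AV_SAMPLE_FMT_NONE,
                            AV_OPT_SEARCH_CHILDREN);
        if (avfilter_init_str(sink_ctx, NULL) < 0)
            return NULL;
        return sink_ctx;
    }

    /* Push one decoded frame, then pull everything the graph has ready. */
    static int push_and_drain(AVFilterContext *src_ctx, AVFilterContext *sink_ctx,
                              AVFrame *decoded, AVFrame *filtered)
    {
        int ret = av_buffersrc_add_frame_flags(src_ctx, decoded, AV_BUFFERSRC_FLAG_KEEP_REF);
        if (ret < 0)
            return ret;
        while ((ret = av_buffersink_get_frame(sink_ctx, filtered)) >= 0) {
            /* ... consume the filtered frame ... */
            av_frame_unref(filtered);
        }
        /* EAGAIN just means the graph wants more input */
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }

This mirrors the push/drain pattern in FFmpeg's own filtering examples; only the helper names above are invented for illustration.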
index 955047368d..7d8bc4adb7 100644 --- a/libavfilter/f_drawgraph.c +++ b/libavfilter/f_drawgraph.c @@ -40,6 +40,7 @@ typedef struct DrawGraphContext { int mode; int slide; int w, h; + AVRational frame_rate; AVFrame *out; int x; @@ -48,6 +49,7 @@ typedef struct DrawGraphContext { float *values[4]; int values_size[4]; int nb_values; + int64_t prev_pts; } DrawGraphContext; #define OFFSET(x) offsetof(DrawGraphContext, x) @@ -77,6 +79,8 @@ static const AVOption drawgraph_options[] = { {"picture", "display graph in single frame", OFFSET(slide), AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "slide"}, { "size", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS }, { "s", "set graph size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="900x256"}, 0, 0, FLAGS }, + { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS }, + { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS }, { NULL } }; @@ -159,6 +163,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) AVDictionary *metadata; AVDictionaryEntry *e; AVFrame *out = s->out; + AVFrame *clone = NULL; + int64_t in_pts, out_pts; int i; if (s->slide == 4 && s->nb_values >= s->values_size[0] / sizeof(float)) { @@ -309,12 +315,24 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) s->nb_values++; s->x++; + in_pts = in->pts; + av_frame_free(&in); if (s->slide == 4) return 0; - return ff_filter_frame(outlink, av_frame_clone(s->out)); + out_pts = av_rescale_q(in_pts, inlink->time_base, outlink->time_base); + + if (out_pts == s->prev_pts) + return 0; + + clone = av_frame_clone(s->out); + if (!clone) + return AVERROR(ENOMEM); + + clone->pts = s->prev_pts = out_pts; + return ff_filter_frame(outlink, clone); } static int request_frame(AVFilterLink *outlink) @@ -406,6 +424,9 @@ static int config_output(AVFilterLink *outlink) outlink->w = s->w; outlink->h = s->h; outlink->sample_aspect_ratio = (AVRational){1,1}; + outlink->frame_rate = s->frame_rate; + outlink->time_base = av_inv_q(outlink->frame_rate); + s->prev_pts = AV_NOPTS_VALUE; return 0; } diff --git a/libavfilter/f_ebur128.c b/libavfilter/f_ebur128.c index f25d5f096e..31b75ab097 100644 --- a/libavfilter/f_ebur128.c +++ b/libavfilter/f_ebur128.c @@ -774,6 +774,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) /* push one video frame */ if (ebur128->do_video) { + AVFrame *clone; int x, y, ret; uint8_t *p; double gauge_value; @@ -823,7 +824,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) /* set pts and push frame */ pic->pts = pts; - ret = ff_filter_frame(outlink, av_frame_clone(pic)); + clone = av_frame_clone(pic); + if (!clone) + return AVERROR(ENOMEM); + ret = ff_filter_frame(outlink, clone); if (ret < 0) return ret; } diff --git a/libavfilter/f_metadata.c b/libavfilter/f_metadata.c index 3bf4bb17f5..bf298e9d39 100644 --- a/libavfilter/f_metadata.c +++ b/libavfilter/f_metadata.c @@ -88,6 +88,8 @@ typedef struct MetadataContext { int (*compare)(struct MetadataContext *s, const char *value1, const char *value2); void (*print)(AVFilterContext *ctx, const char *msg, ...) 
av_printf_format(2, 3); + + int direct; // reduces buffering when printing to user-supplied URL } MetadataContext; #define OFFSET(x) offsetof(MetadataContext, x) @@ -111,6 +113,7 @@ static const AVOption filt_name##_options[] = { \ { "ends_with", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = METADATAF_ENDS_WITH }, 0, 0, FLAGS, "function" }, \ { "expr", "set expression for expr function", OFFSET(expr_str), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, FLAGS }, \ { "file", "set file where to print metadata information", OFFSET(file_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS }, \ + { "direct", "reduce buffering when printing to user-set file or pipe", OFFSET(direct), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS }, \ { NULL } \ } @@ -274,6 +277,9 @@ static av_cold int init(AVFilterContext *ctx) s->file_str, buf); return ret; } + + if (s->direct) + s->avio_context->direct = AVIO_FLAG_DIRECT; } return 0; diff --git a/libavfilter/f_streamselect.c b/libavfilter/f_streamselect.c index 7a1ff775f4..b3ae4bed62 100644 --- a/libavfilter/f_streamselect.c +++ b/libavfilter/f_streamselect.c @@ -41,9 +41,10 @@ typedef struct StreamSelectContext { #define OFFSET(x) offsetof(StreamSelectContext, x) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM +#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM static const AVOption streamselect_options[] = { { "inputs", "number of input streams", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags=FLAGS }, - { "map", "input indexes to remap to outputs", OFFSET(map_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags=FLAGS }, + { "map", "input indexes to remap to outputs", OFFSET(map_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags=TFLAGS }, { NULL } }; diff --git a/libavfilter/internal.h b/libavfilter/internal.h index 1d77808082..abe7537b5d 100644 --- a/libavfilter/internal.h +++ b/libavfilter/internal.h @@ -92,17 +92,6 @@ struct AVFilterPad { */ int (*filter_frame)(AVFilterLink *link, AVFrame *frame); - /** - * Frame poll callback. This returns the number of immediately available - * samples. It should return a positive value if the next request_frame() - * is guaranteed to return one frame (with no delay). - * - * Defaults to just calling the source poll_frame() method. - * - * Output pads only. - */ - int (*poll_frame)(AVFilterLink *link); - /** * Frame request callback. A call to this should result in some progress * towards producing output over the given link. This should return zero @@ -289,15 +278,6 @@ static inline int ff_insert_outpad(AVFilterContext *f, unsigned index, &f->output_pads, &f->outputs, p); } -/** - * Poll a frame from the filter chain. - * - * @param link the input link - * @return the number of immediately available frames, a negative - * number in case of error - */ -int ff_poll_frame(AVFilterLink *link); - /** * Request an input frame from the filter at the other end of the link. 
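A recurring change in this patch is marking options with AV_OPT_FLAG_RUNTIME_PARAM and exposing a .process_command callback, either the generic ff_filter_process_command() (extrastereo, stereowiden) or a wrapper that re-derives internal state after the option is applied (dynaudnorm, acompressor/sidechaincompress); filters such as volume, firequalizer, streamselect, crop and eq already had command handling and only gain the runtime flag on their options. The effect is that these options can be changed on a running graph. Below is a minimal illustrative sketch of the application side, not part of the patch, assuming graph is a configured AVFilterGraph that contains a filter instance named "volume".

    #include <libavfilter/avfilter.h>

    /* Hypothetical helper: adjust the volume filter while the graph is running.
     * avfilter_graph_send_command() locates the target filter by name and
     * invokes its process_command() callback with the given command/argument. */
    static int set_volume_live(AVFilterGraph *graph, const char *value)
    {
        char res[128] = { 0 };
        return avfilter_graph_send_command(graph, "volume", "volume", value,
                                           res, sizeof(res), 0);
    }

The same path is what the sendcmd/asendcmd and zmq/azmq filters, as well as ffmpeg's interactive 'c' key, use to deliver commands at run time.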
* diff --git a/libavfilter/scale_eval.c b/libavfilter/scale_eval.c index a3439a95e0..dfec081e15 100644 --- a/libavfilter/scale_eval.c +++ b/libavfilter/scale_eval.c @@ -25,9 +25,6 @@ #include "libavutil/pixdesc.h" static const char *const var_names[] = { - "PI", - "PHI", - "E", "in_w", "iw", "in_h", "ih", "out_w", "ow", @@ -43,9 +40,6 @@ static const char *const var_names[] = { }; enum var_name { - VAR_PI, - VAR_PHI, - VAR_E, VAR_IN_W, VAR_IW, VAR_IN_H, VAR_IH, VAR_OUT_W, VAR_OW, @@ -60,49 +54,6 @@ enum var_name { VARS_NB }; -/** - * This must be kept in sync with var_names so that it is always a - * complete list of var_names with the scale2ref specific names - * appended. scale2ref values must appear in the order they appear - * in the var_name_scale2ref enum but also be below all of the - * non-scale2ref specific values. - */ -static const char *const var_names_scale2ref[] = { - "PI", - "PHI", - "E", - "in_w", "iw", - "in_h", "ih", - "out_w", "ow", - "out_h", "oh", - "a", - "sar", - "dar", - "hsub", - "vsub", - "ohsub", - "ovsub", - "main_w", - "main_h", - "main_a", - "main_sar", - "main_dar", "mdar", - "main_hsub", - "main_vsub", - NULL -}; - -enum var_name_scale2ref { - VAR_S2R_MAIN_W, - VAR_S2R_MAIN_H, - VAR_S2R_MAIN_A, - VAR_S2R_MAIN_SAR, - VAR_S2R_MAIN_DAR, VAR_S2R_MDAR, - VAR_S2R_MAIN_HSUB, - VAR_S2R_MAIN_VSUB, - VARS_S2R_NB -}; - int ff_scale_eval_dimensions(void *log_ctx, const char *w_expr, const char *h_expr, AVFilterLink *inlink, AVFilterLink *outlink, @@ -113,20 +64,8 @@ int ff_scale_eval_dimensions(void *log_ctx, const char *expr; int eval_w, eval_h; int ret; - const char scale2ref = outlink->src->nb_inputs == 2 && outlink->src->inputs[1] == inlink; - double var_values[VARS_NB + VARS_S2R_NB], res; - const AVPixFmtDescriptor *main_desc; - const AVFilterLink *main_link; - const char *const *names = scale2ref ? var_names_scale2ref : var_names; + double var_values[VARS_NB], res; - if (scale2ref) { - main_link = outlink->src->inputs[0]; - main_desc = av_pix_fmt_desc_get(main_link->format); - } - - var_values[VAR_PI] = M_PI; - var_values[VAR_PHI] = M_PHI; - var_values[VAR_E] = M_E; var_values[VAR_IN_W] = var_values[VAR_IW] = inlink->w; var_values[VAR_IN_H] = var_values[VAR_IH] = inlink->h; var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN; @@ -140,32 +79,20 @@ int ff_scale_eval_dimensions(void *log_ctx, var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w; var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h; - if (scale2ref) { - var_values[VARS_NB + VAR_S2R_MAIN_W] = main_link->w; - var_values[VARS_NB + VAR_S2R_MAIN_H] = main_link->h; - var_values[VARS_NB + VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h; - var_values[VARS_NB + VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ? - (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1; - var_values[VARS_NB + VAR_S2R_MAIN_DAR] = var_values[VARS_NB + VAR_S2R_MDAR] = - var_values[VARS_NB + VAR_S2R_MAIN_A] * var_values[VARS_NB + VAR_S2R_MAIN_SAR]; - var_values[VARS_NB + VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w; - var_values[VARS_NB + VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h; - } - /* evaluate width and height */ av_expr_parse_and_eval(&res, (expr = w_expr), - names, var_values, + var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx); eval_w = var_values[VAR_OUT_W] = var_values[VAR_OW] = (int) res == 0 ? 
inlink->w : (int) res; if ((ret = av_expr_parse_and_eval(&res, (expr = h_expr), - names, var_values, + var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0) goto fail; eval_h = var_values[VAR_OUT_H] = var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res; /* evaluate again the width, as it may depend on the output height */ if ((ret = av_expr_parse_and_eval(&res, (expr = w_expr), - names, var_values, + var_names, var_values, NULL, NULL, NULL, NULL, NULL, 0, log_ctx)) < 0) goto fail; eval_w = (int) res == 0 ? inlink->w : (int) res; diff --git a/libavfilter/tinterlace.h b/libavfilter/tinterlace.h index 5bcb9a583a..020887ff34 100644 --- a/libavfilter/tinterlace.h +++ b/libavfilter/tinterlace.h @@ -36,6 +36,13 @@ #define TINTERLACE_FLAG_VLPF 01 #define TINTERLACE_FLAG_CVLPF 2 #define TINTERLACE_FLAG_EXACT_TB 4 +#define TINTERLACE_FLAG_BYPASS_IL 8 + +enum VLPFilter { + VLPF_OFF = 0, + VLPF_LIN = 1, + VLPF_CMP = 2, +}; enum TInterlaceMode { MODE_MERGE = 0, @@ -59,6 +66,7 @@ typedef struct TInterlaceContext { int mode; ///< TInterlaceMode, interlace mode selected AVRational preout_time_base; int flags; ///< flags affecting interlacing algorithm + int lowpass; ///< legacy interlace filter lowpass mode int frame; ///< number of the output frame int vsub; ///< chroma vertical subsampling AVFrame *cur; diff --git a/libavfilter/version.h b/libavfilter/version.h index a3625006af..03b6ce650a 100644 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@ -30,7 +30,7 @@ #include "libavutil/version.h" #define LIBAVFILTER_VERSION_MAJOR 7 -#define LIBAVFILTER_VERSION_MINOR 68 +#define LIBAVFILTER_VERSION_MINOR 71 #define LIBAVFILTER_VERSION_MICRO 100 @@ -59,6 +59,9 @@ #ifndef FF_API_FILTER_GET_SET #define FF_API_FILTER_GET_SET (LIBAVFILTER_VERSION_MAJOR < 8) #endif +#ifndef FF_API_SWS_PARAM_OPTION +#define FF_API_SWS_PARAM_OPTION (LIBAVFILTER_VERSION_MAJOR < 8) +#endif #ifndef FF_API_NEXT #define FF_API_NEXT (LIBAVFILTER_VERSION_MAJOR < 8) #endif diff --git a/libavfilter/vf_coreimage.m b/libavfilter/vf_coreimage.m index 323a28caa1..4ed5ba7920 100644 --- a/libavfilter/vf_coreimage.m +++ b/libavfilter/vf_coreimage.m @@ -486,6 +486,7 @@ static av_cold int init(AVFilterContext *fctx) av_log(ctx, AV_LOG_DEBUG, "Filter_string: %s\n", ctx->filter_string); ret = av_dict_parse_string(&filter_dict, ctx->filter_string, "@", "#", AV_DICT_MULTIKEY); // parse filter_name:all_filter_options if (ret) { + av_dict_free(&filter_dict); av_log(ctx, AV_LOG_ERROR, "Parsing of filters failed.\n"); return AVERROR(EIO); } @@ -507,6 +508,7 @@ static av_cold int init(AVFilterContext *fctx) if (strncmp(f->value, "default", 7)) { // not default ret = av_dict_parse_string(&filter_options, f->value, "=", "@", 0); // parse option_name:option_value if (ret) { + av_dict_free(&filter_options); av_log(ctx, AV_LOG_ERROR, "Parsing of filter options for \"%s\" failed.\n", f->key); return AVERROR(EIO); } diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c index 9bf4077c5d..8dbf1b52d8 100644 --- a/libavfilter/vf_crop.c +++ b/libavfilter/vf_crop.c @@ -370,14 +370,15 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar #define OFFSET(x) offsetof(CropContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption crop_options[] = { - { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS }, - 
{ "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, FLAGS }, - { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS }, - { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, FLAGS }, - { "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, FLAGS }, - { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, FLAGS }, + { "out_w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, TFLAGS }, + { "w", "set the width crop area expression", OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, CHAR_MIN, CHAR_MAX, TFLAGS }, + { "out_h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, TFLAGS }, + { "h", "set the height crop area expression", OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, CHAR_MIN, CHAR_MAX, TFLAGS }, + { "x", "set the x crop area expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str = "(in_w-out_w)/2"}, CHAR_MIN, CHAR_MAX, TFLAGS }, + { "y", "set the y crop area expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str = "(in_h-out_h)/2"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "keep_aspect", "keep aspect ratio", OFFSET(keep_aspect), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS }, { "exact", "do exact cropping", OFFSET(exact), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS }, { NULL } diff --git a/libavfilter/vf_datascope.c b/libavfilter/vf_datascope.c index e91a1e66cf..e83f04c697 100644 --- a/libavfilter/vf_datascope.c +++ b/libavfilter/vf_datascope.c @@ -35,6 +35,7 @@ typedef struct DatascopeContext { int ow, oh; int x, y; int mode; + int dformat; int axis; float opacity; @@ -67,6 +68,9 @@ static const AVOption datascope_options[] = { { "color2", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "mode" }, { "axis", "draw column/row numbers", OFFSET(axis), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS }, { "opacity", "set background opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS }, + { "format", "set display number format", OFFSET(dformat), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "format" }, + { "hex", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "format" }, + { "dec", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "format" }, { NULL } }; @@ -180,9 +184,10 @@ static int filter_color2(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs const int yoff = td->yoff; const int P = FFMAX(s->nb_planes, s->nb_comps); const int C = s->chars; + const int D = ((s->chars - s->dformat) >> 2) + s->dformat * 2; const int W = (outlink->w - xoff) / (C * 10); const int H = (outlink->h - yoff) / (P * 12); - const char *format[2] = {"%02X\n", "%04X\n"}; + const char *format[4] = {"%02X\n", "%04X\n", "%03d\n", "%05d\n"}; const int slice_start = (W * jobnr) / nb_jobs; const int slice_end = (W * (jobnr+1)) / nb_jobs; int x, y, p; @@ -201,7 +206,7 @@ static int filter_color2(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs for (p = 0; p < P; p++) { char text[256]; - snprintf(text, sizeof(text), format[C>>2], value[p]); + snprintf(text, sizeof(text), format[D], value[p]); draw_text(&s->draw, out, &reverse, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0); } } @@ -222,9 +227,10 @@ static int filter_color(AVFilterContext 
*ctx, void *arg, int jobnr, int nb_jobs) const int yoff = td->yoff; const int P = FFMAX(s->nb_planes, s->nb_comps); const int C = s->chars; + const int D = ((s->chars - s->dformat) >> 2) + s->dformat * 2; const int W = (outlink->w - xoff) / (C * 10); const int H = (outlink->h - yoff) / (P * 12); - const char *format[2] = {"%02X\n", "%04X\n"}; + const char *format[4] = {"%02X\n", "%04X\n", "%03d\n", "%05d\n"}; const int slice_start = (W * jobnr) / nb_jobs; const int slice_end = (W * (jobnr+1)) / nb_jobs; int x, y, p; @@ -239,7 +245,7 @@ static int filter_color(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) for (p = 0; p < P; p++) { char text[256]; - snprintf(text, sizeof(text), format[C>>2], value[p]); + snprintf(text, sizeof(text), format[D], value[p]); draw_text(&s->draw, out, &color, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0); } } @@ -260,9 +266,10 @@ static int filter_mono(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) const int yoff = td->yoff; const int P = FFMAX(s->nb_planes, s->nb_comps); const int C = s->chars; + const int D = ((s->chars - s->dformat) >> 2) + s->dformat * 2; const int W = (outlink->w - xoff) / (C * 10); const int H = (outlink->h - yoff) / (P * 12); - const char *format[2] = {"%02X\n", "%04X\n"}; + const char *format[4] = {"%02X\n", "%04X\n", "%03d\n", "%05d\n"}; const int slice_start = (W * jobnr) / nb_jobs; const int slice_end = (W * (jobnr+1)) / nb_jobs; int x, y, p; @@ -276,7 +283,7 @@ static int filter_mono(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) for (p = 0; p < P; p++) { char text[256]; - snprintf(text, sizeof(text), format[C>>2], value[p]); + snprintf(text, sizeof(text), format[D], value[p]); draw_text(&s->draw, out, &s->white, xoff + x * C * 10 + 2, yoff + y * P * 12 + p * 10 + 2, text, 0); } } @@ -360,7 +367,7 @@ static int config_input(AVFilterLink *inlink) ff_draw_color(&s->draw, &s->black, (uint8_t[]){ 0, 0, 0, alpha} ); ff_draw_color(&s->draw, &s->yellow, (uint8_t[]){ 255, 255, 0, 255} ); ff_draw_color(&s->draw, &s->gray, (uint8_t[]){ 77, 77, 77, 255} ); - s->chars = (s->draw.desc->comp[0].depth + 7) / 8 * 2; + s->chars = (s->draw.desc->comp[0].depth + 7) / 8 * 2 + s->dformat; s->nb_comps = s->draw.desc->nb_components; switch (s->mode) { diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c index f59cfb0da2..492df93c20 100644 --- a/libavfilter/vf_dnn_processing.c +++ b/libavfilter/vf_dnn_processing.c @@ -27,6 +27,7 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/avassert.h" +#include "libavutil/imgutils.h" #include "avfilter.h" #include "dnn_interface.h" #include "formats.h" @@ -37,7 +38,6 @@ typedef struct DnnProcessingContext { char *model_filename; DNNBackendType backend_type; - enum AVPixelFormat fmt; char *model_inputname; char *model_outputname; @@ -60,7 +60,6 @@ static const AVOption dnn_processing_options[] = { { "model", "path to model file", OFFSET(model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, { "input", "input name of the model", OFFSET(model_inputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, { "output", "output name of the model", OFFSET(model_outputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, - { "fmt", "AVPixelFormat of the frame", OFFSET(fmt), AV_OPT_TYPE_PIXEL_FMT, { .i64=AV_PIX_FMT_RGB24 }, AV_PIX_FMT_NONE, AV_PIX_FMT_NB - 1, FLAGS }, { NULL } }; @@ -69,23 +68,6 @@ AVFILTER_DEFINE_CLASS(dnn_processing); static av_cold int init(AVFilterContext *context) { 
DnnProcessingContext *ctx = context->priv; - int supported = 0; - // as the first step, only rgb24 and bgr24 are supported - const enum AVPixelFormat supported_pixel_fmts[] = { - AV_PIX_FMT_RGB24, - AV_PIX_FMT_BGR24, - }; - for (int i = 0; i < sizeof(supported_pixel_fmts) / sizeof(enum AVPixelFormat); ++i) { - if (supported_pixel_fmts[i] == ctx->fmt) { - supported = 1; - break; - } - } - if (!supported) { - av_log(context, AV_LOG_ERROR, "pixel fmt %s not supported yet\n", - av_get_pix_fmt_name(ctx->fmt)); - return AVERROR(AVERROR_INVALIDDATA); - } if (!ctx->model_filename) { av_log(ctx, AV_LOG_ERROR, "model file for network is not specified\n"); @@ -121,14 +103,77 @@ static av_cold int init(AVFilterContext *context) static int query_formats(AVFilterContext *context) { - AVFilterFormats *formats; - DnnProcessingContext *ctx = context->priv; - enum AVPixelFormat pixel_fmts[2]; - pixel_fmts[0] = ctx->fmt; - pixel_fmts[1] = AV_PIX_FMT_NONE; + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, + AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAYF32, + AV_PIX_FMT_NONE + }; + AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts); + return ff_set_common_formats(context, fmts_list); +} - formats = ff_make_format_list(pixel_fmts); - return ff_set_common_formats(context, formats); +#define LOG_FORMAT_CHANNEL_MISMATCH() \ + av_log(ctx, AV_LOG_ERROR, \ + "the frame's format %s does not match " \ + "the model input channel %d\n", \ + av_get_pix_fmt_name(fmt), \ + model_input->channels); + +static int check_modelinput_inlink(const DNNData *model_input, const AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + enum AVPixelFormat fmt = inlink->format; + + // the design is to add explicit scale filter before this filter + if (model_input->height != -1 && model_input->height != inlink->h) { + av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n", + model_input->height, inlink->h); + return AVERROR(EIO); + } + if (model_input->width != -1 && model_input->width != inlink->w) { + av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n", + model_input->width, inlink->w); + return AVERROR(EIO); + } + + switch (fmt) { + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: + if (model_input->channels != 3) { + LOG_FORMAT_CHANNEL_MISMATCH(); + return AVERROR(EIO); + } + if (model_input->dt != DNN_FLOAT && model_input->dt != DNN_UINT8) { + av_log(ctx, AV_LOG_ERROR, "only support dnn models with input data type as float32 and uint8.\n"); + return AVERROR(EIO); + } + return 0; + case AV_PIX_FMT_GRAY8: + if (model_input->channels != 1) { + LOG_FORMAT_CHANNEL_MISMATCH(); + return AVERROR(EIO); + } + if (model_input->dt != DNN_UINT8) { + av_log(ctx, AV_LOG_ERROR, "only support dnn models with input data type uint8.\n"); + return AVERROR(EIO); + } + return 0; + case AV_PIX_FMT_GRAYF32: + if (model_input->channels != 1) { + LOG_FORMAT_CHANNEL_MISMATCH(); + return AVERROR(EIO); + } + if (model_input->dt != DNN_FLOAT) { + av_log(ctx, AV_LOG_ERROR, "only support dnn models with input data type float32.\n"); + return AVERROR(EIO); + } + return 0; + default: + av_log(ctx, AV_LOG_ERROR, "%s not supported.\n", av_get_pix_fmt_name(fmt)); + return AVERROR(EIO); + } + + return 0; } static int config_input(AVFilterLink *inlink) @@ -136,40 +181,24 @@ static int config_input(AVFilterLink *inlink) AVFilterContext *context = inlink->dst; DnnProcessingContext *ctx = context->priv; DNNReturnType result; - DNNData dnn_data; + DNNData model_input; + int check; - result = 
ctx->model->get_input(ctx->model->model, &dnn_data, ctx->model_inputname); + result = ctx->model->get_input(ctx->model->model, &model_input, ctx->model_inputname); if (result != DNN_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n"); return AVERROR(EIO); } - // the design is to add explicit scale filter before this filter - if (dnn_data.height != -1 && dnn_data.height != inlink->h) { - av_log(ctx, AV_LOG_ERROR, "the model requires frame height %d but got %d\n", - dnn_data.height, inlink->h); - return AVERROR(EIO); - } - if (dnn_data.width != -1 && dnn_data.width != inlink->w) { - av_log(ctx, AV_LOG_ERROR, "the model requires frame width %d but got %d\n", - dnn_data.width, inlink->w); - return AVERROR(EIO); - } - - if (dnn_data.channels != 3) { - av_log(ctx, AV_LOG_ERROR, "the model requires input channels %d\n", - dnn_data.channels); - return AVERROR(EIO); - } - if (dnn_data.dt != DNN_FLOAT && dnn_data.dt != DNN_UINT8) { - av_log(ctx, AV_LOG_ERROR, "only support dnn models with input data type as float32 and uint8.\n"); - return AVERROR(EIO); + check = check_modelinput_inlink(&model_input, inlink); + if (check != 0) { + return check; } ctx->input.width = inlink->w; ctx->input.height = inlink->h; - ctx->input.channels = dnn_data.channels; - ctx->input.dt = dnn_data.dt; + ctx->input.channels = model_input.channels; + ctx->input.dt = model_input.dt; result = (ctx->model->set_input_output)(ctx->model->model, &ctx->input, ctx->model_inputname, @@ -201,59 +230,81 @@ static int config_output(AVFilterLink *outlink) return 0; } -static int copy_from_frame_to_dnn(DNNData *dnn_data, const AVFrame *in) +static int copy_from_frame_to_dnn(DNNData *dnn_input, const AVFrame *frame) { - // extend this function to support more formats - av_assert0(in->format == AV_PIX_FMT_RGB24 || in->format == AV_PIX_FMT_BGR24); + int bytewidth = av_image_get_linesize(frame->format, frame->width, 0); - if (dnn_data->dt == DNN_FLOAT) { - float *dnn_input = dnn_data->data; - for (int i = 0; i < in->height; i++) { - for(int j = 0; j < in->width * 3; j++) { - int k = i * in->linesize[0] + j; - int t = i * in->width * 3 + j; - dnn_input[t] = in->data[0][k] / 255.0f; - } - } - } else { - uint8_t *dnn_input = dnn_data->data; - av_assert0(dnn_data->dt == DNN_UINT8); - for (int i = 0; i < in->height; i++) { - for(int j = 0; j < in->width * 3; j++) { - int k = i * in->linesize[0] + j; - int t = i * in->width * 3 + j; - dnn_input[t] = in->data[0][k]; + switch (frame->format) { + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: + if (dnn_input->dt == DNN_FLOAT) { + float *dnn_input_data = dnn_input->data; + for (int i = 0; i < frame->height; i++) { + for(int j = 0; j < frame->width * 3; j++) { + int k = i * frame->linesize[0] + j; + int t = i * frame->width * 3 + j; + dnn_input_data[t] = frame->data[0][k] / 255.0f; + } } + } else { + av_assert0(dnn_input->dt == DNN_UINT8); + av_image_copy_plane(dnn_input->data, bytewidth, + frame->data[0], frame->linesize[0], + bytewidth, frame->height); } + return 0; + case AV_PIX_FMT_GRAY8: + case AV_PIX_FMT_GRAYF32: + av_image_copy_plane(dnn_input->data, bytewidth, + frame->data[0], frame->linesize[0], + bytewidth, frame->height); + return 0; + default: + return AVERROR(EIO); } return 0; } -static int copy_from_dnn_to_frame(AVFrame *out, const DNNData *dnn_data) +static int copy_from_dnn_to_frame(AVFrame *frame, const DNNData *dnn_output) { - // extend this function to support more formats - av_assert0(out->format == AV_PIX_FMT_RGB24 || out->format == AV_PIX_FMT_BGR24); + 
int bytewidth = av_image_get_linesize(frame->format, frame->width, 0); - if (dnn_data->dt == DNN_FLOAT) { - float *dnn_output = dnn_data->data; - for (int i = 0; i < out->height; i++) { - for(int j = 0; j < out->width * 3; j++) { - int k = i * out->linesize[0] + j; - int t = i * out->width * 3 + j; - out->data[0][k] = av_clip_uintp2((int)(dnn_output[t] * 255.0f), 8); - } - } - } else { - uint8_t *dnn_output = dnn_data->data; - av_assert0(dnn_data->dt == DNN_UINT8); - for (int i = 0; i < out->height; i++) { - for(int j = 0; j < out->width * 3; j++) { - int k = i * out->linesize[0] + j; - int t = i * out->width * 3 + j; - out->data[0][k] = dnn_output[t]; + switch (frame->format) { + case AV_PIX_FMT_RGB24: + case AV_PIX_FMT_BGR24: + if (dnn_output->dt == DNN_FLOAT) { + float *dnn_output_data = dnn_output->data; + for (int i = 0; i < frame->height; i++) { + for(int j = 0; j < frame->width * 3; j++) { + int k = i * frame->linesize[0] + j; + int t = i * frame->width * 3 + j; + frame->data[0][k] = av_clip_uintp2((int)(dnn_output_data[t] * 255.0f), 8); + } } + } else { + av_assert0(dnn_output->dt == DNN_UINT8); + av_image_copy_plane(frame->data[0], frame->linesize[0], + dnn_output->data, bytewidth, + bytewidth, frame->height); } + return 0; + case AV_PIX_FMT_GRAY8: + // it is possible that data type of dnn output is float32, + // need to add support for such case when needed. + av_assert0(dnn_output->dt == DNN_UINT8); + av_image_copy_plane(frame->data[0], frame->linesize[0], + dnn_output->data, bytewidth, + bytewidth, frame->height); + return 0; + case AV_PIX_FMT_GRAYF32: + av_assert0(dnn_output->dt == DNN_FLOAT); + av_image_copy_plane(frame->data[0], frame->linesize[0], + dnn_output->data, bytewidth, + bytewidth, frame->height); + return 0; + default: + return AVERROR(EIO); } return 0; @@ -275,7 +326,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) av_frame_free(&in); return AVERROR(EIO); } - av_assert0(ctx->output.channels == 3); out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c index 8f4badbdb5..aea17b6793 100644 --- a/libavfilter/vf_drawtext.c +++ b/libavfilter/vf_drawtext.c @@ -829,6 +829,7 @@ static int config_input(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; DrawTextContext *s = ctx->priv; + char *expr; int ret; ff_draw_init(&s->dc, inlink->format, FF_DRAW_PROCESS_ALPHA); @@ -854,14 +855,15 @@ static int config_input(AVFilterLink *inlink) av_expr_free(s->a_pexpr); s->x_pexpr = s->y_pexpr = s->a_pexpr = NULL; - if ((ret = av_expr_parse(&s->x_pexpr, s->x_expr, var_names, + if ((ret = av_expr_parse(&s->x_pexpr, expr = s->x_expr, var_names, NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 || - (ret = av_expr_parse(&s->y_pexpr, s->y_expr, var_names, + (ret = av_expr_parse(&s->y_pexpr, expr = s->y_expr, var_names, NULL, NULL, fun2_names, fun2, 0, ctx)) < 0 || - (ret = av_expr_parse(&s->a_pexpr, s->a_expr, var_names, - NULL, NULL, fun2_names, fun2, 0, ctx)) < 0) - + (ret = av_expr_parse(&s->a_pexpr, expr = s->a_expr, var_names, + NULL, NULL, fun2_names, fun2, 0, ctx)) < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to parse expression: %s \n", expr); return AVERROR(EINVAL); + } return 0; } diff --git a/libavfilter/vf_eq.c b/libavfilter/vf_eq.c index 914a07f176..de93901508 100644 --- a/libavfilter/vf_eq.c +++ b/libavfilter/vf_eq.c @@ -353,24 +353,24 @@ static const AVFilterPad eq_outputs[] = { #define OFFSET(x) offsetof(EQContext, x) #define FLAGS 
AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM - +#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption eq_options[] = { { "contrast", "set the contrast adjustment, negative values give a negative image", - OFFSET(contrast_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(contrast_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "brightness", "set the brightness adjustment", - OFFSET(brightness_expr), AV_OPT_TYPE_STRING, {.str = "0.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(brightness_expr), AV_OPT_TYPE_STRING, {.str = "0.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "saturation", "set the saturation adjustment", - OFFSET(saturation_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(saturation_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "gamma", "set the initial gamma value", - OFFSET(gamma_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(gamma_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "gamma_r", "gamma value for red", - OFFSET(gamma_r_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(gamma_r_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "gamma_g", "gamma value for green", - OFFSET(gamma_g_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(gamma_g_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "gamma_b", "gamma value for blue", - OFFSET(gamma_b_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(gamma_b_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "gamma_weight", "set the gamma weight which reduces the effect of gamma on bright areas", - OFFSET(gamma_weight_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, FLAGS }, + OFFSET(gamma_weight_expr), AV_OPT_TYPE_STRING, {.str = "1.0"}, CHAR_MIN, CHAR_MAX, TFLAGS }, { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" }, { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" }, { "frame", "eval expressions per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" }, @@ -390,5 +390,5 @@ AVFilter ff_vf_eq = { .query_formats = query_formats, .init = initialize, .uninit = uninit, - .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, + .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, }; diff --git a/libavfilter/vf_fade.c b/libavfilter/vf_fade.c index bec765a09e..6fa02e6bfa 100644 --- a/libavfilter/vf_fade.c +++ b/libavfilter/vf_fade.c @@ -56,9 +56,11 @@ typedef struct FadeContext { int start_frame, nb_frames; int hsub, vsub, bpp; unsigned int black_level, black_level_scaled; + uint8_t is_rgb; uint8_t is_packed_rgb; uint8_t rgba_map[4]; int alpha; + int is_planar; uint64_t start_time, duration; enum {VF_FADE_WAITING=0, VF_FADE_FADING, VF_FADE_DONE} fade_state; uint8_t color_rgba[4]; ///< fade color @@ -107,23 +109,27 @@ static int query_formats(AVFilterContext *ctx) AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, + AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE }; static const enum AVPixelFormat pix_fmts_rgb[] = { AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_ARGB, 
AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, + AV_PIX_FMT_GBRP, AV_PIX_FMT_NONE }; static const enum AVPixelFormat pix_fmts_alpha[] = { AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, + AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE }; static const enum AVPixelFormat pix_fmts_rgba[] = { AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA, AV_PIX_FMT_BGRA, + AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE }; AVFilterFormats *fmts_list; @@ -159,11 +165,15 @@ static int config_props(AVFilterLink *inlink) s->hsub = pixdesc->log2_chroma_w; s->vsub = pixdesc->log2_chroma_h; + ff_fill_rgba_map(s->rgba_map, inlink->format); + s->bpp = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR ? 1 : av_get_bits_per_pixel(pixdesc) >> 3; s->alpha &= !!(pixdesc->flags & AV_PIX_FMT_FLAG_ALPHA); - s->is_packed_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0; + s->is_planar = pixdesc->flags & AV_PIX_FMT_FLAG_PLANAR; + s->is_rgb = pixdesc->flags & AV_PIX_FMT_FLAG_RGB; + s->is_packed_rgb = !s->is_planar && s->is_rgb; /* use CCIR601/709 black level for studio-level pixel non-alpha components */ s->black_level = @@ -199,6 +209,29 @@ static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame, } } +static av_always_inline void filter_rgb_planar(FadeContext *s, const AVFrame *frame, + int slice_start, int slice_end, + int do_alpha) +{ + int i, j; + const uint8_t *c = s->color_rgba; + + for (i = slice_start; i < slice_end; i++) { + uint8_t *pg = frame->data[0] + i * frame->linesize[0]; + uint8_t *pb = frame->data[1] + i * frame->linesize[1]; + uint8_t *pr = frame->data[2] + i * frame->linesize[2]; + uint8_t *pa = frame->data[3] + i * frame->linesize[3]; + for (j = 0; j < frame->width; j++) { +#define INTERPP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)c_name - (int)c[c_idx]) * s->factor + (1<<15)) >> 16) + pr[j] = INTERPP(pr[j], 1); + pg[j] = INTERPP(pg[j], 0); + pb[j] = INTERPP(pb[j], 2); + if (do_alpha) + pa[j] = INTERPP(pa[j], 3); + } + } +} + static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { @@ -207,7 +240,11 @@ static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int slice_start = (frame->height * jobnr ) / nb_jobs; int slice_end = (frame->height * (jobnr+1)) / nb_jobs; - if (s->alpha) filter_rgb(s, frame, slice_start, slice_end, 1, 4); + if (s->is_planar && s->alpha) + filter_rgb_planar(s, frame, slice_start, slice_end, 1); + else if (s->is_planar) + filter_rgb_planar(s, frame, slice_start, slice_end, 0); + else if (s->alpha) filter_rgb(s, frame, slice_start, slice_end, 1, 4); else if (s->bpp == 3) filter_rgb(s, frame, slice_start, slice_end, 0, 3); else if (s->bpp == 4) filter_rgb(s, frame, slice_start, slice_end, 0, 4); else av_assert0(0); @@ -224,14 +261,16 @@ static int filter_slice_luma(AVFilterContext *ctx, void *arg, int jobnr, int slice_end = (frame->height * (jobnr+1)) / nb_jobs; int i, j; - for (i = slice_start; i < slice_end; i++) { - uint8_t *p = frame->data[0] + i * frame->linesize[0]; - for (j = 0; j < frame->width * s->bpp; j++) { - /* s->factor is using 16 lower-order bits for decimal - * places. 32768 = 1 << 15, it is an integer representation - * of 0.5 and is for rounding. 
*/ - *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16; - p++; + for (int k = 0; k < 1 + 2 * (s->is_planar && s->is_rgb); k++) { + for (i = slice_start; i < slice_end; i++) { + uint8_t *p = frame->data[k] + i * frame->linesize[k]; + for (j = 0; j < frame->width * s->bpp; j++) { + /* s->factor is using 16 lower-order bits for decimal + * places. 32768 = 1 << 15, it is an integer representation + * of 0.5 and is for rounding. */ + *p = ((*p - s->black_level) * s->factor + s->black_level_scaled) >> 16; + p++; + } } } @@ -348,7 +387,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) if (s->alpha) { ctx->internal->execute(ctx, filter_slice_alpha, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(ctx))); - } else if (s->is_packed_rgb && !s->black_fade) { + } else if (s->is_rgb && !s->black_fade) { ctx->internal->execute(ctx, filter_slice_rgb, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(ctx))); } else { @@ -356,7 +395,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) ctx->internal->execute(ctx, filter_slice_luma, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(ctx))); - if (frame->data[1] && frame->data[2]) { + if (frame->data[1] && frame->data[2] && !s->is_rgb) { /* chroma planes */ ctx->internal->execute(ctx, filter_slice_chroma, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(ctx))); diff --git a/libavfilter/vf_freezeframes.c b/libavfilter/vf_freezeframes.c new file mode 100644 index 0000000000..b6cd5dba68 --- /dev/null +++ b/libavfilter/vf_freezeframes.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2019 Paul B Mahol + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/common.h" +#include "libavutil/internal.h" +#include "libavutil/opt.h" + +#include "avfilter.h" +#include "filters.h" +#include "internal.h" +#include "video.h" + +typedef struct FreezeFramesContext { + const AVClass *class; + int64_t first, last, replace; + + AVFrame *replace_frame; +} FreezeFramesContext; + +#define OFFSET(x) offsetof(FreezeFramesContext, x) +#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM) + +static const AVOption freezeframes_options[] = { + { "first", "set first frame to freeze", OFFSET(first), AV_OPT_TYPE_INT64, {.i64=0}, 0, INT64_MAX, FLAGS }, + { "last", "set last frame to freeze", OFFSET(last), AV_OPT_TYPE_INT64, {.i64=0}, 0, INT64_MAX, FLAGS }, + { "replace", "set frame to replace", OFFSET(replace), AV_OPT_TYPE_INT64, {.i64=0}, 0, INT64_MAX, FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(freezeframes); + +static int config_output(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *sourcelink = ctx->inputs[0]; + AVFilterLink *replacelink = ctx->inputs[1]; + + if (sourcelink->w != replacelink->w || sourcelink->h != replacelink->h) { + av_log(ctx, AV_LOG_ERROR, + "Input frame sizes do not match (%dx%d vs %dx%d).\n", + sourcelink->w, sourcelink->h, + replacelink->w, replacelink->h); + return AVERROR(EINVAL); + } + + outlink->w = sourcelink->w; + outlink->h = sourcelink->h; + outlink->time_base = sourcelink->time_base; + outlink->sample_aspect_ratio = sourcelink->sample_aspect_ratio; + outlink->frame_rate = sourcelink->frame_rate; + + return 0; +} + +static int activate(AVFilterContext *ctx) +{ + AVFilterLink *outlink = ctx->outputs[0]; + FreezeFramesContext *s = ctx->priv; + AVFrame *frame = NULL; + int drop = ctx->inputs[0]->frame_count_out >= s->first && + ctx->inputs[0]->frame_count_out <= s->last; + int replace = ctx->inputs[1]->frame_count_out == s->replace; + int ret; + + FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx); + + if (drop && s->replace_frame) { + ret = ff_inlink_consume_frame(ctx->inputs[0], &frame); + if (ret < 0) + return ret; + + if (frame) { + int64_t dropped_pts = frame->pts; + + av_frame_free(&frame); + frame = av_frame_clone(s->replace_frame); + if (!frame) + return AVERROR(ENOMEM); + frame->pts = dropped_pts; + return ff_filter_frame(outlink, frame); + } + } else if (!drop) { + ret = ff_inlink_consume_frame(ctx->inputs[0], &frame); + if (ret < 0) + return ret; + + if (frame) + return ff_filter_frame(outlink, frame); + } + + ret = ff_inlink_consume_frame(ctx->inputs[1], &frame); + if (ret < 0) + return ret; + if (replace && frame) { + s->replace_frame = frame; + } else if (frame) { + av_frame_free(&frame); + } + + FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink); + FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink); + + if (!drop || (drop && s->replace_frame)) + FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[0]); + if (!s->replace_frame) + FF_FILTER_FORWARD_WANTED(outlink, ctx->inputs[1]); + + return FFERROR_NOT_READY; +} + +static av_cold void uninit(AVFilterContext *ctx) +{ + FreezeFramesContext *s = ctx->priv; + + av_frame_free(&s->replace_frame); +} + +static const AVFilterPad freezeframes_inputs[] = { + { + .name = "source", + .type = AVMEDIA_TYPE_VIDEO, + }, + { + .name = 
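/* Two inputs: "source" above and "replace" here.  Frames first..last
 * (inclusive, counted from 0) of the source are replaced by a clone of frame
 * number "replace" from the second input, keeping the dropped frame's pts.
 * Illustrative use (file names arbitrary):
 *   ffmpeg -i main.mp4 -i other.mp4 \
 *     -lavfi "[0:v][1:v]freezeframes=first=25:last=50:replace=0" out.mp4 */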
"replace", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL }, +}; + +static const AVFilterPad freezeframes_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + }, + { NULL }, +}; + +AVFilter ff_vf_freezeframes = { + .name = "freezeframes", + .description = NULL_IF_CONFIG_SMALL("Freeze video frames."), + .priv_size = sizeof(FreezeFramesContext), + .priv_class = &freezeframes_class, + .inputs = freezeframes_inputs, + .outputs = freezeframes_outputs, + .activate = activate, + .uninit = uninit, +}; diff --git a/libavfilter/vf_geq.c b/libavfilter/vf_geq.c index e3267e331f..2905efae24 100644 --- a/libavfilter/vf_geq.c +++ b/libavfilter/vf_geq.c @@ -4,19 +4,19 @@ * * This file is part of FFmpeg. * - * FFmpeg is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with FFmpeg; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** @@ -33,6 +33,8 @@ #include "libavutil/pixdesc.h" #include "internal.h" +#define NB_PLANES 4 + enum InterpolationMethods { INTERP_NEAREST, INTERP_BILINEAR, @@ -44,7 +46,7 @@ enum { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_N, VAR_ typedef struct GEQContext { const AVClass *class; - AVExpr *e[4]; ///< expressions for each plane + AVExpr *e[NB_PLANES]; ///< expressions for each plane char *expr_str[4+3]; ///< expression strings for each plane AVFrame *picref; ///< current input buffer uint8_t *dst; ///< reference pointer to the 8bits output @@ -55,6 +57,9 @@ typedef struct GEQContext { int interpolation; int is_rgb; int bps; + + double *pixel_sums[NB_PLANES]; + int needs_sum[NB_PLANES]; } GEQContext; enum { Y = 0, U, V, A, G, B, R }; @@ -133,6 +138,76 @@ static inline double getpix(void *priv, double x, double y, int plane) } } +static int calculate_sums(GEQContext *geq, int plane, int w, int h) +{ + int xi, yi; + AVFrame *picref = geq->picref; + const uint8_t *src = picref->data[plane]; + int linesize = picref->linesize[plane]; + + if (!geq->pixel_sums[plane]) + geq->pixel_sums[plane] = av_malloc_array(w, h * sizeof (*geq->pixel_sums[plane])); + if (!geq->pixel_sums[plane]) + return AVERROR(ENOMEM); + if (geq->bps > 8) + linesize /= 2; + for (yi = 0; yi < h; yi ++) { + if (geq->bps > 8) { + const uint16_t *src16 = (const uint16_t*)src; + double linesum = 0; + + for (xi = 0; xi < w; xi ++) { + linesum += src16[xi + yi * linesize]; + geq->pixel_sums[plane][xi + yi * w] = linesum; + } + } else { + double linesum = 0; + + for (xi = 0; xi < w; xi ++) { + linesum += src[xi + yi * linesize]; + geq->pixel_sums[plane][xi + yi * w] = linesum; + } + } + if (yi) + for (xi = 0; xi < w; xi ++) { + geq->pixel_sums[plane][xi + yi * w] += geq->pixel_sums[plane][xi + yi * w - w]; + } + } + return 0; +} + +static inline double getpix_integrate_internal(GEQContext *geq, int x, int y, int plane, int w, int h) +{ + if (x > w - 1) { + double boundary = getpix_integrate_internal(geq, w - 1, y, plane, w, h); + return 2*boundary - getpix_integrate_internal(geq, 2*(w - 1) - x, y, plane, w, h); + } else if (y > h - 1) { + double boundary = getpix_integrate_internal(geq, x, h - 1, plane, w, h); + return 2*boundary - getpix_integrate_internal(geq, x, 2*(h - 1) - y, plane, w, h); + } else if (x < 0) { + if (x == -1) return 0; + return - getpix_integrate_internal(geq, -x-2, y, plane, w, h); + } else if (y < 0) { + if (y == -1) return 0; + return - getpix_integrate_internal(geq, x, -y-2, plane, w, h); + } + + return geq->pixel_sums[plane][x + y * w]; +} + +static inline double getpix_integrate(void *priv, double x, double y, int plane) { + GEQContext *geq = priv; + AVFrame *picref = geq->picref; + const uint8_t *src = picref->data[plane]; + const int w = (plane == 1 || plane == 2) ? AV_CEIL_RSHIFT(picref->width, geq->hsub) : picref->width; + const int h = (plane == 1 || plane == 2) ? 
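/* calculate_sums() fills pixel_sums[] with a per-plane integral image:
 * entry x + y*w holds the sum of all samples with coordinates <= (x, y).
 * getpix_integrate_internal() mirrors out-of-range coordinates, so the usual
 * four-corner difference lumsum(x,y) - lumsum(x-n,y) - lumsum(x,y-n) +
 * lumsum(x-n,y-n) yields the sum over an n x n box in constant time. */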
AV_CEIL_RSHIFT(picref->height, geq->vsub) : picref->height; + + if (!src) + return 0; + + return getpix_integrate_internal(geq, lrint(av_clipd(x, -w, 2*w)), lrint(av_clipd(y, -h, 2*h)), plane, w, h); +} + //TODO: cubic interpolate //TODO: keep the last few frames static double lum(void *priv, double x, double y) { return getpix(priv, x, y, 0); } @@ -140,6 +215,11 @@ static double cb(void *priv, double x, double y) { return getpix(priv, x, y, 1) static double cr(void *priv, double x, double y) { return getpix(priv, x, y, 2); } static double alpha(void *priv, double x, double y) { return getpix(priv, x, y, 3); } +static double lumsum(void *priv, double x, double y) { return getpix_integrate(priv, x, y, 0); } +static double cbsum(void *priv, double x, double y) { return getpix_integrate(priv, x, y, 1); } +static double crsub(void *priv, double x, double y) { return getpix_integrate(priv, x, y, 2); } +static double alphasum(void *priv, double x, double y) { return getpix_integrate(priv, x, y, 3); } + static av_cold int geq_init(AVFilterContext *ctx) { GEQContext *geq = ctx->priv; @@ -188,17 +268,33 @@ static av_cold int geq_init(AVFilterContext *ctx) goto end; } - for (plane = 0; plane < 4; plane++) { - static double (*p[])(void *, double, double) = { lum, cb, cr, alpha }; - static const char *const func2_yuv_names[] = { "lum", "cb", "cr", "alpha", "p", NULL }; - static const char *const func2_rgb_names[] = { "g", "b", "r", "alpha", "p", NULL }; + for (plane = 0; plane < NB_PLANES; plane++) { + static double (*p[])(void *, double, double) = { + lum , cb , cr , alpha , + lumsum, cbsum, crsub, alphasum, + }; + static const char *const func2_yuv_names[] = { + "lum" , "cb" , "cr" , "alpha" , "p", + "lumsum", "cbsum", "crsum", "alphasum", "psum", + NULL }; + static const char *const func2_rgb_names[] = { + "g" , "b" , "r" , "alpha" , "p", + "gsum", "bsum", "rsum", "alphasum", "psum", + NULL }; const char *const *func2_names = geq->is_rgb ? func2_rgb_names : func2_yuv_names; - double (*func2[])(void *, double, double) = { lum, cb, cr, alpha, p[plane], NULL }; + double (*func2[])(void *, double, double) = { + lum , cb , cr , alpha , p[plane], + lumsum, cbsum, crsub, alphasum, p[plane + 4], + NULL }; + int counter[10] = {0}; ret = av_expr_parse(&geq->e[plane], geq->expr_str[plane < 3 && geq->is_rgb ? plane+4 : plane], var_names, NULL, NULL, func2_names, func2, 0, ctx); if (ret < 0) break; + + av_expr_count_func(geq->e[plane], counter, FF_ARRAY_ELEMS(counter), 2); + geq->needs_sum[plane] = counter[5] + counter[6] + counter[7] + counter[8] + counter[9]; } end: @@ -355,6 +451,9 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in) td.plane = plane; td.linesize = linesize; + if (geq->needs_sum[plane]) + calculate_sums(geq, plane, width, height); + ctx->internal->execute(ctx, slice_geq_filter, &td, NULL, FFMIN(height, nb_threads)); } @@ -369,6 +468,8 @@ static av_cold void geq_uninit(AVFilterContext *ctx) for (i = 0; i < FF_ARRAY_ELEMS(geq->e); i++) av_expr_free(geq->e[i]); + for (i = 0; i < NB_PLANES; i++) + av_freep(&geq->pixel_sums); } static const AVFilterPad geq_inputs[] = { diff --git a/libavfilter/vf_histogram.c b/libavfilter/vf_histogram.c index 5185992de6..db1962edc1 100644 --- a/libavfilter/vf_histogram.c +++ b/libavfilter/vf_histogram.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2013 Paul B Mahol + * Copyright (c) 2012-2019 Paul B Mahol * * This file is part of FFmpeg. 
* @@ -19,6 +19,7 @@ */ #include "libavutil/avassert.h" +#include "libavutil/colorspace.h" #include "libavutil/opt.h" #include "libavutil/parseutils.h" #include "libavutil/pixdesc.h" @@ -31,13 +32,19 @@ typedef struct HistogramContext { const AVClass *class; ///< AVClass context for log and options purpose + int thistogram; + int envelope; unsigned histogram[256*256]; int histogram_size; + int width; + int x_pos; int mult; int ncomp; int dncomp; uint8_t bg_color[4]; uint8_t fg_color[4]; + uint8_t envelope_rgba[4]; + uint8_t envelope_color[4]; int level_height; int scale_height; int display_mode; @@ -48,25 +55,30 @@ typedef struct HistogramContext { float bgopacity; int planewidth[4]; int planeheight[4]; + int start[4]; + AVFrame *out; } HistogramContext; #define OFFSET(x) offsetof(HistogramContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define COMMON_OPTIONS \ + { "display_mode", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "display_mode"}, \ + { "d", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "display_mode"}, \ + { "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "display_mode" }, \ + { "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "display_mode" }, \ + { "stack", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "display_mode" }, \ + { "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"}, \ + { "m", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"}, \ + { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" }, \ + { "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" }, \ + { "components", "set color components to display", OFFSET(components), AV_OPT_TYPE_INT, {.i64=7}, 1, 15, FLAGS}, \ + { "c", "set color components to display", OFFSET(components), AV_OPT_TYPE_INT, {.i64=7}, 1, 15, FLAGS}, + static const AVOption histogram_options[] = { { "level_height", "set level height", OFFSET(level_height), AV_OPT_TYPE_INT, {.i64=200}, 50, 2048, FLAGS}, { "scale_height", "set scale height", OFFSET(scale_height), AV_OPT_TYPE_INT, {.i64=12}, 0, 40, FLAGS}, - { "display_mode", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "display_mode"}, - { "d", "set display mode", OFFSET(display_mode), AV_OPT_TYPE_INT, {.i64=2}, 0, 2, FLAGS, "display_mode"}, - { "overlay", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "display_mode" }, - { "parade", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "display_mode" }, - { "stack", NULL, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "display_mode" }, - { "levels_mode", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"}, - { "m", "set levels mode", OFFSET(levels_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "levels_mode"}, - { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "levels_mode" }, - { "logarithmic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "levels_mode" }, - { "components", "set color components to display", OFFSET(components), AV_OPT_TYPE_INT, {.i64=7}, 1, 15, FLAGS}, - { "c", "set color components to display", OFFSET(components), AV_OPT_TYPE_INT, {.i64=7}, 1, 15, FLAGS}, + COMMON_OPTIONS { "fgopacity", "set foreground opacity", OFFSET(fgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.7}, 0, 1, FLAGS}, { "f", "set foreground opacity", OFFSET(fgopacity), AV_OPT_TYPE_FLOAT, 
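/* COMMON_OPTIONS groups the display_mode, levels_mode and components options
 * so that the same definitions are shared between this "histogram" filter
 * and the temporal "thistogram" filter registered at the end of this file. */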
{.dbl=0.7}, 0, 1, FLAGS}, { "bgopacity", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.5}, 0, 1, FLAGS}, @@ -87,6 +99,7 @@ static const enum AVPixelFormat levels_in_pix_fmts[] = { AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, + AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12, @@ -110,7 +123,7 @@ static const enum AVPixelFormat levels_out_yuv10_pix_fmts[] = { }; static const enum AVPixelFormat levels_out_yuv12_pix_fmts[] = { - AV_PIX_FMT_YUV444P12, + AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_NONE }; @@ -192,12 +205,12 @@ static const uint8_t white_gbrp_color[4] = { 255, 255, 255, 255 }; static int config_input(AVFilterLink *inlink) { - HistogramContext *h = inlink->dst->priv; + HistogramContext *s = inlink->dst->priv; - h->desc = av_pix_fmt_desc_get(inlink->format); - h->ncomp = h->desc->nb_components; - h->histogram_size = 1 << h->desc->comp[0].depth; - h->mult = h->histogram_size / 256; + s->desc = av_pix_fmt_desc_get(inlink->format); + s->ncomp = s->desc->nb_components; + s->histogram_size = 1 << s->desc->comp[0].depth; + s->mult = s->histogram_size / 256; switch (inlink->format) { case AV_PIX_FMT_GBRAP12: @@ -207,21 +220,29 @@ static int config_input(AVFilterLink *inlink) case AV_PIX_FMT_GBRP9: case AV_PIX_FMT_GBRAP: case AV_PIX_FMT_GBRP: - memcpy(h->bg_color, black_gbrp_color, 4); - memcpy(h->fg_color, white_gbrp_color, 4); + memcpy(s->bg_color, black_gbrp_color, 4); + memcpy(s->fg_color, white_gbrp_color, 4); + s->start[0] = s->start[1] = s->start[2] = s->start[3] = 0; + memcpy(s->envelope_color, s->envelope_rgba, 4); break; default: - memcpy(h->bg_color, black_yuva_color, 4); - memcpy(h->fg_color, white_yuva_color, 4); + memcpy(s->bg_color, black_yuva_color, 4); + memcpy(s->fg_color, white_yuva_color, 4); + s->start[0] = s->start[3] = 0; + s->start[1] = s->start[2] = s->histogram_size / 2; + s->envelope_color[0] = RGB_TO_Y_BT709(s->envelope_rgba[0], s->envelope_rgba[1], s->envelope_rgba[2]); + s->envelope_color[1] = RGB_TO_U_BT709(s->envelope_rgba[0], s->envelope_rgba[1], s->envelope_rgba[2], 0); + s->envelope_color[2] = RGB_TO_V_BT709(s->envelope_rgba[0], s->envelope_rgba[1], s->envelope_rgba[2], 0); + s->envelope_color[3] = s->envelope_rgba[3]; } - h->fg_color[3] = h->fgopacity * 255; - h->bg_color[3] = h->bgopacity * 255; + s->fg_color[3] = s->fgopacity * 255; + s->bg_color[3] = s->bgopacity * 255; - h->planeheight[1] = h->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, h->desc->log2_chroma_h); - h->planeheight[0] = h->planeheight[3] = inlink->h; - h->planewidth[1] = h->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, h->desc->log2_chroma_w); - h->planewidth[0] = h->planewidth[3] = inlink->w; + s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h); + s->planeheight[0] = s->planeheight[3] = inlink->h; + s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, s->desc->log2_chroma_w); + s->planewidth[0] = s->planewidth[3] = inlink->w; return 0; } @@ -229,18 +250,29 @@ static int config_input(AVFilterLink *inlink) static int config_output(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; - HistogramContext *h = ctx->priv; + HistogramContext *s = ctx->priv; int ncomp = 0, i; - for (i = 0; i < h->ncomp; 
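/* Output geometry: in thistogram mode the frame is width x histogram_size
 * (one column per input frame, one row per bin; width defaults to the input
 * width), while the classic histogram stays histogram_size wide and
 * level_height + scale_height tall.  Either dimension is multiplied by the
 * number of selected components for parade (1) or stack (2) display_mode. */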
i++) { - if ((1 << i) & h->components) + if (!strcmp(ctx->filter->name, "thistogram")) + s->thistogram = 1; + + for (i = 0; i < s->ncomp; i++) { + if ((1 << i) & s->components) ncomp++; } - outlink->w = h->histogram_size * FFMAX(ncomp * (h->display_mode == 1), 1); - outlink->h = (h->level_height + h->scale_height) * FFMAX(ncomp * (h->display_mode == 2), 1); - h->odesc = av_pix_fmt_desc_get(outlink->format); - h->dncomp = h->odesc->nb_components; + if (s->thistogram) { + if (!s->width) + s->width = ctx->inputs[0]->w; + outlink->w = s->width * FFMAX(ncomp * (s->display_mode == 1), 1); + outlink->h = s->histogram_size * FFMAX(ncomp * (s->display_mode == 2), 1); + } else { + outlink->w = s->histogram_size * FFMAX(ncomp * (s->display_mode == 1), 1); + outlink->h = (s->level_height + s->scale_height) * FFMAX(ncomp * (s->display_mode == 2), 1); + } + + s->odesc = av_pix_fmt_desc_get(outlink->format); + s->dncomp = s->odesc->nb_components; outlink->sample_aspect_ratio = (AVRational){1,1}; return 0; @@ -248,111 +280,179 @@ static int config_output(AVFilterLink *outlink) static int filter_frame(AVFilterLink *inlink, AVFrame *in) { - HistogramContext *h = inlink->dst->priv; + HistogramContext *s = inlink->dst->priv; AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; - AVFrame *out; + AVFrame *out = s->out; int i, j, k, l, m; - out = ff_get_video_buffer(outlink, outlink->w, outlink->h); - if (!out) { - av_frame_free(&in); - return AVERROR(ENOMEM); - } + if (!s->thistogram || !out) { + out = ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!out) { + av_frame_free(&in); + return AVERROR(ENOMEM); + } + s->out = out; - out->pts = in->pts; + for (k = 0; k < 4 && out->data[k]; k++) { + const int is_chroma = (k == 1 || k == 2); + const int dst_h = AV_CEIL_RSHIFT(outlink->h, (is_chroma ? s->odesc->log2_chroma_h : 0)); + const int dst_w = AV_CEIL_RSHIFT(outlink->w, (is_chroma ? s->odesc->log2_chroma_w : 0)); - for (k = 0; k < 4 && out->data[k]; k++) { - const int is_chroma = (k == 1 || k == 2); - const int dst_h = AV_CEIL_RSHIFT(outlink->h, (is_chroma ? h->odesc->log2_chroma_h : 0)); - const int dst_w = AV_CEIL_RSHIFT(outlink->w, (is_chroma ? 
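/* In thistogram mode the destination frame is allocated and cleared only
 * once and kept in s->out; every input frame then paints a single column at
 * s->x_pos, which advances and wraps at s->width, and a clone of s->out is
 * pushed downstream.  The classic histogram still gets a freshly cleared
 * output frame per input frame. */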
h->odesc->log2_chroma_w : 0)); + if (s->histogram_size <= 256) { + for (i = 0; i < dst_h ; i++) + memset(out->data[s->odesc->comp[k].plane] + + i * out->linesize[s->odesc->comp[k].plane], + s->bg_color[k], dst_w); + } else { + const int mult = s->mult; - if (h->histogram_size <= 256) { - for (i = 0; i < dst_h ; i++) - memset(out->data[h->odesc->comp[k].plane] + - i * out->linesize[h->odesc->comp[k].plane], - h->bg_color[k], dst_w); - } else { - const int mult = h->mult; - - for (i = 0; i < dst_h ; i++) - for (j = 0; j < dst_w; j++) - AV_WN16(out->data[h->odesc->comp[k].plane] + - i * out->linesize[h->odesc->comp[k].plane] + j * 2, - h->bg_color[k] * mult); + for (i = 0; i < dst_h ; i++) + for (j = 0; j < dst_w; j++) + AV_WN16(out->data[s->odesc->comp[k].plane] + + i * out->linesize[s->odesc->comp[k].plane] + j * 2, + s->bg_color[k] * mult); + } } } - for (m = 0, k = 0; k < h->ncomp; k++) { - const int p = h->desc->comp[k].plane; - const int height = h->planeheight[p]; - const int width = h->planewidth[p]; + for (m = 0, k = 0; k < s->ncomp; k++) { + const int p = s->desc->comp[k].plane; + const int max_value = s->histogram_size - 1 - s->start[p]; + const int height = s->planeheight[p]; + const int width = s->planewidth[p]; double max_hval_log; unsigned max_hval = 0; - int start, startx; + int starty, startx; - if (!((1 << k) & h->components)) + if (!((1 << k) & s->components)) continue; - startx = m * h->histogram_size * (h->display_mode == 1); - start = m++ * (h->level_height + h->scale_height) * (h->display_mode == 2); + if (s->thistogram) { + starty = m * s->histogram_size * (s->display_mode == 2); + startx = m++ * s->width * (s->display_mode == 1); + } else { + startx = m * s->histogram_size * (s->display_mode == 1); + starty = m++ * (s->level_height + s->scale_height) * (s->display_mode == 2); + } - if (h->histogram_size <= 256) { + if (s->histogram_size <= 256) { for (i = 0; i < height; i++) { const uint8_t *src = in->data[p] + i * in->linesize[p]; for (j = 0; j < width; j++) - h->histogram[src[j]]++; + s->histogram[src[j]]++; } } else { for (i = 0; i < height; i++) { const uint16_t *src = (const uint16_t *)(in->data[p] + i * in->linesize[p]); for (j = 0; j < width; j++) - h->histogram[src[j]]++; + s->histogram[src[j]]++; } } - for (i = 0; i < h->histogram_size; i++) - max_hval = FFMAX(max_hval, h->histogram[i]); + for (i = 0; i < s->histogram_size; i++) + max_hval = FFMAX(max_hval, s->histogram[i]); max_hval_log = log2(max_hval + 1); - for (i = 0; i < h->histogram_size; i++) { - int col_height; + if (s->thistogram) { + int minh = s->histogram_size - 1, maxh = 0; - if (h->levels_mode) - col_height = lrint(h->level_height * (1. 
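/* thistogram drawing: each bin becomes one row; the bin count, scaled either
 * linearly or through log2 depending on levels_mode, is added to the plane
 * baseline s->start[p] and written at column s->x_pos.  With envelope
 * enabled, the lowest and highest non-empty bins are additionally marked in
 * the configured ecolor. */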
- (log2(h->histogram[i] + 1) / max_hval_log))); - else - col_height = h->level_height - (h->histogram[i] * (int64_t)h->level_height + max_hval - 1) / max_hval; + for (int i = 0; i < s->histogram_size; i++) { + int idx = s->histogram_size - i - 1; + int value = s->start[p]; - if (h->histogram_size <= 256) { - for (j = h->level_height - 1; j >= col_height; j--) { - if (h->display_mode) { - for (l = 0; l < h->dncomp; l++) - out->data[l][(j + start) * out->linesize[l] + startx + i] = h->fg_color[l]; - } else { - out->data[p][(j + start) * out->linesize[p] + startx + i] = 255; + if (s->envelope && s->histogram[idx]) { + minh = FFMIN(minh, i); + maxh = FFMAX(maxh, i); + } + + if (s->levels_mode) + value += lrint(max_value * (log2(s->histogram[idx] + 1) / max_hval_log)); + else + value += lrint(max_value * s->histogram[idx] / (float)max_hval); + + if (s->histogram_size <= 256) { + s->out->data[p][(i + starty) * s->out->linesize[p] + startx + s->x_pos] = value; + } else { + AV_WN16(s->out->data[p] + (i + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, value); + } + } + + if (s->envelope) { + if (s->histogram_size <= 256) { + s->out->data[0][(minh + starty) * s->out->linesize[p] + startx + s->x_pos] = s->envelope_color[0]; + s->out->data[0][(maxh + starty) * s->out->linesize[p] + startx + s->x_pos] = s->envelope_color[0]; + if (s->dncomp >= 3) { + s->out->data[1][(minh + starty) * s->out->linesize[p] + startx + s->x_pos] = s->envelope_color[1]; + s->out->data[2][(minh + starty) * s->out->linesize[p] + startx + s->x_pos] = s->envelope_color[2]; + s->out->data[1][(maxh + starty) * s->out->linesize[p] + startx + s->x_pos] = s->envelope_color[1]; + s->out->data[2][(maxh + starty) * s->out->linesize[p] + startx + s->x_pos] = s->envelope_color[2]; + } + } else { + const int mult = s->mult; + + AV_WN16(s->out->data[0] + (minh + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, s->envelope_color[0] * mult); + AV_WN16(s->out->data[0] + (maxh + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, s->envelope_color[0] * mult); + if (s->dncomp >= 3) { + AV_WN16(s->out->data[1] + (minh + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, s->envelope_color[1] * mult); + AV_WN16(s->out->data[2] + (minh + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, s->envelope_color[2] * mult); + AV_WN16(s->out->data[1] + (maxh + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, s->envelope_color[1] * mult); + AV_WN16(s->out->data[2] + (maxh + starty) * s->out->linesize[p] + startx * 2 + s->x_pos * 2, s->envelope_color[2] * mult); } } - for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--) - out->data[p][(j + start) * out->linesize[p] + startx + i] = i; - } else { - const int mult = h->mult; + } + } else { + for (i = 0; i < s->histogram_size; i++) { + int col_height; - for (j = h->level_height - 1; j >= col_height; j--) { - if (h->display_mode) { - for (l = 0; l < h->dncomp; l++) - AV_WN16(out->data[l] + (j + start) * out->linesize[l] + startx * 2 + i * 2, h->fg_color[l] * mult); - } else { - AV_WN16(out->data[p] + (j + start) * out->linesize[p] + startx * 2 + i * 2, 255 * mult); + if (s->levels_mode) + col_height = lrint(s->level_height * (1. 
- (log2(s->histogram[i] + 1) / max_hval_log))); + else + col_height = s->level_height - (s->histogram[i] * (int64_t)s->level_height + max_hval - 1) / max_hval; + + if (s->histogram_size <= 256) { + for (j = s->level_height - 1; j >= col_height; j--) { + if (s->display_mode) { + for (l = 0; l < s->dncomp; l++) + out->data[l][(j + starty) * out->linesize[l] + startx + i] = s->fg_color[l]; + } else { + out->data[p][(j + starty) * out->linesize[p] + startx + i] = 255; + } } + for (j = s->level_height + s->scale_height - 1; j >= s->level_height; j--) + out->data[p][(j + starty) * out->linesize[p] + startx + i] = i; + } else { + const int mult = s->mult; + + for (j = s->level_height - 1; j >= col_height; j--) { + if (s->display_mode) { + for (l = 0; l < s->dncomp; l++) + AV_WN16(out->data[l] + (j + starty) * out->linesize[l] + startx * 2 + i * 2, s->fg_color[l] * mult); + } else { + AV_WN16(out->data[p] + (j + starty) * out->linesize[p] + startx * 2 + i * 2, 255 * mult); + } + } + for (j = s->level_height + s->scale_height - 1; j >= s->level_height; j--) + AV_WN16(out->data[p] + (j + starty) * out->linesize[p] + startx * 2 + i * 2, i); } - for (j = h->level_height + h->scale_height - 1; j >= h->level_height; j--) - AV_WN16(out->data[p] + (j + start) * out->linesize[p] + startx * 2 + i * 2, i); } } - memset(h->histogram, 0, h->histogram_size * sizeof(unsigned)); + memset(s->histogram, 0, s->histogram_size * sizeof(unsigned)); } + out->pts = in->pts; av_frame_free(&in); + s->x_pos++; + if (s->x_pos >= s->width) + s->x_pos = 0; + + if (s->thistogram) { + AVFrame *clone = av_frame_clone(out); + + if (!clone) + return AVERROR(ENOMEM); + return ff_filter_frame(outlink, clone); + } return ff_filter_frame(outlink, out); } @@ -375,6 +475,8 @@ static const AVFilterPad outputs[] = { { NULL } }; +#if CONFIG_HISTOGRAM_FILTER + AVFilter ff_vf_histogram = { .name = "histogram", .description = NULL_IF_CONFIG_SMALL("Compute and draw a histogram."), @@ -384,3 +486,34 @@ AVFilter ff_vf_histogram = { .outputs = outputs, .priv_class = &histogram_class, }; + +#endif /* CONFIG_HISTOGRAM_FILTER */ + +#if CONFIG_THISTOGRAM_FILTER + +static const AVOption thistogram_options[] = { + { "width", "set width", OFFSET(width), AV_OPT_TYPE_INT, {.i64=0}, 0, 8192, FLAGS}, + { "w", "set width", OFFSET(width), AV_OPT_TYPE_INT, {.i64=0}, 0, 8192, FLAGS}, + COMMON_OPTIONS + { "bgopacity", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.9}, 0, 1, FLAGS}, + { "b", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.9}, 0, 1, FLAGS}, + { "envelope", "display envelope", OFFSET(envelope), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS }, + { "e", "display envelope", OFFSET(envelope), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS }, + { "ecolor", "set envelope color", OFFSET(envelope_rgba), AV_OPT_TYPE_COLOR, {.str="gold"}, 0, 0, FLAGS }, + { "ec", "set envelope color", OFFSET(envelope_rgba), AV_OPT_TYPE_COLOR, {.str="gold"}, 0, 0, FLAGS }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(thistogram); + +AVFilter ff_vf_thistogram = { + .name = "thistogram", + .description = NULL_IF_CONFIG_SMALL("Compute and draw a temporal histogram."), + .priv_size = sizeof(HistogramContext), + .query_formats = query_formats, + .inputs = inputs, + .outputs = outputs, + .priv_class = &thistogram_class, +}; + +#endif /* CONFIG_THISTOGRAM_FILTER */ diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c index 323333b33c..026d4b6eec 100644 --- a/libavfilter/vf_hue.c +++ b/libavfilter/vf_hue.c @@ -86,7 +86,7 @@ typedef struct HueContext 
{ } HueContext; #define OFFSET(x) offsetof(HueContext, x) -#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption hue_options[] = { { "h", "set the hue angle degrees expression", OFFSET(hue_deg_expr), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS }, diff --git a/libavfilter/vf_il.c b/libavfilter/vf_il.c index ae0cc1938a..6cd5f89f76 100644 --- a/libavfilter/vf_il.c +++ b/libavfilter/vf_il.c @@ -46,7 +46,7 @@ typedef struct IlContext { } IlContext; #define OFFSET(x) offsetof(IlContext, x) -#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption il_options[] = { {"luma_mode", "select luma mode", OFFSET(luma_mode), AV_OPT_TYPE_INT, {.i64=MODE_NONE}, MODE_NONE, MODE_DEINTERLEAVE, FLAGS, "luma_mode"}, @@ -210,4 +210,5 @@ AVFilter ff_vf_il = { .outputs = outputs, .priv_class = &il_class, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, + .process_command = ff_filter_process_command, }; diff --git a/libavfilter/vf_mix.c b/libavfilter/vf_mix.c index b5a282ad4c..9e1ae79e00 100644 --- a/libavfilter/vf_mix.c +++ b/libavfilter/vf_mix.c @@ -305,7 +305,7 @@ static int activate(AVFilterContext *ctx) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM static const AVOption mix_options[] = { - { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags = FLAGS }, + { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT16_MAX, .flags = FLAGS }, { "weights", "set weight for each input", OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, .flags = FLAGS }, { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT16_MAX, .flags = FLAGS }, { "duration", "how to determine end of stream", OFFSET(duration), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, .flags = FLAGS, "duration" }, diff --git a/libavfilter/vf_neighbor.c b/libavfilter/vf_neighbor.c index f20997212c..17a9b88265 100644 --- a/libavfilter/vf_neighbor.c +++ b/libavfilter/vf_neighbor.c @@ -353,7 +353,7 @@ static const AVFilterPad neighbor_outputs[] = { }; #define OFFSET(x) offsetof(NContext, x) -#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM #define DEFINE_NEIGHBOR_FILTER(name_, description_) \ AVFILTER_DEFINE_CLASS(name_); \ @@ -368,6 +368,7 @@ AVFilter ff_vf_##name_ = { \ .outputs = neighbor_outputs, \ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC| \ AVFILTER_FLAG_SLICE_THREADS, \ + .process_command = ff_filter_process_command, \ } #if CONFIG_EROSION_FILTER diff --git a/libavfilter/vf_readeia608.c b/libavfilter/vf_readeia608.c index 27a0c58321..2973847d40 100644 --- a/libavfilter/vf_readeia608.c +++ b/libavfilter/vf_readeia608.c @@ -36,41 +36,52 @@ #include "internal.h" #include "video.h" -#define FALL 0 -#define RISE 1 +#define LAG 25 +#define CLOCK_BITSIZE_MIN 0.2f +#define CLOCK_BITSIZE_MAX 1.5f +#define SYNC_BITSIZE_MIN 12.f +#define SYNC_BITSIZE_MAX 15.f + +typedef struct LineItem { + int input; + int output; + + float unfiltered; + float filtered; + float average; + float deviation; +} LineItem; + +typedef struct CodeItem { + uint8_t bit; + int size; +} CodeItem; typedef struct ReadEIA608Context { const AVClass *class; int start, end; - int min_range; - int max_peak_diff; - int 
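/* LineItem carries, per sample, the raw input, its normalized and filtered
 * copies, and the rolling mean/deviation used for thresholding; CodeItem
 * stores one run of the thresholded line (bit value and length).  LAG is the
 * window size of the rolling statistics, CLOCK_BITSIZE_MIN/MAX bound each
 * clock bit's length relative to the measured bit width, and
 * SYNC_BITSIZE_MIN/MAX bound the start run relative to the full line width. */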
max_period_diff; - int max_start_diff; int nb_found; int white; int black; - float mpd, mhd, msd, mac, spw, bhd, wth, bth; + float spw; int chp; int lp; - uint8_t *temp; + + uint64_t histogram[256]; + + CodeItem *code; + LineItem *line; } ReadEIA608Context; #define OFFSET(x) offsetof(ReadEIA608Context, x) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM static const AVOption readeia608_options[] = { - { "scan_min", "set from which line to scan for codes", OFFSET(start), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, - { "scan_max", "set to which line to scan for codes", OFFSET(end), AV_OPT_TYPE_INT, {.i64=29}, 0, INT_MAX, FLAGS }, - { "mac", "set minimal acceptable amplitude change for sync codes detection", OFFSET(mac), AV_OPT_TYPE_FLOAT, {.dbl=.2}, 0.001, 1, FLAGS }, - { "spw", "set ratio of width reserved for sync code detection", OFFSET(spw), AV_OPT_TYPE_FLOAT, {.dbl=.27}, 0.1, 0.7, FLAGS }, - { "mhd", "set max peaks height difference for sync code detection", OFFSET(mhd), AV_OPT_TYPE_FLOAT, {.dbl=.1}, 0, 0.5, FLAGS }, - { "mpd", "set max peaks period difference for sync code detection", OFFSET(mpd), AV_OPT_TYPE_FLOAT, {.dbl=.1}, 0, 0.5, FLAGS }, - { "msd", "set first two max start code bits differences", OFFSET(msd), AV_OPT_TYPE_FLOAT, {.dbl=.02}, 0, 0.5, FLAGS }, - { "bhd", "set min ratio of bits height compared to 3rd start code bit", OFFSET(bhd), AV_OPT_TYPE_FLOAT, {.dbl=.75}, 0.01, 1, FLAGS }, - { "th_w", "set white color threshold", OFFSET(wth), AV_OPT_TYPE_FLOAT, {.dbl=.35}, 0.1, 1, FLAGS }, - { "th_b", "set black color threshold", OFFSET(bth), AV_OPT_TYPE_FLOAT, {.dbl=.15}, 0, 0.5, FLAGS }, - { "chp", "check and apply parity bit", OFFSET(chp), AV_OPT_TYPE_BOOL, {.i64= 0}, 0, 1, FLAGS }, - { "lp", "lowpass line prior to processing", OFFSET(lp), AV_OPT_TYPE_BOOL, {.i64= 0}, 0, 1, FLAGS }, + { "scan_min", "set from which line to scan for codes", OFFSET(start), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS }, + { "scan_max", "set to which line to scan for codes", OFFSET(end), AV_OPT_TYPE_INT, {.i64=29}, 0, INT_MAX, FLAGS }, + { "spw", "set ratio of width reserved for sync code detection", OFFSET(spw), AV_OPT_TYPE_FLOAT, {.dbl=.27}, 0.1, 0.7, FLAGS }, + { "chp", "check and apply parity bit", OFFSET(chp), AV_OPT_TYPE_BOOL, {.i64= 0}, 0, 1, FLAGS }, + { "lp", "lowpass line prior to processing", OFFSET(lp), AV_OPT_TYPE_BOOL, {.i64= 1}, 0, 1, FLAGS }, { NULL } }; @@ -96,10 +107,9 @@ static int query_formats(AVFilterContext *ctx) static int config_input(AVFilterLink *inlink) { - const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); AVFilterContext *ctx = inlink->dst; ReadEIA608Context *s = ctx->priv; - int depth = desc->comp[0].depth; + int size = inlink->w + LAG; if (s->end >= inlink->h) { av_log(ctx, AV_LOG_WARNING, "Last line to scan too large, clipping.\n"); @@ -111,124 +121,243 @@ static int config_input(AVFilterLink *inlink) return AVERROR(EINVAL); } - s->min_range = s->mac * ((1 << depth) - 1); - s->max_peak_diff = s->mhd * ((1 << depth) - 1); - s->max_period_diff = s->mpd * ((1 << depth) - 1); - s->max_start_diff = s->msd * ((1 << depth) - 1); - s->white = s->wth * ((1 << depth) - 1); - s->black = s->bth * ((1 << depth) - 1); - s->temp = av_calloc(inlink->w, sizeof(*s->temp)); - if (!s->temp) + s->line = av_calloc(size, sizeof(*s->line)); + s->code = av_calloc(size, sizeof(*s->code)); + if (!s->line || !s->code) return AVERROR(ENOMEM); return 0; } -static void extract_line(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *in, int line) +static 
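/* The per-line histogram feeds find_black_and_white(): the most populated
 * bin in the lower half of the occupied range is taken as the black level
 * and the most populated bin in the upper half as the white level; lines
 * where the two levels end up closer than 5 codes apart are skipped as
 * carrying no usable waveform. */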
void build_histogram(ReadEIA608Context *s, const LineItem *line, int len) +{ + memset(s->histogram, 0, sizeof(s->histogram)); + + for (int i = LAG; i < len + LAG; i++) + s->histogram[line[i].input]++; +} + +static void find_black_and_white(ReadEIA608Context *s) +{ + int start = 0, end = 0, middle; + int black = 0, white = 0; + int cnt; + + for (int i = 0; i < 256; i++) { + if (s->histogram[i]) { + start = i; + break; + } + } + + for (int i = 255; i >= 0; i--) { + if (s->histogram[i]) { + end = i; + break; + } + } + + middle = start + (end - start) / 2; + + cnt = 0; + for (int i = start; i <= middle; i++) { + if (s->histogram[i] > cnt) { + cnt = s->histogram[i]; + black = i; + } + } + + cnt = 0; + for (int i = end; i >= middle; i--) { + if (s->histogram[i] > cnt) { + cnt = s->histogram[i]; + white = i; + } + } + + s->black = black; + s->white = white; +} + +static float meanf(const LineItem *line, int len) +{ + float sum = 0.0, mean = 0.0; + + for (int i = 0; i < len; i++) + sum += line[i].filtered; + + mean = sum / len; + + return mean; +} + +static float stddevf(const LineItem *line, int len) +{ + float m = meanf(line, len); + float standard_deviation = 0.f; + + for (int i = 0; i < len; i++) + standard_deviation += (line[i].filtered - m) * (line[i].filtered - m); + + return sqrtf(standard_deviation / (len - 1)); +} + +static void thresholding(ReadEIA608Context *s, LineItem *line, + int lag, float threshold, float influence, int len) +{ + for (int i = lag; i < len + lag; i++) { + line[i].unfiltered = line[i].input / 255.f; + line[i].filtered = line[i].unfiltered; + } + + for (int i = 0; i < lag; i++) { + line[i].unfiltered = meanf(line, len * s->spw); + line[i].filtered = line[i].unfiltered; + } + + line[lag - 1].average = meanf(line, lag); + line[lag - 1].deviation = stddevf(line, lag); + + for (int i = lag; i < len + lag; i++) { + if (fabsf(line[i].unfiltered - line[i-1].average) > threshold * line[i-1].deviation) { + if (line[i].unfiltered > line[i-1].average) { + line[i].output = 255; + } else { + line[i].output = 0; + } + + line[i].filtered = influence * line[i].unfiltered + (1.f - influence) * line[i-1].filtered; + } else { + int distance_from_black, distance_from_white; + + distance_from_black = FFABS(line[i].input - s->black); + distance_from_white = FFABS(line[i].input - s->white); + + line[i].output = distance_from_black <= distance_from_white ? 
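/* Thresholding: a sample deviating from the rolling mean by more than
 * `threshold` standard deviations is forced to full white or black and only
 * updates the filtered signal with weight `influence`; anything else is
 * snapped to whichever of the detected black/white levels is closer. */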
0 : 255; + } + + line[i].average = meanf(line + i - lag, lag); + line[i].deviation = stddevf(line + i - lag, lag); + } +} + +static int periods(const LineItem *line, CodeItem *code, int len) +{ + int hold = line[LAG].output, cnt = 0; + int last = LAG; + + memset(code, 0, len * sizeof(*code)); + + for (int i = LAG + 1; i < len + LAG; i++) { + if (line[i].output != hold) { + code[cnt].size = i - last; + code[cnt].bit = hold; + hold = line[i].output; + last = i; + cnt++; + } + } + + code[cnt].size = LAG + len - last; + code[cnt].bit = hold; + + return cnt + 1; +} + +static void dump_code(AVFilterContext *ctx, int len, int item) { ReadEIA608Context *s = ctx->priv; - int max = 0, min = INT_MAX; - int i, ch, range = 0; + + av_log(ctx, AV_LOG_DEBUG, "%d:", item); + for (int i = 0; i < len; i++) { + av_log(ctx, AV_LOG_DEBUG, " %03d", s->code[i].size); + } + av_log(ctx, AV_LOG_DEBUG, "\n"); +} + +static void extract_line(AVFilterContext *ctx, AVFrame *in, int w, int nb_line) +{ + ReadEIA608Context *s = ctx->priv; + LineItem *line = s->line; + int i, j, ch, len; const uint8_t *src; - uint16_t clock[8][2] = { { 0 } }; - const int sync_width = s->spw * in->width; - int last = 0, peaks = 0, max_peak_diff = 0, dir = RISE; - const int width_per_bit = (in->width - sync_width) / 19; uint8_t byte[2] = { 0 }; - int s1, s2, s3, parity; + uint8_t codes[19] = { 0 }; + float bit_size = 0.f; + int parity; - src = &in->data[0][line * in->linesize[0]]; + memset(line, 0, (w + LAG) * sizeof(*line)); + src = &in->data[0][nb_line * in->linesize[0]]; if (s->lp) { - uint8_t *dst = s->temp; - int w = inlink->w - 1; - - for (i = 0; i < inlink->w; i++) { + for (i = 0; i < w; i++) { int a = FFMAX(i - 3, 0); int b = FFMAX(i - 2, 0); int c = FFMAX(i - 1, 0); - int d = FFMIN(i + 3, w); - int e = FFMIN(i + 2, w); - int f = FFMIN(i + 1, w); + int d = FFMIN(i + 3, w-1); + int e = FFMIN(i + 2, w-1); + int f = FFMIN(i + 1, w-1); - dst[i] = (src[a] + src[b] + src[c] + src[i] + src[d] + src[e] + src[f] + 6) / 7; + line[LAG + i].input = (src[a] + src[b] + src[c] + src[i] + src[d] + src[e] + src[f] + 6) / 7; + } + } else { + for (i = 0; i < w; i++) { + line[LAG + i].input = src[i]; + } + } + + build_histogram(s, line, w); + find_black_and_white(s); + if (s->white - s->black < 5) + return; + + thresholding(s, line, LAG, 1, 0, w); + len = periods(line, s->code, w); + dump_code(ctx, len, nb_line); + if (len < 15 || + s->code[14].bit != 0 || + w / (float)s->code[14].size < SYNC_BITSIZE_MIN || + w / (float)s->code[14].size > SYNC_BITSIZE_MAX) { + return; + } + + for (i = 14; i < len; i++) { + bit_size += s->code[i].size; + } + + bit_size /= 19.f; + for (i = 1; i < 14; i++) { + if (s->code[i].size / bit_size > CLOCK_BITSIZE_MAX || + s->code[i].size / bit_size < CLOCK_BITSIZE_MIN) { + return; + } + } + + if (s->code[15].size / bit_size < 0.45f) { + return; + } + + for (j = 0, i = 14; i < len; i++) { + int run, bit; + + run = lrintf(s->code[i].size / bit_size); + bit = s->code[i].bit; + + for (int k = 0; j < 19 && k < run; k++) { + codes[j++] = bit; } - src = s->temp; - } - - for (i = 0; i < sync_width; i++) { - max = FFMAX(max, src[i]); - min = FFMIN(min, src[i]); - } - - range = max - min; - if (range < s->min_range) - return; - - for (i = 0; i < sync_width; i++) { - int Y = src[i]; - - if (dir == RISE) { - if (Y < last) { - dir = FALL; - if (last >= s->white) { - clock[peaks][0] = last; - clock[peaks][1] = i; - peaks++; - if (peaks > 7) - break; - } - } - } else if (dir == FALL) { - if (Y > last && last <= s->black) { - dir = RISE; - } - } - 
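/* Once the run lengths have passed the sync and clock checks, the 19 bit
 * cells are resampled into codes[] and the two characters are assembled
 * below; with chp enabled, a byte that fails the odd-parity check is
 * replaced with 0x7F. */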
last = Y; - } - - if (peaks != 7) { - av_log(ctx, AV_LOG_DEBUG, "peaks: %d != 7\n", peaks); - return; - } - - for (i = 1; i < 7; i++) - max_peak_diff = FFMAX(max_peak_diff, FFABS(clock[i][0] - clock[i-1][0])); - - if (max_peak_diff > s->max_peak_diff) { - av_log(ctx, AV_LOG_DEBUG, "mhd: %d > %d\n", max_peak_diff, s->max_peak_diff); - return; - } - - max = 0; min = INT_MAX; - for (i = 1; i < 7; i++) { - max = FFMAX(max, FFABS(clock[i][1] - clock[i-1][1])); - min = FFMIN(min, FFABS(clock[i][1] - clock[i-1][1])); - } - - range = max - min; - if (range > s->max_period_diff) { - av_log(ctx, AV_LOG_DEBUG, "mpd: %d > %d\n", range, s->max_period_diff); - return; - } - - s1 = src[sync_width + width_per_bit * 0 + width_per_bit / 2]; - s2 = src[sync_width + width_per_bit * 1 + width_per_bit / 2]; - s3 = src[sync_width + width_per_bit * 2 + width_per_bit / 2]; - - if (FFABS(s1 - s2) > s->max_start_diff || s1 > s->black || s2 > s->black || s3 < s->white) { - av_log(ctx, AV_LOG_DEBUG, "msd: %d > %d\n", FFABS(s1 - s2), s->max_start_diff); - return; + if (j >= 19) + break; } for (ch = 0; ch < 2; ch++) { for (parity = 0, i = 0; i < 8; i++) { - int b = src[sync_width + width_per_bit * (i + 3 + 8 * ch) + width_per_bit / 2]; + int b = codes[3 + ch * 8 + i]; - if (b - s1 > (s3 - s1) * s->bhd) { - b = 1; + if (b == 255) { parity++; + b = 1; } else { b = 0; } @@ -237,7 +366,7 @@ static void extract_line(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *in if (s->chp) { if (!(parity & 1)) { - byte[ch] = 0; + byte[ch] = 0x7F; } } } @@ -245,12 +374,16 @@ static void extract_line(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *in { uint8_t key[128], value[128]; + //snprintf(key, sizeof(key), "lavfi.readeia608.%d.bits", s->nb_found); + //snprintf(value, sizeof(value), "0b%d%d%d%d%d%d%d%d 0b%d%d%d%d%d%d%d%d", codes[3]==255,codes[4]==255,codes[5]==255,codes[6]==255,codes[7]==255,codes[8]==255,codes[9]==255,codes[10]==255,codes[11]==255,codes[12]==255,codes[13]==255,codes[14]==255,codes[15]==255,codes[16]==255,codes[17]==255,codes[18]==255); + //av_dict_set(&in->metadata, key, value, 0); + snprintf(key, sizeof(key), "lavfi.readeia608.%d.cc", s->nb_found); snprintf(value, sizeof(value), "0x%02X%02X", byte[0], byte[1]); av_dict_set(&in->metadata, key, value, 0); snprintf(key, sizeof(key), "lavfi.readeia608.%d.line", s->nb_found); - snprintf(value, sizeof(value), "%d", line); + snprintf(value, sizeof(value), "%d", nb_line); av_dict_set(&in->metadata, key, value, 0); } @@ -266,7 +399,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) s->nb_found = 0; for (i = s->start; i <= s->end; i++) - extract_line(ctx, inlink, in, i); + extract_line(ctx, in, inlink->w, i); return ff_filter_frame(outlink, in); } @@ -275,7 +408,8 @@ static av_cold void uninit(AVFilterContext *ctx) { ReadEIA608Context *s = ctx->priv; - av_freep(&s->temp); + av_freep(&s->code); + av_freep(&s->line); } static const AVFilterPad readeia608_inputs[] = { diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c index 371ff7f722..378be44ae9 100644 --- a/libavfilter/vf_rotate.c +++ b/libavfilter/vf_rotate.c @@ -94,10 +94,11 @@ typedef struct ThreadData { #define OFFSET(x) offsetof(RotContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption rotate_options[] = { - { "angle", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS }, - { "a", "set 
angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS }, + { "angle", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=TFLAGS }, + { "a", "set angle (in radians)", OFFSET(angle_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, CHAR_MIN, CHAR_MAX, .flags=TFLAGS }, { "out_w", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS }, { "ow", "set output width expression", OFFSET(outw_expr_str), AV_OPT_TYPE_STRING, {.str="iw"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS }, { "out_h", "set output height expression", OFFSET(outh_expr_str), AV_OPT_TYPE_STRING, {.str="ih"}, CHAR_MIN, CHAR_MAX, .flags=FLAGS }, diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c index 8620d1c44e..d46c767e70 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c @@ -32,6 +32,7 @@ #include "scale_eval.h" #include "video.h" #include "libavutil/avstring.h" +#include "libavutil/eval.h" #include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" @@ -41,6 +42,62 @@ #include "libavutil/avassert.h" #include "libswscale/swscale.h" +static const char *const var_names[] = { + "in_w", "iw", + "in_h", "ih", + "out_w", "ow", + "out_h", "oh", + "a", + "sar", + "dar", + "hsub", + "vsub", + "ohsub", + "ovsub", + "n", + "t", + "pos", + "main_w", + "main_h", + "main_a", + "main_sar", + "main_dar", "mdar", + "main_hsub", + "main_vsub", + "main_n", + "main_t", + "main_pos", + NULL +}; + +enum var_name { + VAR_IN_W, VAR_IW, + VAR_IN_H, VAR_IH, + VAR_OUT_W, VAR_OW, + VAR_OUT_H, VAR_OH, + VAR_A, + VAR_SAR, + VAR_DAR, + VAR_HSUB, + VAR_VSUB, + VAR_OHSUB, + VAR_OVSUB, + VAR_N, + VAR_T, + VAR_POS, + VAR_S2R_MAIN_W, + VAR_S2R_MAIN_H, + VAR_S2R_MAIN_A, + VAR_S2R_MAIN_SAR, + VAR_S2R_MAIN_DAR, VAR_S2R_MDAR, + VAR_S2R_MAIN_HSUB, + VAR_S2R_MAIN_VSUB, + VAR_S2R_MAIN_N, + VAR_S2R_MAIN_T, + VAR_S2R_MAIN_POS, + VARS_NB +}; + enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, @@ -72,6 +129,10 @@ typedef struct ScaleContext { char *w_expr; ///< width expression string char *h_expr; ///< height expression string + AVExpr *w_pexpr; + AVExpr *h_pexpr; + double var_values[VARS_NB]; + char *flags_str; char *in_color_matrix; @@ -96,6 +157,120 @@ typedef struct ScaleContext { AVFilter ff_vf_scale2ref; +static int config_props(AVFilterLink *outlink); + +static int check_exprs(AVFilterContext *ctx) +{ + ScaleContext *scale = ctx->priv; + unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 }; + + if (!scale->w_pexpr && !scale->h_pexpr) + return AVERROR(EINVAL); + + if (scale->w_pexpr) + av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB); + if (scale->h_pexpr) + av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB); + + if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) { + av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr); + return AVERROR(EINVAL); + } + + if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) { + av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr); + return AVERROR(EINVAL); + } + + if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) && + (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) { + av_log(ctx, AV_LOG_ERROR, "Circular expressions invalid for width '%s' and height '%s'.\n", scale->w_expr, scale->h_expr); + return AVERROR(EINVAL); + } + + if (ctx->filter != &ff_vf_scale2ref && + (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] || + vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] 
|| + vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] || + vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] || + vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] || + vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] || + vars_w[VAR_S2R_MAIN_HSUB] || vars_h[VAR_S2R_MAIN_HSUB] || + vars_w[VAR_S2R_MAIN_VSUB] || vars_h[VAR_S2R_MAIN_VSUB] || + vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] || + vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] || + vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) { + av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref variables are not valid in scale filter.\n"); + return AVERROR(EINVAL); + } + + if (scale->eval_mode == EVAL_MODE_INIT && + (vars_w[VAR_N] || vars_h[VAR_N] || + vars_w[VAR_T] || vars_h[VAR_T] || + vars_w[VAR_POS] || vars_h[VAR_POS] || + vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] || + vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T] || + vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]) ) { + av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', 'pos' are not valid in init eval_mode.\n"); + return AVERROR(EINVAL); + } + + return 0; +} + +static int scale_parse_expr(AVFilterContext *ctx, char *str_expr, AVExpr **pexpr_ptr, const char *var, const char *args) +{ + ScaleContext *scale = ctx->priv; + int ret, is_inited = 0; + char *old_str_expr = NULL; + AVExpr *old_pexpr = NULL; + + if (str_expr) { + old_str_expr = av_strdup(str_expr); + if (!old_str_expr) + return AVERROR(ENOMEM); + av_opt_set(scale, var, args, 0); + } + + if (*pexpr_ptr) { + old_pexpr = *pexpr_ptr; + *pexpr_ptr = NULL; + is_inited = 1; + } + + ret = av_expr_parse(pexpr_ptr, args, var_names, + NULL, NULL, NULL, NULL, 0, ctx); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var, args); + goto revert; + } + + ret = check_exprs(ctx); + if (ret < 0) + goto revert; + + if (is_inited && (ret = config_props(ctx->outputs[0])) < 0) + goto revert; + + av_expr_free(old_pexpr); + old_pexpr = NULL; + av_freep(&old_str_expr); + + return 0; + +revert: + av_expr_free(*pexpr_ptr); + *pexpr_ptr = NULL; + if (old_str_expr) { + av_opt_set(scale, var, old_str_expr, 0); + av_free(old_str_expr); + } + if (old_pexpr) + *pexpr_ptr = old_pexpr; + + return ret; +} + static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) { ScaleContext *scale = ctx->priv; @@ -127,6 +302,14 @@ static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) if (!scale->h_expr) av_opt_set(scale, "h", "ih", 0); + ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr); + if (ret < 0) + return ret; + + ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr); + if (ret < 0) + return ret; + av_log(ctx, AV_LOG_VERBOSE, "w:%s h:%s flags:'%s' interl:%d\n", scale->w_expr, scale->h_expr, (char *)av_x_if_null(scale->flags_str, ""), scale->interlaced); @@ -149,6 +332,9 @@ static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts) static av_cold void uninit(AVFilterContext *ctx) { ScaleContext *scale = ctx->priv; + av_expr_free(scale->w_pexpr); + av_expr_free(scale->h_pexpr); + scale->w_pexpr = scale->h_pexpr = NULL; sws_freeContext(scale->sws); sws_freeContext(scale->isws[0]); sws_freeContext(scale->isws[1]); @@ -218,6 +404,81 @@ static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace) return sws_getCoefficients(colorspace); } +static int scale_eval_dimensions(AVFilterContext *ctx) +{ + ScaleContext *scale = ctx->priv; + const char scale2ref = ctx->filter == 
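/* Evaluation order below: the width expression is evaluated first and its
 * result exposed as out_w/ow, then the height expression (which may use ow),
 * then the width again so it can in turn depend on oh; a result of 0 falls
 * back to the corresponding input dimension, and NaN is reported as an
 * invalid expression. */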
&ff_vf_scale2ref; + const AVFilterLink *inlink = scale2ref ? ctx->inputs[1] : ctx->inputs[0]; + const AVFilterLink *outlink = ctx->outputs[0]; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + const AVPixFmtDescriptor *out_desc = av_pix_fmt_desc_get(outlink->format); + char *expr; + int eval_w, eval_h; + int ret; + double res; + const AVPixFmtDescriptor *main_desc; + const AVFilterLink *main_link; + + if (scale2ref) { + main_link = ctx->inputs[0]; + main_desc = av_pix_fmt_desc_get(main_link->format); + } + + scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w; + scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h; + scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN; + scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN; + scale->var_values[VAR_A] = (double) inlink->w / inlink->h; + scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? + (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; + scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR]; + scale->var_values[VAR_HSUB] = 1 << desc->log2_chroma_w; + scale->var_values[VAR_VSUB] = 1 << desc->log2_chroma_h; + scale->var_values[VAR_OHSUB] = 1 << out_desc->log2_chroma_w; + scale->var_values[VAR_OVSUB] = 1 << out_desc->log2_chroma_h; + + if (scale2ref) { + scale->var_values[VAR_S2R_MAIN_W] = main_link->w; + scale->var_values[VAR_S2R_MAIN_H] = main_link->h; + scale->var_values[VAR_S2R_MAIN_A] = (double) main_link->w / main_link->h; + scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ? + (double) main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1; + scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] = + scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR]; + scale->var_values[VAR_S2R_MAIN_HSUB] = 1 << main_desc->log2_chroma_w; + scale->var_values[VAR_S2R_MAIN_VSUB] = 1 << main_desc->log2_chroma_h; + } + + res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL); + eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? inlink->w : (int) res; + + res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL); + if (isnan(res)) { + expr = scale->h_expr; + ret = AVERROR(EINVAL); + goto fail; + } + eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int) res == 0 ? inlink->h : (int) res; + + res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL); + if (isnan(res)) { + expr = scale->w_expr; + ret = AVERROR(EINVAL); + goto fail; + } + eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int) res == 0 ? 
inlink->w : (int) res; + + scale->w = eval_w; + scale->h = eval_h; + + return 0; + +fail: + av_log(ctx, AV_LOG_ERROR, + "Error when evaluating the expression '%s'.\n", expr); + return ret; +} + static int config_props(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; @@ -228,26 +489,23 @@ static int config_props(AVFilterLink *outlink) enum AVPixelFormat outfmt = outlink->format; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); ScaleContext *scale = ctx->priv; - int w, h; int ret; - if ((ret = ff_scale_eval_dimensions(ctx, - scale->w_expr, scale->h_expr, - inlink, outlink, - &w, &h)) < 0) + if ((ret = scale_eval_dimensions(ctx)) < 0) goto fail; - ff_scale_adjust_dimensions(inlink, &w, &h, + ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h, scale->force_original_aspect_ratio, scale->force_divisible_by); - if (w > INT_MAX || h > INT_MAX || - (h * inlink->w) > INT_MAX || - (w * inlink->h) > INT_MAX) + if (scale->w > INT_MAX || + scale->h > INT_MAX || + (scale->h * inlink->w) > INT_MAX || + (scale->w * inlink->h) > INT_MAX) av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n"); - outlink->w = w; - outlink->h = h; + outlink->w = scale->w; + outlink->h = scale->h; /* TODO: make algorithm configurable */ @@ -390,30 +648,67 @@ static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, s out,out_stride); } +#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb)) + static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out) { - ScaleContext *scale = link->dst->priv; - AVFilterLink *outlink = link->dst->outputs[0]; + AVFilterContext *ctx = link->dst; + ScaleContext *scale = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; AVFrame *out; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format); char buf[32]; int in_range; + int frame_changed; *frame_out = NULL; if (in->colorspace == AVCOL_SPC_YCGCO) av_log(link->dst, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n"); - if ( in->width != link->w - || in->height != link->h - || in->format != link->format - || in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || in->sample_aspect_ratio.num != link->sample_aspect_ratio.num) { + frame_changed = in->width != link->w || + in->height != link->h || + in->format != link->format || + in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || + in->sample_aspect_ratio.num != link->sample_aspect_ratio.num; + + if (scale->eval_mode == EVAL_MODE_FRAME || frame_changed) { int ret; + unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 }; + + av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB); + av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB); + + if (scale->eval_mode == EVAL_MODE_FRAME && + !frame_changed && + ctx->filter != &ff_vf_scale2ref && + !(vars_w[VAR_N] || vars_w[VAR_T] || vars_w[VAR_POS]) && + !(vars_h[VAR_N] || vars_h[VAR_T] || vars_h[VAR_POS]) && + scale->w && scale->h) + goto scale; if (scale->eval_mode == EVAL_MODE_INIT) { snprintf(buf, sizeof(buf)-1, "%d", outlink->w); av_opt_set(scale, "w", buf, 0); snprintf(buf, sizeof(buf)-1, "%d", outlink->h); av_opt_set(scale, "h", buf, 0); + + ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr); + if (ret < 0) + return ret; + + ret = scale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr); + if (ret < 0) + return ret; + } + + if (ctx->filter == &ff_vf_scale2ref) { + scale->var_values[VAR_S2R_MAIN_N] = link->frame_count_out; + 
scale->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base); + scale->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos; + } else { + scale->var_values[VAR_N] = link->frame_count_out; + scale->var_values[VAR_T] = TS2T(in->pts, link->time_base); + scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos; } link->dst->inputs[0]->format = in->format; @@ -427,6 +722,7 @@ static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out) return ret; } +scale: if (!scale->sws) { *frame_out = in; return 0; @@ -533,7 +829,31 @@ static int filter_frame(AVFilterLink *link, AVFrame *in) static int filter_frame_ref(AVFilterLink *link, AVFrame *in) { + ScaleContext *scale = link->dst->priv; AVFilterLink *outlink = link->dst->outputs[1]; + int frame_changed; + + frame_changed = in->width != link->w || + in->height != link->h || + in->format != link->format || + in->sample_aspect_ratio.den != link->sample_aspect_ratio.den || + in->sample_aspect_ratio.num != link->sample_aspect_ratio.num; + + if (frame_changed) { + link->format = in->format; + link->w = in->width; + link->h = in->height; + link->sample_aspect_ratio.num = in->sample_aspect_ratio.num; + link->sample_aspect_ratio.den = in->sample_aspect_ratio.den; + + config_props_ref(outlink); + } + + if (scale->eval_mode == EVAL_MODE_FRAME) { + scale->var_values[VAR_N] = link->frame_count_out; + scale->var_values[VAR_T] = TS2T(in->pts, link->time_base); + scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos; + } return ff_filter_frame(outlink, in); } @@ -542,23 +862,24 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar char *res, int res_len, int flags) { ScaleContext *scale = ctx->priv; - int ret; + char *str_expr; + AVExpr **pexpr_ptr; + int ret, w, h; - if ( !strcmp(cmd, "width") || !strcmp(cmd, "w") - || !strcmp(cmd, "height") || !strcmp(cmd, "h")) { + w = !strcmp(cmd, "width") || !strcmp(cmd, "w"); + h = !strcmp(cmd, "height") || !strcmp(cmd, "h"); - int old_w = scale->w; - int old_h = scale->h; - AVFilterLink *outlink = ctx->outputs[0]; + if (w || h) { + str_expr = w ? scale->w_expr : scale->h_expr; + pexpr_ptr = w ? &scale->w_pexpr : &scale->h_pexpr; - av_opt_set(scale, cmd, args, 0); - if ((ret = config_props(outlink)) < 0) { - scale->w = old_w; - scale->h = old_h; - } + ret = scale_parse_expr(ctx, str_expr, pexpr_ptr, cmd, args); } else ret = AVERROR(ENOSYS); + if (ret < 0) + av_log(ctx, AV_LOG_ERROR, "Failed to process command. 
Continuing with existing parameters.\n"); + return ret; } @@ -569,12 +890,13 @@ static const AVClass *child_class_next(const AVClass *prev) #define OFFSET(x) offsetof(ScaleContext, x) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption scale_options[] = { - { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "height","Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, { "flags", "Flags to pass to libswscale", OFFSET(flags_str), AV_OPT_TYPE_STRING, { .str = "bilinear" }, .flags = FLAGS }, { "interl", "set interlacing", OFFSET(interlaced), AV_OPT_TYPE_BOOL, {.i64 = 0 }, -1, 1, FLAGS }, { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, FLAGS }, diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c index 31f6b32aa4..79b79db2d3 100644 --- a/libavfilter/vf_showinfo.c +++ b/libavfilter/vf_showinfo.c @@ -24,6 +24,7 @@ #include +#include "libavutil/bswap.h" #include "libavutil/adler32.h" #include "libavutil/display.h" #include "libavutil/imgutils.h" @@ -202,7 +203,7 @@ static void dump_color_property(AVFilterContext *ctx, AVFrame *frame) av_log(ctx, AV_LOG_INFO, "\n"); } -static void update_sample_stats(const uint8_t *src, int len, int64_t *sum, int64_t *sum2) +static void update_sample_stats_8(const uint8_t *src, int len, int64_t *sum, int64_t *sum2) { int i; @@ -212,6 +213,30 @@ static void update_sample_stats(const uint8_t *src, int len, int64_t *sum, int64 } } +static void update_sample_stats_16(int be, const uint8_t *src, int len, int64_t *sum, int64_t *sum2) +{ + const uint16_t *src1 = (const uint16_t *)src; + int i; + + for (i = 0; i < len / 2; i++) { + if ((HAVE_BIGENDIAN && !be) || (!HAVE_BIGENDIAN && be)) { + *sum += av_bswap16(src1[i]); + *sum2 += (uint32_t)av_bswap16(src1[i]) * (uint32_t)av_bswap16(src1[i]); + } else { + *sum += src1[i]; + *sum2 += (uint32_t)src1[i] * (uint32_t)src1[i]; + } + } +} + +static void update_sample_stats(int depth, int be, const uint8_t *src, int len, int64_t *sum, int64_t *sum2) +{ + if (depth <= 8) + update_sample_stats_8(src, len, sum, sum2); + else + update_sample_stats_16(be, src, len, sum, sum2); +} + static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; @@ -220,12 +245,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) uint32_t plane_checksum[4] = {0}, checksum = 0; int64_t sum[4] = {0}, sum2[4] = {0}; int32_t pixelcount[4] = {0}; + int bitdepth = desc->comp[0].depth; + int be = desc->flags & AV_PIX_FMT_FLAG_BE; int i, plane, vsub = desc->log2_chroma_h; for (plane = 0; plane < 4 && s->calculate_checksums && frame->data[plane] && frame->linesize[plane]; plane++) { uint8_t *data = frame->data[plane]; int h = plane == 1 || plane == 2 ? 
AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h; int linesize = av_image_get_linesize(frame->format, frame->width, plane); + int width = linesize >> (bitdepth > 8); if (linesize < 0) return linesize; @@ -234,8 +262,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) plane_checksum[plane] = av_adler32_update(plane_checksum[plane], data, linesize); checksum = av_adler32_update(checksum, data, linesize); - update_sample_stats(data, linesize, sum+plane, sum2+plane); - pixelcount[plane] += linesize; + update_sample_stats(bitdepth, be, data, linesize, sum+plane, sum2+plane); + pixelcount[plane] += width; data += frame->linesize[plane]; } } diff --git a/libavfilter/vf_spp.c b/libavfilter/vf_spp.c index fe579cedb1..7381938f7f 100644 --- a/libavfilter/vf_spp.c +++ b/libavfilter/vf_spp.c @@ -57,8 +57,9 @@ static void *child_next(void *obj, void *prev) #define OFFSET(x) offsetof(SPPContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption spp_options[] = { - { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, FLAGS }, + { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, TFLAGS }, { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS }, { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_HARD}, 0, NB_MODES - 1, FLAGS, "mode" }, { "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD}, INT_MIN, INT_MAX, FLAGS, "mode" }, @@ -444,7 +445,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar { SPPContext *s = ctx->priv; - if (!strcmp(cmd, "level")) { + if (!strcmp(cmd, "level") || !strcmp(cmd, "quality")) { if (!strcmp(args, "max")) s->log2_count = MAX_LEVEL; else diff --git a/libavfilter/vf_stack.c b/libavfilter/vf_stack.c index b9e87a4429..5bc4ccf6ed 100644 --- a/libavfilter/vf_stack.c +++ b/libavfilter/vf_stack.c @@ -310,6 +310,17 @@ static int config_output(AVFilterLink *outlink) outlink->frame_rate = frame_rate; outlink->sample_aspect_ratio = sar; + for (i = 1; i < s->nb_inputs; i++) { + AVFilterLink *inlink = ctx->inputs[i]; + if (outlink->frame_rate.num != inlink->frame_rate.num || + outlink->frame_rate.den != inlink->frame_rate.den) { + av_log(ctx, AV_LOG_VERBOSE, + "Video inputs have different frame rates, output will be VFR\n"); + outlink->frame_rate = av_make_q(1, 0); + break; + } + } + if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0) return ret; diff --git a/libavfilter/vf_swaprect.c b/libavfilter/vf_swaprect.c index f1fab1e36d..cf9c298f2f 100644 --- a/libavfilter/vf_swaprect.c +++ b/libavfilter/vf_swaprect.c @@ -99,7 +99,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) var_values[VAR_DAR] = var_values[VAR_A] * var_values[VAR_SAR]; var_values[VAR_N] = inlink->frame_count_out; var_values[VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base); - var_values[VAR_POS] = in->pkt_pos ? NAN : in->pkt_pos; + var_values[VAR_POS] = in->pkt_pos == -1 ? 
NAN : in->pkt_pos; ret = av_expr_parse_and_eval(&dw, s->w, var_names, &var_values[0], diff --git a/libavfilter/vf_tinterlace.c b/libavfilter/vf_tinterlace.c index fc5d11e053..a77753775c 100644 --- a/libavfilter/vf_tinterlace.c +++ b/libavfilter/vf_tinterlace.c @@ -53,6 +53,7 @@ static const AVOption tinterlace_options[] = { {"complex_filter", "enable complex vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF},INT_MIN, INT_MAX, FLAGS, "flags" }, {"cvlpf", "enable complex vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF},INT_MIN, INT_MAX, FLAGS, "flags" }, {"exact_tb", "force a timebase which can represent timestamps exactly", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_EXACT_TB}, INT_MIN, INT_MAX, FLAGS, "flags" }, + {"bypass_il", "bypass already interlaced frames", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_BYPASS_IL}, INT_MIN, INT_MAX, FLAGS, "flags" }, {NULL} }; @@ -63,10 +64,10 @@ static const AVOption interlace_options[] = { { "scan", "scanning mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_TFF}, 0, 1, FLAGS, "mode"}, { "tff", "top field first", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_TFF}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"}, { "bff", "bottom field first", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_BFF}, INT_MIN, INT_MAX, FLAGS, .unit = "mode"}, - { "lowpass", "set vertical low-pass filter", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = TINTERLACE_FLAG_VLPF}, 0, 2, FLAGS, "flags" }, - { "off", "disable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "flags" }, - { "linear", "linear vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_VLPF}, INT_MIN, INT_MAX, FLAGS, "flags" }, - { "complex", "complex vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = TINTERLACE_FLAG_CVLPF},INT_MIN, INT_MAX, FLAGS, "flags" }, + { "lowpass", "set vertical low-pass filter", OFFSET(lowpass), AV_OPT_TYPE_INT, {.i64 = VLPF_LIN}, 0, 2, FLAGS, "lowpass" }, + { "off", "disable vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = VLPF_OFF}, INT_MIN, INT_MAX, FLAGS, "lowpass" }, + { "linear", "linear vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = VLPF_LIN}, INT_MIN, INT_MAX, FLAGS, "lowpass" }, + { "complex", "complex vertical low-pass filter", 0, AV_OPT_TYPE_CONST, {.i64 = VLPF_CMP}, INT_MIN, INT_MAX, FLAGS, "lowpass" }, { NULL } }; @@ -439,6 +440,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref) * halving the frame rate and preserving image height */ case MODE_INTERLEAVE_TOP: /* top field first */ case MODE_INTERLEAVE_BOTTOM: /* bottom field first */ + if ((tinterlace->flags & TINTERLACE_FLAG_BYPASS_IL) && cur->interlaced_frame) { + av_log(ctx, AV_LOG_WARNING, + "video is already interlaced, adjusting framerate only\n"); + out = av_frame_clone(cur); + if (!out) + return AVERROR(ENOMEM); + out->pts /= 2; // adjust pts to new framerate + ret = ff_filter_frame(outlink, out); + return ret; + } tff = tinterlace->mode == MODE_INTERLEAVE_TOP; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) @@ -518,6 +529,12 @@ static int init_interlace(AVFilterContext *ctx) if (tinterlace->mode <= MODE_BFF) tinterlace->mode += MODE_INTERLEAVE_TOP; + tinterlace->flags |= TINTERLACE_FLAG_BYPASS_IL; + if (tinterlace->lowpass == VLPF_LIN) + tinterlace->flags |= TINTERLACE_FLAG_VLPF; + if (tinterlace->lowpass == VLPF_CMP) + tinterlace->flags |= TINTERLACE_FLAG_CVLPF; + return 0; } diff --git a/libavfilter/vf_tonemap_vaapi.c b/libavfilter/vf_tonemap_vaapi.c new file 
mode 100644 index 0000000000..2f41b90424 --- /dev/null +++ b/libavfilter/vf_tonemap_vaapi.c @@ -0,0 +1,419 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include + +#include "libavutil/avassert.h" +#include "libavutil/mem.h" +#include "libavutil/opt.h" +#include "libavutil/pixdesc.h" +#include "libavutil/mastering_display_metadata.h" + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "vaapi_vpp.h" + +typedef struct HDRVAAPIContext { + VAAPIVPPContext vpp_ctx; // must be the first field + + char *output_format_string; + + char *color_primaries_string; + char *color_transfer_string; + char *color_matrix_string; + + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_transfer; + enum AVColorSpace color_matrix; + + VAHdrMetaDataHDR10 in_metadata; + + AVFrameSideData *src_display; + AVFrameSideData *src_light; +} HDRVAAPIContext; + +static int tonemap_vaapi_save_metadata(AVFilterContext *avctx, AVFrame *input_frame) +{ + HDRVAAPIContext *ctx = avctx->priv; + AVMasteringDisplayMetadata *hdr_meta; + AVContentLightMetadata *light_meta; + + if (input_frame->color_trc != AVCOL_TRC_SMPTE2084) { + av_log(avctx, AV_LOG_WARNING, "Only support HDR10 as input for vaapi tone-mapping\n"); + } + + ctx->src_display = av_frame_get_side_data(input_frame, + AV_FRAME_DATA_MASTERING_DISPLAY_METADATA); + if (ctx->src_display) { + hdr_meta = (AVMasteringDisplayMetadata *)ctx->src_display->data; + if (!hdr_meta) { + av_log(avctx, AV_LOG_ERROR, "No mastering display data\n"); + return AVERROR(EINVAL); + } + + if (hdr_meta->has_luminance) { + const int luma_den = 10000; + ctx->in_metadata.max_display_mastering_luminance = + lrint(luma_den * av_q2d(hdr_meta->max_luminance)); + ctx->in_metadata.min_display_mastering_luminance = + FFMIN(lrint(luma_den * av_q2d(hdr_meta->min_luminance)), + ctx->in_metadata.max_display_mastering_luminance); + + av_log(avctx, AV_LOG_DEBUG, + "Mastering Display Metadata(in luminance):\n"); + av_log(avctx, AV_LOG_DEBUG, + "min_luminance=%u, max_luminance=%u\n", + ctx->in_metadata.min_display_mastering_luminance, + ctx->in_metadata.max_display_mastering_luminance); + } + + if (hdr_meta->has_primaries) { + int i; + const int mapping[3] = {1, 2, 0}; //green, blue, red + const int chroma_den = 50000; + + for (i = 0; i < 3; i++) { + const int j = mapping[i]; + ctx->in_metadata.display_primaries_x[i] = + FFMIN(lrint(chroma_den * + av_q2d(hdr_meta->display_primaries[j][0])), + chroma_den); + ctx->in_metadata.display_primaries_y[i] = + FFMIN(lrint(chroma_den * + av_q2d(hdr_meta->display_primaries[j][1])), + chroma_den); + } + + ctx->in_metadata.white_point_x = + FFMIN(lrint(chroma_den * av_q2d(hdr_meta->white_point[0])), + chroma_den); + ctx->in_metadata.white_point_y = + FFMIN(lrint(chroma_den * 
av_q2d(hdr_meta->white_point[1])), + chroma_den); + + av_log(avctx, AV_LOG_DEBUG, + "Mastering Display Metadata(in primaries):\n"); + av_log(avctx, AV_LOG_DEBUG, + "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n", + ctx->in_metadata.display_primaries_x[0], + ctx->in_metadata.display_primaries_y[0], + ctx->in_metadata.display_primaries_x[1], + ctx->in_metadata.display_primaries_y[1], + ctx->in_metadata.display_primaries_x[2], + ctx->in_metadata.display_primaries_y[2], + ctx->in_metadata.white_point_x, + ctx->in_metadata.white_point_y); + } + } else { + av_log(avctx, AV_LOG_ERROR, "No mastering display data from input\n"); + return AVERROR(EINVAL); + } + + ctx->src_light = av_frame_get_side_data(input_frame, + AV_FRAME_DATA_CONTENT_LIGHT_LEVEL); + if (ctx->src_light) { + light_meta = (AVContentLightMetadata *)ctx->src_light->data; + if (!light_meta) { + av_log(avctx, AV_LOG_ERROR, "No light metadata\n"); + return AVERROR(EINVAL); + } + + ctx->in_metadata.max_content_light_level = light_meta->MaxCLL; + ctx->in_metadata.max_pic_average_light_level = light_meta->MaxFALL; + + av_log(avctx, AV_LOG_DEBUG, + "Mastering Content Light Level (in):\n"); + av_log(avctx, AV_LOG_DEBUG, + "MaxCLL(%u) MaxFALL(%u)\n", + ctx->in_metadata.max_content_light_level, + ctx->in_metadata.max_pic_average_light_level); + } else { + av_log(avctx, AV_LOG_DEBUG, "No content light level from input\n"); + } + return 0; +} + +static int tonemap_vaapi_set_filter_params(AVFilterContext *avctx, AVFrame *input_frame) +{ + VAAPIVPPContext *vpp_ctx = avctx->priv; + HDRVAAPIContext *ctx = avctx->priv; + VAStatus vas; + VAProcFilterParameterBufferHDRToneMapping *hdrtm_param; + + vas = vaMapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0], + (void**)&hdrtm_param); + if (vas != VA_STATUS_SUCCESS) { + av_log(avctx, AV_LOG_ERROR, "Failed to map " + "buffer (%d): %d (%s).\n", + vpp_ctx->filter_buffers[0], vas, vaErrorStr(vas)); + return AVERROR(EIO); + } + + memcpy(hdrtm_param->data.metadata, &ctx->in_metadata, sizeof(VAHdrMetaDataHDR10)); + + vas = vaUnmapBuffer(vpp_ctx->hwctx->display, vpp_ctx->filter_buffers[0]); + if (vas != VA_STATUS_SUCCESS) { + av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: " + "%d (%s).\n", vas, vaErrorStr(vas)); + return AVERROR(EIO); + } + + return 0; +} + +static int tonemap_vaapi_build_filter_params(AVFilterContext *avctx) +{ + VAAPIVPPContext *vpp_ctx = avctx->priv; + HDRVAAPIContext *ctx = avctx->priv; + VAStatus vas; + VAProcFilterParameterBufferHDRToneMapping hdrtm_param; + VAProcFilterCapHighDynamicRange hdr_cap[VAProcHighDynamicRangeMetadataTypeCount]; + int num_query_caps; + int i; + + memset(&hdrtm_param, 0, sizeof(hdrtm_param)); + memset(&ctx->in_metadata, 0, sizeof(ctx->in_metadata)); + + num_query_caps = VAProcHighDynamicRangeMetadataTypeCount; + vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display, + vpp_ctx->va_context, + VAProcFilterHighDynamicRangeToneMapping, + &hdr_cap, &num_query_caps); + if (vas != VA_STATUS_SUCCESS) { + av_log(avctx, AV_LOG_ERROR, "Failed to query HDR caps " + "context: %d (%s).\n", vas, vaErrorStr(vas)); + return AVERROR(EIO); + } + + for (i = 0; i < num_query_caps; i++) { + if (hdr_cap[i].metadata_type != VAProcHighDynamicRangeMetadataNone) + break; + } + + if (i >= num_query_caps) { + av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support HDR\n"); + return AVERROR(EINVAL); + } + + for (i = 0; i < num_query_caps; i++) { + if (VA_TONE_MAPPING_HDR_TO_SDR & hdr_cap[i].caps_flag) + break; + } + + if (i >= num_query_caps) { + av_log(avctx, 
AV_LOG_ERROR, + "VAAPI driver doesn't support HDR to SDR\n"); + return AVERROR(EINVAL); + } + + hdrtm_param.type = VAProcFilterHighDynamicRangeToneMapping; + hdrtm_param.data.metadata_type = VAProcHighDynamicRangeMetadataHDR10; + hdrtm_param.data.metadata = &ctx->in_metadata; + hdrtm_param.data.metadata_size = sizeof(VAHdrMetaDataHDR10); + + return ff_vaapi_vpp_make_param_buffers(avctx, + VAProcFilterParameterBufferType, + &hdrtm_param, sizeof(hdrtm_param), 1); +} + +static int tonemap_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame) +{ + AVFilterContext *avctx = inlink->dst; + AVFilterLink *outlink = avctx->outputs[0]; + VAAPIVPPContext *vpp_ctx = avctx->priv; + HDRVAAPIContext *ctx = avctx->priv; + AVFrame *output_frame = NULL; + VASurfaceID input_surface, output_surface; + + VAProcPipelineParameterBuffer params; + int err; + + av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n", + av_get_pix_fmt_name(input_frame->format), + input_frame->width, input_frame->height, input_frame->pts); + + if (vpp_ctx->va_context == VA_INVALID_ID){ + av_frame_free(&input_frame); + return AVERROR(EINVAL); + } + + err = tonemap_vaapi_save_metadata(avctx, input_frame); + if (err < 0) + goto fail; + + err = tonemap_vaapi_set_filter_params(avctx, input_frame); + if (err < 0) + goto fail; + + input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; + av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for tonemap vpp input.\n", + input_surface); + + output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width, + vpp_ctx->output_height); + if (!output_frame) { + err = AVERROR(ENOMEM); + goto fail; + } + + output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3]; + av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for tonemap vpp output.\n", + output_surface); + memset(¶ms, 0, sizeof(params)); + + err = av_frame_copy_props(output_frame, input_frame); + if (err < 0) + goto fail; + + if (ctx->color_primaries != AVCOL_PRI_UNSPECIFIED) + output_frame->color_primaries = ctx->color_primaries; + + if (ctx->color_transfer != AVCOL_TRC_UNSPECIFIED) + output_frame->color_trc = ctx->color_transfer; + else + output_frame->color_trc = AVCOL_TRC_BT709; + + if (ctx->color_matrix != AVCOL_SPC_UNSPECIFIED) + output_frame->colorspace = ctx->color_matrix; + + err = ff_vaapi_vpp_init_params(avctx, ¶ms, + input_frame, output_frame); + if (err < 0) + goto fail; + + err = ff_vaapi_vpp_render_picture(avctx, ¶ms, output_frame); + if (err < 0) + goto fail; + + av_frame_free(&input_frame); + + av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", + av_get_pix_fmt_name(output_frame->format), + output_frame->width, output_frame->height, output_frame->pts); + + return ff_filter_frame(outlink, output_frame); + +fail: + av_frame_free(&input_frame); + av_frame_free(&output_frame); + return err; +} + +static av_cold int tonemap_vaapi_init(AVFilterContext *avctx) +{ + VAAPIVPPContext *vpp_ctx = avctx->priv; + HDRVAAPIContext *ctx = avctx->priv; + + ff_vaapi_vpp_ctx_init(avctx); + vpp_ctx->build_filter_params = tonemap_vaapi_build_filter_params; + vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit; + + if (ctx->output_format_string) { + vpp_ctx->output_format = av_get_pix_fmt(ctx->output_format_string); + switch (vpp_ctx->output_format) { + case AV_PIX_FMT_NV12: + case AV_PIX_FMT_P010: + break; + default: + av_log(avctx, AV_LOG_ERROR, "Invalid output format.\n"); + return AVERROR(EINVAL); + } + } else { + vpp_ctx->output_format = AV_PIX_FMT_NV12; + av_log(avctx, AV_LOG_WARNING, "Output 
format not set, use default format NV12\n"); + } + +#define STRING_OPTION(var_name, func_name, default_value) do { \ + if (ctx->var_name ## _string) { \ + int var = av_ ## func_name ## _from_name(ctx->var_name ## _string); \ + if (var < 0) { \ + av_log(avctx, AV_LOG_ERROR, "Invalid %s.\n", #var_name); \ + return AVERROR(EINVAL); \ + } \ + ctx->var_name = var; \ + } else { \ + ctx->var_name = default_value; \ + } \ + } while (0) + + STRING_OPTION(color_primaries, color_primaries, AVCOL_PRI_UNSPECIFIED); + STRING_OPTION(color_transfer, color_transfer, AVCOL_TRC_UNSPECIFIED); + STRING_OPTION(color_matrix, color_space, AVCOL_SPC_UNSPECIFIED); + + return 0; +} + +#define OFFSET(x) offsetof(HDRVAAPIContext, x) +#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM) +static const AVOption tonemap_vaapi_options[] = { + { "format", "Output pixel format set", OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS, "format" }, + { "matrix", "Output color matrix coefficient set", + OFFSET(color_matrix_string), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS, "matrix" }, + { "m", "Output color matrix coefficient set", + OFFSET(color_matrix_string), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS, "matrix" }, + { "primaries", "Output color primaries set", + OFFSET(color_primaries_string), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS, "primaries" }, + { "p", "Output color primaries set", + OFFSET(color_primaries_string), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS, "primaries" }, + { "transfer", "Output color transfer characteristics set", + OFFSET(color_transfer_string), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS, "transfer" }, + { "t", "Output color transfer characteristics set", + OFFSET(color_transfer_string), AV_OPT_TYPE_STRING, + { .str = NULL }, .flags = FLAGS, "transfer" }, + { NULL } +}; + + +AVFILTER_DEFINE_CLASS(tonemap_vaapi); + +static const AVFilterPad tonemap_vaapi_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = &tonemap_vaapi_filter_frame, + .config_props = &ff_vaapi_vpp_config_input, + }, + { NULL } +}; + +static const AVFilterPad tonemap_vaapi_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = &ff_vaapi_vpp_config_output, + }, + { NULL } +}; + +AVFilter ff_vf_tonemap_vaapi = { + .name = "tonemap_vaapi", + .description = NULL_IF_CONFIG_SMALL("VAAPI VPP for tone-mapping"), + .priv_size = sizeof(HDRVAAPIContext), + .init = &tonemap_vaapi_init, + .uninit = &ff_vaapi_vpp_ctx_uninit, + .query_formats = &ff_vaapi_vpp_query_formats, + .inputs = tonemap_vaapi_inputs, + .outputs = tonemap_vaapi_outputs, + .priv_class = &tonemap_vaapi_class, + .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, +}; diff --git a/libavfilter/vf_vectorscope.c b/libavfilter/vf_vectorscope.c index e3e00797d0..38af878042 100644 --- a/libavfilter/vf_vectorscope.c +++ b/libavfilter/vf_vectorscope.c @@ -29,8 +29,16 @@ #include "internal.h" #include "video.h" +enum GraticuleType { + GRAT_NONE, + GRAT_GREEN, + GRAT_COLOR, + GRAT_INVERT, + NB_GRATICULES +}; + enum VectorscopeMode { - GRAY, + TINT, COLOR, COLOR2, COLOR3, @@ -45,6 +53,7 @@ typedef struct VectorscopeContext { int intensity; float fintensity; uint16_t bg_color[4]; + float ftint[2]; int planewidth[4]; int planeheight[4]; int hsub, vsub; @@ -59,6 +68,7 @@ typedef struct VectorscopeContext { float bgopacity; float lthreshold; float hthreshold; + int tint[2]; int tmin; int tmax; int flags; @@ -79,7 +89,8 @@ typedef struct 
VectorscopeContext { static const AVOption vectorscope_options[] = { { "mode", "set vectorscope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, MODE_NB-1, FLAGS, "mode"}, { "m", "set vectorscope mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, MODE_NB-1, FLAGS, "mode"}, - { "gray", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAY}, 0, 0, FLAGS, "mode" }, + { "gray", 0, 0, AV_OPT_TYPE_CONST, {.i64=TINT}, 0, 0, FLAGS, "mode" }, + { "tint", 0, 0, AV_OPT_TYPE_CONST, {.i64=TINT}, 0, 0, FLAGS, "mode" }, { "color", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR}, 0, 0, FLAGS, "mode" }, { "color2", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR2}, 0, 0, FLAGS, "mode" }, { "color3", 0, 0, AV_OPT_TYPE_CONST, {.i64=COLOR3}, 0, 0, FLAGS, "mode" }, @@ -95,11 +106,12 @@ static const AVOption vectorscope_options[] = { { "instant", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "envelope" }, { "peak", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "envelope" }, { "peak+instant", 0, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "envelope" }, - { "graticule", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "graticule"}, - { "g", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, FLAGS, "graticule"}, - { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "graticule" }, - { "green", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "graticule" }, - { "color", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "graticule" }, + { "graticule", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=GRAT_NONE}, 0, NB_GRATICULES-1, FLAGS, "graticule"}, + { "g", "set graticule", OFFSET(graticule), AV_OPT_TYPE_INT, {.i64=GRAT_NONE}, 0, NB_GRATICULES-1, FLAGS, "graticule"}, + { "none", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAT_NONE}, 0, 0, FLAGS, "graticule" }, + { "green", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAT_GREEN}, 0, 0, FLAGS, "graticule" }, + { "color", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAT_COLOR}, 0, 0, FLAGS, "graticule" }, + { "invert", 0, 0, AV_OPT_TYPE_CONST, {.i64=GRAT_INVERT},0, 0, FLAGS, "graticule" }, { "opacity", "set graticule opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS}, { "o", "set graticule opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS}, { "flags", "set graticule flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=4}, 0, 7, FLAGS, "flags"}, @@ -118,6 +130,10 @@ static const AVOption vectorscope_options[] = { { "auto", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "colorspace" }, { "601", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "colorspace" }, { "709", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "colorspace" }, + { "tint0", "set 1st tint", OFFSET(ftint[0]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, + { "t0", "set 1st tint", OFFSET(ftint[0]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, + { "tint1", "set 2nd tint", OFFSET(ftint[1]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, + { "t1", "set 2nd tint", OFFSET(ftint[1]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, { NULL } }; @@ -139,7 +155,7 @@ static const enum AVPixelFormat out_yuv10_pix_fmts[] = { }; static const enum AVPixelFormat out_yuv12_pix_fmts[] = { - AV_PIX_FMT_YUV444P12, + AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_NONE }; @@ -167,7 +183,7 @@ static const enum AVPixelFormat in1_pix_fmts[] = { AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, - AV_PIX_FMT_YUV444P12, + AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRP, 
AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRAP12, @@ -188,6 +204,7 @@ static const enum AVPixelFormat in2_pix_fmts[] = { AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12, + AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_NONE }; @@ -417,6 +434,8 @@ static void vectorscope16(VectorscopeContext *s, AVFrame *in, AVFrame *out, int uint16_t *dpx = dst[px]; uint16_t *dpy = dst[py]; uint16_t *dpd = dst[pd]; + uint16_t *dp1 = dst[1]; + uint16_t *dp2 = dst[2]; const int max = s->size - 1; const int mid = s->size / 2; const int tmin = s->tmin; @@ -433,42 +452,21 @@ static void vectorscope16(VectorscopeContext *s, AVFrame *in, AVFrame *out, int switch (s->mode) { case COLOR: case COLOR5: - case GRAY: - if (s->is_yuv) { - for (i = 0; i < h; i++) { - const int iwx = i * slinesizex; - const int iwy = i * slinesizey; - const int iwd = i * slinesized; - for (j = 0; j < w; j++) { - const int x = FFMIN(spx[iwx + j], max); - const int y = FFMIN(spy[iwy + j], max); - const int z = spd[iwd + j]; - const int pos = y * dlinesize + x; + case TINT: + for (i = 0; i < h; i++) { + const int iwx = i * slinesizex; + const int iwy = i * slinesizey; + const int iwd = i * slinesized; + for (j = 0; j < w; j++) { + const int x = FFMIN(spx[iwx + j], max); + const int y = FFMIN(spy[iwy + j], max); + const int z = spd[iwd + j]; + const int pos = y * dlinesize + x; - if (z < tmin || z > tmax) - continue; + if (z < tmin || z > tmax) + continue; - dpd[pos] = FFMIN(dpd[pos] + intensity, max); - } - } - } else { - for (i = 0; i < h; i++) { - const int iwx = i * slinesizex; - const int iwy = i * slinesizey; - const int iwd = i * slinesized; - for (j = 0; j < w; j++) { - const int x = FFMIN(spx[iwx + j], max); - const int y = FFMIN(spy[iwy + j], max); - const int z = spd[iwd + j]; - const int pos = y * dlinesize + x; - - if (z < tmin || z > tmax) - continue; - - dst[0][pos] = FFMIN(dst[0][pos] + intensity, max); - dst[1][pos] = FFMIN(dst[1][pos] + intensity, max); - dst[2][pos] = FFMIN(dst[2][pos] + intensity, max); - } + dpd[pos] = FFMIN(dpd[pos] + intensity, max); } } break; @@ -572,7 +570,28 @@ static void vectorscope16(VectorscopeContext *s, AVFrame *in, AVFrame *out, int } } - if (s->mode == COLOR) { + if (s->mode == TINT && s->is_yuv && + (s->tint[0] != mid || s->tint[1] != mid)) { + for (i = 0; i < out->height; i++) { + for (j = 0; j < out->width; j++) { + const int pos = i * dlinesize + j; + if (dpd[pos]) { + dp1[pos] = s->tint[0]; + dp2[pos] = s->tint[1]; + } + } + } + } else if (s->mode == TINT && !s->is_yuv) { + for (i = 0; i < out->height; i++) { + for (j = 0; j < out->width; j++) { + const int pos = i * dlinesize + j; + if (dpd[pos]) { + dpx[pos] = av_clip(dpd[pos] + dpd[pos] * s->ftint[0], 0, max); + dpy[pos] = av_clip(dpd[pos] + dpd[pos] * s->ftint[1], 0, max); + } + } + } + } else if (s->mode == COLOR) { for (i = 0; i < out->height; i++) { for (j = 0; j < out->width; j++) { if (!dpd[i * dlinesize + j]) { @@ -615,6 +634,8 @@ static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int p uint8_t *dpx = dst[px]; uint8_t *dpy = dst[py]; uint8_t *dpd = dst[pd]; + uint8_t *dp1 = dst[1]; + uint8_t *dp2 = dst[2]; const int tmin = s->tmin; const int tmax = s->tmax; int i, j, k; @@ -627,42 +648,21 @@ static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int p switch 
(s->mode) { case COLOR5: case COLOR: - case GRAY: - if (s->is_yuv) { - for (i = 0; i < h; i++) { - const int iwx = i * slinesizex; - const int iwy = i * slinesizey; - const int iwd = i * slinesized; - for (j = 0; j < w; j++) { - const int x = spx[iwx + j]; - const int y = spy[iwy + j]; - const int z = spd[iwd + j]; - const int pos = y * dlinesize + x; + case TINT: + for (i = 0; i < h; i++) { + const int iwx = i * slinesizex; + const int iwy = i * slinesizey; + const int iwd = i * slinesized; + for (j = 0; j < w; j++) { + const int x = spx[iwx + j]; + const int y = spy[iwy + j]; + const int z = spd[iwd + j]; + const int pos = y * dlinesize + x; - if (z < tmin || z > tmax) - continue; + if (z < tmin || z > tmax) + continue; - dpd[pos] = FFMIN(dpd[pos] + intensity, 255); - } - } - } else { - for (i = 0; i < h; i++) { - const int iwx = i * slinesizex; - const int iwy = i * slinesizey; - const int iwd = i * slinesized; - for (j = 0; j < w; j++) { - const int x = spx[iwx + j]; - const int y = spy[iwy + j]; - const int z = spd[iwd + j]; - const int pos = y * dlinesize + x; - - if (z < tmin || z > tmax) - continue; - - dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255); - dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255); - dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255); - } + dpd[pos] = FFMIN(dpd[pos] + intensity, 255); } } break; @@ -766,7 +766,28 @@ static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int p } } - if (s->mode == COLOR) { + if (s->mode == TINT && s->is_yuv && + (s->tint[0] != 128 || s->tint[1] != 128)) { + for (i = 0; i < out->height; i++) { + for (j = 0; j < out->width; j++) { + const int pos = i * dlinesize + j; + if (dpd[pos]) { + dp1[pos] = s->tint[0]; + dp2[pos] = s->tint[1]; + } + } + } + } else if (s->mode == TINT && !s->is_yuv) { + for (i = 0; i < out->height; i++) { + for (j = 0; j < out->width; j++) { + const int pos = i * dlinesize + j; + if (dpd[pos]) { + dpx[pos] = av_clip_uint8(dpd[pos] + dpd[pos] * s->ftint[0]); + dpy[pos] = av_clip_uint8(dpd[pos] + dpd[pos] * s->ftint[1]); + } + } + } + } else if (s->mode == COLOR) { for (i = 0; i < out->height; i++) { for (j = 0; j < out->width; j++) { if (!dpd[i * out->linesize[pd] + j]) { @@ -870,6 +891,28 @@ static void draw_dots(uint8_t *dst, int L, int v, float o) dst[-l + 2] = dst[-l + 2] * f + V; } +static void draw_idots(uint8_t *dst, int L, float o) +{ + const float f = 1. - o; + int l = L * 2; + + dst[ l - 3] = dst[ l - 3] * f + (255 - dst[ l - 3]) * o; + dst[ l + 3] = dst[ l + 3] * f + (255 - dst[ l + 3]) * o; + dst[-l - 3] = dst[-l - 3] * f + (255 - dst[-l - 3]) * o; + dst[-l + 3] = dst[-l + 3] * f + (255 - dst[-l + 3]) * o; + + l += L; + + dst[ l - 3] = dst[ l - 3] * f + (255 - dst[ l - 3]) * o; + dst[ l + 3] = dst[ l + 3] * f + (255 - dst[ l + 3]) * o; + dst[ l - 2] = dst[ l - 2] * f + (255 - dst[ l - 2]) * o; + dst[ l + 2] = dst[ l + 2] * f + (255 - dst[ l + 2]) * o; + dst[-l - 3] = dst[-l - 3] * f + (255 - dst[-l - 3]) * o; + dst[-l + 3] = dst[-l + 3] * f + (255 - dst[-l + 3]) * o; + dst[-l - 2] = dst[-l - 2] * f + (255 - dst[-l - 2]) * o; + dst[-l + 2] = dst[-l + 2] * f + (255 - dst[-l + 2]) * o; +} + static void draw_dots16(uint16_t *dst, int L, int v, float o) { const float f = 1. - o; @@ -893,10 +936,83 @@ static void draw_dots16(uint16_t *dst, int L, int v, float o) dst[-l + 2] = dst[-l + 2] * f + V; } +static void draw_idots16(uint16_t *dst, int L, int v, float o) +{ + const float f = 1. 
- o; + int l = L * 2; + + dst[ l - 3] = dst[ l - 3] * f + (v - dst[ l - 3]) * o; + dst[ l + 3] = dst[ l + 3] * f + (v - dst[ l + 3]) * o; + dst[-l - 3] = dst[-l - 3] * f + (v - dst[-l - 3]) * o; + dst[-l + 3] = dst[-l + 3] * f + (v - dst[-l + 3]) * o; + + l += L; + + dst[ l - 3] = dst[ l - 3] * f + (v - dst[ l - 3]) * o; + dst[ l + 3] = dst[ l + 3] * f + (v - dst[ l + 3]) * o; + dst[ l - 2] = dst[ l - 2] * f + (v - dst[ l - 2]) * o; + dst[ l + 2] = dst[ l + 2] * f + (v - dst[ l + 2]) * o; + dst[-l - 3] = dst[-l - 3] * f + (v - dst[-l - 3]) * o; + dst[-l + 3] = dst[-l + 3] * f + (v - dst[-l + 3]) * o; + dst[-l - 2] = dst[-l - 2] * f + (v - dst[-l - 2]) * o; + dst[-l + 2] = dst[-l + 2] * f + (v - dst[-l + 2]) * o; +} + static void none_graticule(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P) { } +static void draw_ihtext(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint8_t color[4]) +{ + const uint8_t *font; + int font_height; + int i, plane; + + font = avpriv_cga_font, font_height = 8; + + for (plane = 0; plane < 4 && out->data[plane]; plane++) { + for (i = 0; txt[i]; i++) { + int char_y, mask; + + uint8_t *p = out->data[plane] + y * out->linesize[plane] + (x + i * 8); + for (char_y = font_height - 1; char_y >= 0; char_y--) { + for (mask = 0x80; mask; mask >>= 1) { + if (font[txt[i] * font_height + char_y] & mask) + p[0] = p[0] * o2 + (255 - p[0]) * o1; + p++; + } + p += out->linesize[plane] - 8; + } + } + } +} + +static void draw_ihtext16(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint16_t color[4]) +{ + const uint8_t *font; + int font_height; + int i, plane; + + font = avpriv_cga_font, font_height = 8; + + for (plane = 0; plane < 4 && out->data[plane]; plane++) { + for (i = 0; txt[i]; i++) { + int char_y, mask; + int v = color[plane]; + + uint16_t *p = (uint16_t *)(out->data[plane] + y * out->linesize[plane]) + (x + i * 8); + for (char_y = font_height - 1; char_y >= 0; char_y--) { + for (mask = 0x80; mask; mask >>= 1) { + if (font[txt[i] * font_height + char_y] & mask) + p[0] = p[0] * o2 + (v - p[0]) * o1; + p++; + } + p += out->linesize[plane] / 2 - 8; + } + } + } +} + static void draw_htext(AVFrame *out, int x, int y, float o1, float o2, const char *txt, const uint8_t color[4]) { const uint8_t *font; @@ -1201,6 +1317,123 @@ static void green_graticule(VectorscopeContext *s, AVFrame *out, int X, int Y, i } } +static void invert_graticule16(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P) +{ + const int max = s->size - 1; + const float o = s->opacity; + int i; + + for (i = 0; i < 12; i++) { + int x = positions[P][i][X]; + int y = positions[P][i][Y]; + + draw_idots16((uint16_t *)(out->data[D] + y * out->linesize[D] + x * 2), out->linesize[D] / 2, max, o); + draw_idots16((uint16_t *)(out->data[X] + y * out->linesize[X] + x * 2), out->linesize[X] / 2, max, o); + draw_idots16((uint16_t *)(out->data[Y] + y * out->linesize[Y] + x * 2), out->linesize[Y] / 2, max, o); + if (out->data[3]) + draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o); + } + + if (s->flags & 1) { + int x = positions[P][12][X]; + int y = positions[P][12][Y]; + + draw_idots16((uint16_t *)(out->data[D] + y * out->linesize[D] + x * 2), out->linesize[D] / 2, max, o); + draw_idots16((uint16_t *)(out->data[X] + y * out->linesize[X] + x * 2), out->linesize[X] / 2, max, o); + draw_idots16((uint16_t *)(out->data[Y] + y * out->linesize[Y] + x * 2), out->linesize[Y] / 2, max, o); + if (out->data[3]) + 
draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o); + } + + if (s->flags & 2) { + int x = positions[P][13][X]; + int y = positions[P][13][Y]; + + draw_idots16((uint16_t *)(out->data[D] + y * out->linesize[D] + x * 2), out->linesize[D] / 2, max, o); + draw_idots16((uint16_t *)(out->data[X] + y * out->linesize[X] + x * 2), out->linesize[X] / 2, max, o); + draw_idots16((uint16_t *)(out->data[Y] + y * out->linesize[Y] + x * 2), out->linesize[Y] / 2, max, o); + if (out->data[3]) + draw_dots16((uint16_t *)(out->data[3] + y * out->linesize[3] + x * 2), out->linesize[3] / 2, max, o); + } + + for (i = 0; i < 6 && s->flags & 4; i++) { + uint16_t color[4] = { max, max, max, max }; + int x = positions[P][i][X]; + int y = positions[P][i][Y]; + + if (x > max / 2) + x += 8; + else + x -= 14; + if (y > max / 2) + y += 8; + else + y -= 14; + + x = av_clip(x, 0, out->width - 9); + y = av_clip(y, 0, out->height - 9); + draw_ihtext16(out, x, y, o, 1. - o, positions_name[i], color); + } +} + +static void invert_graticule(VectorscopeContext *s, AVFrame *out, int X, int Y, int D, int P) +{ + const float o = s->opacity; + int i; + + for (i = 0; i < 12; i++) { + int x = positions[P][i][X]; + int y = positions[P][i][Y]; + + draw_idots(out->data[D] + y * out->linesize[D] + x, out->linesize[D], o); + draw_idots(out->data[X] + y * out->linesize[X] + x, out->linesize[X], o); + draw_idots(out->data[Y] + y * out->linesize[Y] + x, out->linesize[Y], o); + if (out->data[3]) + draw_idots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], o); + } + + if (s->flags & 1) { + int x = positions[P][12][X]; + int y = positions[P][12][Y]; + + draw_idots(out->data[D] + y * out->linesize[D] + x, out->linesize[D], o); + draw_idots(out->data[X] + y * out->linesize[X] + x, out->linesize[X], o); + draw_idots(out->data[Y] + y * out->linesize[Y] + x, out->linesize[Y], o); + if (out->data[3]) + draw_idots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], o); + } + + if (s->flags & 2) { + int x = positions[P][13][X]; + int y = positions[P][13][Y]; + + draw_idots(out->data[D] + y * out->linesize[D] + x, out->linesize[D], o); + draw_idots(out->data[X] + y * out->linesize[X] + x, out->linesize[X], o); + draw_idots(out->data[Y] + y * out->linesize[Y] + x, out->linesize[Y], o); + if (out->data[3]) + draw_idots(out->data[3] + y * out->linesize[3] + x, out->linesize[3], o); + } + + for (i = 0; i < 6 && s->flags & 4; i++) { + uint8_t color[4] = { 255, 255, 255, 255 }; + int x = positions[P][i][X]; + int y = positions[P][i][Y]; + + if (x > 128) + x += 8; + else + x -= 14; + if (y > 128) + y += 8; + else + y -= 14; + + x = av_clip(x, 0, out->width - 9); + y = av_clip(y, 0, out->height - 9); + draw_ihtext(out, x, y, o, 1. 
- o, positions_name[i], color); + } +} + static int filter_frame(AVFilterLink *inlink, AVFrame *in) { AVFilterContext *ctx = inlink->dst; @@ -1262,9 +1495,9 @@ static int config_input(AVFilterLink *inlink) return AVERROR(EINVAL); } - if (s->mode == GRAY && s->is_yuv) + if (s->mode == TINT && s->is_yuv) { s->pd = 0; - else { + } else { if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1)) s->pd = 0; else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0)) @@ -1281,19 +1514,26 @@ static int config_input(AVFilterLink *inlink) s->graticulef = none_graticule; if (s->is_yuv && s->size == 256) { - if (s->graticule == 1) + if (s->graticule == GRAT_GREEN) s->graticulef = green_graticule; - else if (s->graticule == 2) + else if (s->graticule == GRAT_COLOR) s->graticulef = color_graticule; + else if (s->graticule == GRAT_INVERT) + s->graticulef = invert_graticule; } else if (s->is_yuv) { - if (s->graticule == 1) + if (s->graticule == GRAT_GREEN) s->graticulef = green_graticule16; - else if (s->graticule == 2) + else if (s->graticule == GRAT_COLOR) s->graticulef = color_graticule16; + else if (s->graticule == GRAT_INVERT) + s->graticulef = invert_graticule16; } s->bg_color[3] = s->bgopacity * (s->size - 1); + s->tint[0] = .5f * (s->ftint[0] + 1.f) * (s->size - 1); + s->tint[1] = .5f * (s->ftint[1] + 1.f) * (s->size - 1); + switch (inlink->format) { case AV_PIX_FMT_GBRP12: case AV_PIX_FMT_GBRP10: @@ -1306,8 +1546,8 @@ static int config_input(AVFilterLink *inlink) break; default: s->bg_color[0] = 0; - s->bg_color[1] = s->size / 2 - 1; - s->bg_color[2] = s->size / 2 - 1; + s->bg_color[1] = s->size / 2; + s->bg_color[2] = s->size / 2; } s->hsub = desc->log2_chroma_w; diff --git a/libavfilter/vf_vibrance.c b/libavfilter/vf_vibrance.c index aac61c0f10..8e1a55caca 100644 --- a/libavfilter/vf_vibrance.c +++ b/libavfilter/vf_vibrance.c @@ -224,7 +224,7 @@ static const AVFilterPad vibrance_outputs[] = { }; #define OFFSET(x) offsetof(VibranceContext, x) -#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM +#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption vibrance_options[] = { { "intensity", "set the intensity value", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0}, -2, 2, VF }, @@ -249,4 +249,5 @@ AVFilter ff_vf_vibrance = { .inputs = vibrance_inputs, .outputs = vibrance_outputs, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS, + .process_command = ff_filter_process_command, }; diff --git a/libavfilter/vf_waveform.c b/libavfilter/vf_waveform.c index a209731ae2..b2c5b46d80 100644 --- a/libavfilter/vf_waveform.c +++ b/libavfilter/vf_waveform.c @@ -112,6 +112,8 @@ typedef struct WaveformContext { GraticuleLines *glines; int nb_glines; int rgb; + float ftint[2]; + int tint[2]; int (*waveform_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); @@ -179,6 +181,10 @@ static const AVOption waveform_options[] = { { "ire", NULL, 0, AV_OPT_TYPE_CONST, {.i64=IRE}, 0, 0, FLAGS, "scale" }, { "bgopacity", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS }, { "b", "set background opacity", OFFSET(bgopacity), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, FLAGS }, + { "tint0", "set 1st tint", OFFSET(ftint[0]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, + { "t0", "set 1st tint", OFFSET(ftint[0]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, + { "tint1", "set 2nd tint", OFFSET(ftint[1]), AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, + { "t1", "set 2nd tint", OFFSET(ftint[1]), 
AV_OPT_TYPE_FLOAT, {.dbl=0}, -1, 1, FLAGS}, { NULL } }; @@ -199,6 +205,7 @@ static const enum AVPixelFormat in_lowpass_pix_fmts[] = { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12, + AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_NONE }; @@ -216,6 +223,7 @@ static const enum AVPixelFormat in_color_pix_fmts[] = { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12, + AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_NONE }; @@ -231,6 +239,7 @@ static const enum AVPixelFormat in_flat_pix_fmts[] = { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV440P12, + AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_NONE }; @@ -270,7 +279,7 @@ static const enum AVPixelFormat out_yuv10_lowpass_pix_fmts[] = { }; static const enum AVPixelFormat out_yuv12_lowpass_pix_fmts[] = { - AV_PIX_FMT_YUV444P12, + AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_NONE }; @@ -678,10 +687,11 @@ static av_always_inline void lowpass16(WaveformContext *s, int jobnr, int nb_jobs) { const int plane = s->desc->comp[component].plane; + const int dplane = (s->rgb || s->display == OVERLAY) ? plane : 0; const int shift_w = s->shift_w[component]; const int shift_h = s->shift_h[component]; const int src_linesize = in->linesize[plane] / 2; - const int dst_linesize = out->linesize[plane] / 2; + const int dst_linesize = out->linesize[dplane] / 2; const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1); const int limit = s->max - 1; const int max = limit - intensity; @@ -693,7 +703,7 @@ static av_always_inline void lowpass16(WaveformContext *s, const int slicew_end = column ? (src_w * (jobnr+1)) / nb_jobs : src_w; const int step = column ? 1 << shift_w : 1 << shift_h; const uint16_t *src_data = (const uint16_t *)in->data[plane] + sliceh_start * src_linesize; - uint16_t *dst_data = (uint16_t *)out->data[plane] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + uint16_t *dst_data = (uint16_t *)out->data[dplane] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; uint16_t * const dst_bottom_line = dst_data + dst_linesize * (s->size - 1); uint16_t * const dst_line = (mirror ? 
dst_bottom_line : dst_data); const uint16_t *p; @@ -730,6 +740,56 @@ static av_always_inline void lowpass16(WaveformContext *s, src_data += src_linesize; dst_data += dst_linesize * step; } + + if (s->display != OVERLAY && column && !s->rgb) { + const int mult = s->max / 256; + const int bg = s->bg_color[0] * mult; + const int t0 = s->tint[0]; + const int t1 = s->tint[1]; + uint16_t *dst0, *dst1; + const uint16_t *src; + int x; + + src = (const uint16_t *)(out->data[0]) + offset_y * dst_linesize + offset_x; + dst0 = (uint16_t *)(out->data[1]) + offset_y * dst_linesize + offset_x; + dst1 = (uint16_t *)(out->data[2]) + offset_y * dst_linesize + offset_x; + for (y = 0; y < s->max; y++) { + for (x = slicew_start * step; x < slicew_end * step; x++) { + if (src[x] != bg) { + dst0[x] = t0; + dst1[x] = t1; + } + } + + src += dst_linesize; + dst0 += dst_linesize; + dst1 += dst_linesize; + } + } else if (s->display != OVERLAY && !s->rgb) { + const int mult = s->max / 256; + const int bg = s->bg_color[0] * mult; + const int t0 = s->tint[0]; + const int t1 = s->tint[1]; + uint16_t *dst0, *dst1; + const uint16_t *src; + int x; + + src = (const uint16_t *)out->data[0] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + dst0 = (uint16_t *)(out->data[1]) + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + dst1 = (uint16_t *)(out->data[2]) + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + for (y = sliceh_start * step; y < sliceh_end * step; y++) { + for (x = 0; x < s->max; x++) { + if (src[x] != bg) { + dst0[x] = t0; + dst1[x] = t1; + } + } + + src += dst_linesize; + dst0 += dst_linesize; + dst1 += dst_linesize; + } + } } #define LOWPASS16_FUNC(name, column, mirror) \ @@ -765,10 +825,11 @@ static av_always_inline void lowpass(WaveformContext *s, int jobnr, int nb_jobs) { const int plane = s->desc->comp[component].plane; + const int dplane = (s->rgb || s->display == OVERLAY) ? plane : 0; const int shift_w = s->shift_w[component]; const int shift_h = s->shift_h[component]; const int src_linesize = in->linesize[plane]; - const int dst_linesize = out->linesize[plane]; + const int dst_linesize = out->linesize[dplane]; const int dst_signed_linesize = dst_linesize * (mirror == 1 ? -1 : 1); const int max = 255 - intensity; const int src_h = AV_CEIL_RSHIFT(in->height, shift_h); @@ -779,7 +840,7 @@ static av_always_inline void lowpass(WaveformContext *s, const int slicew_end = column ? (src_w * (jobnr+1)) / nb_jobs : src_w; const int step = column ? 1 << shift_w : 1 << shift_h; const uint8_t *src_data = in->data[plane] + sliceh_start * src_linesize; - uint8_t *dst_data = out->data[plane] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + uint8_t *dst_data = out->data[dplane] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; uint8_t * const dst_bottom_line = dst_data + dst_linesize * (s->size - 1); uint8_t * const dst_line = (mirror ? 
dst_bottom_line : dst_data); const uint8_t *p; @@ -794,48 +855,76 @@ static av_always_inline void lowpass(WaveformContext *s, for (p = src_data + slicew_start; p < src_data_end; p++) { uint8_t *target; + int i = 0; + if (column) { - target = dst + dst_signed_linesize * *p; - dst += step; - update(target, max, intensity); + do { + target = dst++ + dst_signed_linesize * *p; + update(target, max, intensity); + } while (++i < step); } else { uint8_t *row = dst_data; - if (mirror) - target = row - *p - 1; - else - target = row + *p; - update(target, max, intensity); - row += dst_linesize; + do { + if (mirror) + target = row - *p - 1; + else + target = row + *p; + update(target, max, intensity); + row += dst_linesize; + } while (++i < step); } } src_data += src_linesize; dst_data += dst_linesize * step; } - if (column && step > 1) { + if (s->display != OVERLAY && column && !s->rgb) { + const int bg = s->bg_color[0]; const int dst_h = 256; - uint8_t *dst; - int x, z; + const int t0 = s->tint[0]; + const int t1 = s->tint[1]; + uint8_t *dst0, *dst1; + const uint8_t *src; + int x; - dst = out->data[plane] + offset_y * dst_linesize + offset_x; + src = out->data[0] + offset_y * dst_linesize + offset_x; + dst0 = out->data[1] + offset_y * dst_linesize + offset_x; + dst1 = out->data[2] + offset_y * dst_linesize + offset_x; for (y = 0; y < dst_h; y++) { - for (x = slicew_start * step; x < slicew_end * step; x+=step) { - for (z = 1; z < step; z++) { - dst[x + z] = dst[x]; + for (x = slicew_start * step; x < slicew_end * step; x++) { + if (src[x] != bg) { + dst0[x] = t0; + dst1[x] = t1; } } - dst += dst_linesize; - } - } else if (step > 1) { - const int dst_w = 256; - uint8_t *dst; - int z; - dst = out->data[plane] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; - for (y = sliceh_start * step; y < sliceh_end * step; y+=step) { - for (z = 1; z < step; z++) - memcpy(dst + dst_linesize * z, dst, dst_w); - dst += dst_linesize * step; + src += dst_linesize; + dst0 += dst_linesize; + dst1 += dst_linesize; + } + } else if (s->display != OVERLAY && !s->rgb) { + const int bg = s->bg_color[0]; + const int dst_w = 256; + const int t0 = s->tint[0]; + const int t1 = s->tint[1]; + uint8_t *dst0, *dst1; + const uint8_t *src; + int x; + + src = out->data[0] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + dst0 = out->data[1] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + dst1 = out->data[2] + (offset_y + sliceh_start * step) * dst_linesize + offset_x; + for (y = sliceh_start * step; y < sliceh_end * step; y++) { + for (x = 0; x < dst_w; x++) { + if (src[x] != bg) { + dst0[x] = t0; + dst1[x] = t1; + } + } + + src += dst_linesize; + dst0 += dst_linesize; + dst1 += dst_linesize; } } } @@ -3198,6 +3287,9 @@ static int config_input(AVFilterLink *inlink) s->size = s->size << (s->bits - 8); + s->tint[0] = .5f * (s->ftint[0] + 1.f) * (s->size - 1); + s->tint[1] = .5f * (s->ftint[1] + 1.f) * (s->size - 1); + switch (inlink->format) { case AV_PIX_FMT_GBRAP: case AV_PIX_FMT_GBRP: @@ -3334,10 +3426,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) td.offset_x = offset_x; ctx->internal->execute(ctx, s->waveform_slice, &td, NULL, ff_filter_get_nb_threads(ctx)); switch (s->filter) { + case LOWPASS: + if (s->bits <= 8) + envelope(s, out, plane, s->rgb || s->display == OVERLAY ? plane : 0, s->mode ? offset_x : offset_y); + else + envelope16(s, out, plane, s->rgb || s->display == OVERLAY ? plane : 0, s->mode ? 
offset_x : offset_y); + break; case ACOLOR: case CHROMA: case COLOR: - case LOWPASS: if (s->bits <= 8) envelope(s, out, plane, plane, s->mode ? offset_x : offset_y); else diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c index 0b34c5045b..43dea67add 100644 --- a/libavfilter/vf_yadif.c +++ b/libavfilter/vf_yadif.c @@ -265,42 +265,19 @@ static av_cold void uninit(AVFilterContext *ctx) static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { - AV_PIX_FMT_YUV420P, - AV_PIX_FMT_YUV422P, - AV_PIX_FMT_YUV444P, - AV_PIX_FMT_YUV410P, - AV_PIX_FMT_YUV411P, - AV_PIX_FMT_GRAY8, - AV_PIX_FMT_YUVJ420P, - AV_PIX_FMT_YUVJ422P, - AV_PIX_FMT_YUVJ444P, - AV_PIX_FMT_GRAY16, - AV_PIX_FMT_YUV440P, + AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, + AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV440P, + AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16, + AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, - AV_PIX_FMT_YUV420P9, - AV_PIX_FMT_YUV422P9, - AV_PIX_FMT_YUV444P9, - AV_PIX_FMT_YUV420P10, - AV_PIX_FMT_YUV422P10, - AV_PIX_FMT_YUV444P10, - AV_PIX_FMT_YUV420P12, - AV_PIX_FMT_YUV422P12, - AV_PIX_FMT_YUV444P12, - AV_PIX_FMT_YUV420P14, - AV_PIX_FMT_YUV422P14, - AV_PIX_FMT_YUV444P14, - AV_PIX_FMT_YUV420P16, - AV_PIX_FMT_YUV422P16, - AV_PIX_FMT_YUV444P16, - AV_PIX_FMT_YUVA420P, - AV_PIX_FMT_YUVA422P, - AV_PIX_FMT_YUVA444P, - AV_PIX_FMT_GBRP, - AV_PIX_FMT_GBRP9, - AV_PIX_FMT_GBRP10, - AV_PIX_FMT_GBRP12, - AV_PIX_FMT_GBRP14, - AV_PIX_FMT_GBRP16, + AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, + AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, + AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, + AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, + AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, + AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, + AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, + AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE }; diff --git a/libavfilter/vf_zscale.c b/libavfilter/vf_zscale.c index db2dd17756..46c5dd7f38 100644 --- a/libavfilter/vf_zscale.c +++ b/libavfilter/vf_zscale.c @@ -738,12 +738,13 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar #define OFFSET(x) offsetof(ZScaleContext, x) #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM +#define TFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM static const AVOption zscale_options[] = { - { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, - { "height", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS }, + { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "width", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, + { "height", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = TFLAGS }, { "size", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, { "s", "set video size", OFFSET(size_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS }, { "dither", "set dither type", OFFSET(dither), 
AV_OPT_TYPE_INT, {.i64 = 0}, 0, ZIMG_DITHER_ERROR_DIFFUSION, FLAGS, "dither" }, diff --git a/libavfilter/vsrc_cellauto.c b/libavfilter/vsrc_cellauto.c index c75460334e..6fd812c54f 100644 --- a/libavfilter/vsrc_cellauto.c +++ b/libavfilter/vsrc_cellauto.c @@ -199,7 +199,7 @@ static av_cold int init(AVFilterContext *ctx) } av_log(ctx, AV_LOG_VERBOSE, - "s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%"PRIu32"\n", + "s:%dx%d r:%d/%d rule:%d stitch:%d scroll:%d full:%d seed:%"PRId64"\n", s->w, s->h, s->frame_rate.num, s->frame_rate.den, s->rule, s->stitch, s->scroll, s->start_full, s->random_seed); diff --git a/libavfilter/vsrc_life.c b/libavfilter/vsrc_life.c index 4c0ea73d8a..5932c2395b 100644 --- a/libavfilter/vsrc_life.c +++ b/libavfilter/vsrc_life.c @@ -260,7 +260,7 @@ static av_cold int init(AVFilterContext *ctx) } av_log(ctx, AV_LOG_VERBOSE, - "s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%"PRIu32"\n", + "s:%dx%d r:%d/%d rule:%s stay_rule:%d born_rule:%d stitch:%d seed:%"PRId64"\n", life->w, life->h, life->frame_rate.num, life->frame_rate.den, life->rule_str, life->stay_rule, life->born_rule, life->stitch, life->random_seed); diff --git a/libavfilter/x86/vf_interlace.asm b/libavfilter/x86/vf_interlace.asm index a6c65b805d..f4a405c754 100644 --- a/libavfilter/x86/vf_interlace.asm +++ b/libavfilter/x86/vf_interlace.asm @@ -49,7 +49,7 @@ SECTION .text pxor m2, m6, [srcq+hq] pavg%1 m0, m2 pxor m0, m6 - mova [dstq+hq], m0 + movu [dstq+hq], m0 add hq, mmsize jge .end @@ -66,8 +66,8 @@ SECTION .text pavg%1 m1, m3 pxor m0, m6 pxor m1, m6 - mova [dstq+hq], m0 - mova [dstq+hq+mmsize], m1 + movu [dstq+hq], m0 + movu [dstq+hq+mmsize], m1 add hq, 2*mmsize jl .loop @@ -140,7 +140,7 @@ cglobal lowpass_line_complex, 5, 5, 8, dst, h, src, mref, pref pand m0, m6 pandn m6, m1 por m0, m6 - mova [dstq], m0 + movu [dstq], m0 add dstq, mmsize add srcq, mmsize @@ -201,8 +201,8 @@ cglobal lowpass_line_complex_12, 5, 5, 8, 16, dst, h, src, mref, pref, clip_max pandn m7, m3 por m0, m6 por m1, m7 - mova [dstq], m0 - mova [dstq+mmsize], m1 + movu [dstq], m0 + movu [dstq+mmsize], m1 add dstq, 2*mmsize add srcq, 2*mmsize diff --git a/libavformat/4xm.c b/libavformat/4xm.c index a6101a92ec..aea9226984 100644 --- a/libavformat/4xm.c +++ b/libavformat/4xm.c @@ -322,8 +322,10 @@ static int fourxm_read_packet(AVFormatContext *s, case cfr2_TAG: /* allocate 8 more bytes than 'size' to account for fourcc * and size */ - if (size + 8 < size || av_new_packet(pkt, size + 8)) - return AVERROR(EIO); + if (size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - 8) + return AVERROR_INVALIDDATA; + if ((ret = av_new_packet(pkt, size + 8)) < 0) + return ret; pkt->stream_index = fourxm->video_stream_index; pkt->pts = fourxm->video_pts; pkt->pos = avio_tell(s->pb); @@ -347,7 +349,7 @@ static int fourxm_read_packet(AVFormatContext *s, fourxm->tracks[track_number].channels > 0) { ret = av_get_packet(s->pb, pkt, size); if (ret < 0) - return AVERROR(EIO); + return ret; pkt->stream_index = fourxm->tracks[track_number].stream_index; pkt->pts = fourxm->tracks[track_number].audio_pts; diff --git a/libavformat/adxdec.c b/libavformat/adxdec.c index 1038a0d67e..f80b4b80f0 100644 --- a/libavformat/adxdec.c +++ b/libavformat/adxdec.c @@ -83,7 +83,7 @@ static int adx_read_header(AVFormatContext *s) { ADXDemuxerContext *c = s->priv_data; AVCodecParameters *par; - + int ret; AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); @@ -94,8 +94,8 @@ static int adx_read_header(AVFormatContext *s) c->header_size = 
avio_rb16(s->pb) + 4; avio_seek(s->pb, -4, SEEK_CUR); - if (ff_get_extradata(s, par, s->pb, c->header_size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, par, s->pb, c->header_size)) < 0) + return ret; if (par->extradata_size < 12) { av_log(s, AV_LOG_ERROR, "Invalid extradata size.\n"); diff --git a/libavformat/afc.c b/libavformat/afc.c index 542cb168fc..2da04eb5dc 100644 --- a/libavformat/afc.c +++ b/libavformat/afc.c @@ -31,6 +31,7 @@ static int afc_read_header(AVFormatContext *s) { AFCDemuxContext *c = s->priv_data; AVStream *st; + int ret; st = avformat_new_stream(s, NULL); if (!st) @@ -40,8 +41,8 @@ static int afc_read_header(AVFormatContext *s) st->codecpar->channels = 2; st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO; - if (ff_alloc_extradata(st->codecpar, 1)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 1)) < 0) + return ret; st->codecpar->extradata[0] = 8 * st->codecpar->channels; c->data_end = avio_rb32(s->pb) + 32LL; diff --git a/libavformat/aiffdec.c b/libavformat/aiffdec.c index a42987c15f..cb2f1b60fb 100644 --- a/libavformat/aiffdec.c +++ b/libavformat/aiffdec.c @@ -301,8 +301,8 @@ static int aiff_read_header(AVFormatContext *s) case MKTAG('w', 'a', 'v', 'e'): if ((uint64_t)size > (1<<30)) return -1; - if (ff_get_extradata(s, st->codecpar, pb, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, pb, size)) < 0) + return ret; if ( (st->codecpar->codec_id == AV_CODEC_ID_QDMC || st->codecpar->codec_id == AV_CODEC_ID_QDM2) && size>=12*4 && !st->codecpar->block_align) { st->codecpar->block_align = AV_RB32(st->codecpar->extradata+11*4); @@ -325,8 +325,8 @@ static int aiff_read_header(AVFormatContext *s) } break; case MKTAG('C','H','A','N'): - if(ff_mov_read_chan(s, pb, st, size) < 0) - return AVERROR_INVALIDDATA; + if ((ret = ff_mov_read_chan(s, pb, st, size)) < 0) + return ret; break; case MKTAG('A','P','C','M'): /* XA ADPCM compressed sound chunk */ st->codecpar->codec_id = AV_CODEC_ID_ADPCM_XA; diff --git a/libavformat/aiffenc.c b/libavformat/aiffenc.c index e25794d185..0145596bec 100644 --- a/libavformat/aiffenc.c +++ b/libavformat/aiffenc.c @@ -199,9 +199,6 @@ static int aiff_write_header(AVFormatContext *s) avpriv_set_pts_info(s->streams[aiff->audio_stream_idx], 64, 1, s->streams[aiff->audio_stream_idx]->codecpar->sample_rate); - /* Data is starting here */ - avio_flush(pb); - return 0; } @@ -266,8 +263,6 @@ static int aiff_write_trailer(AVFormatContext *s) file_size = avio_tell(pb); avio_seek(pb, aiff->form, SEEK_SET); avio_wb32(pb, file_size - aiff->form - 4); - - avio_flush(pb); } return ret; diff --git a/libavformat/amr.c b/libavformat/amr.c index 42840a50a3..650b565b1b 100644 --- a/libavformat/amr.c +++ b/libavformat/amr.c @@ -60,7 +60,6 @@ static int amr_write_header(AVFormatContext *s) } else { return -1; } - avio_flush(pb); return 0; } diff --git a/libavformat/apc.c b/libavformat/apc.c index 835d1b0f6e..7210bfbb56 100644 --- a/libavformat/apc.c +++ b/libavformat/apc.c @@ -37,6 +37,7 @@ static int apc_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; AVStream *st; + int ret; avio_rl32(pb); /* CRYO */ avio_rl32(pb); /* _APC */ @@ -53,8 +54,8 @@ static int apc_read_header(AVFormatContext *s) st->codecpar->sample_rate = avio_rl32(pb); /* initial predictor values for adpcm decoder */ - if (ff_get_extradata(s, st->codecpar, pb, 2 * 4) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, pb, 2 * 4)) < 0) + return ret; if (avio_rl32(pb)) { st->codecpar->channels = 
2; @@ -78,7 +79,6 @@ static int apc_read_packet(AVFormatContext *s, AVPacket *pkt) { if (av_get_packet(s->pb, pkt, MAX_READ_SIZE) <= 0) return AVERROR(EIO); - pkt->flags &= ~AV_PKT_FLAG_CORRUPT; pkt->stream_index = 0; return 0; } diff --git a/libavformat/ape.c b/libavformat/ape.c index 977e6f3d18..e31a00dc96 100644 --- a/libavformat/ape.c +++ b/libavformat/ape.c @@ -163,7 +163,7 @@ static int ape_read_header(AVFormatContext * s) APEContext *ape = s->priv_data; AVStream *st; uint32_t tag; - int i; + int i, ret; int total_blocks, final_size = 0; int64_t pts, file_size; @@ -358,8 +358,8 @@ static int ape_read_header(AVFormatContext * s) st->duration = total_blocks; avpriv_set_pts_info(st, 64, 1, ape->samplerate); - if (ff_alloc_extradata(st->codecpar, APE_EXTRADATA_SIZE)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, APE_EXTRADATA_SIZE)) < 0) + return ret; AV_WL16(st->codecpar->extradata + 0, ape->fileversion); AV_WL16(st->codecpar->extradata + 2, ape->compressiontype); AV_WL16(st->codecpar->extradata + 4, ape->formatflags); @@ -386,14 +386,16 @@ static int ape_read_packet(AVFormatContext * s, AVPacket * pkt) int nblocks; APEContext *ape = s->priv_data; uint32_t extra_size = 8; + int64_t ret64; if (avio_feof(s->pb)) return AVERROR_EOF; if (ape->currentframe >= ape->totalframes) return AVERROR_EOF; - if (avio_seek(s->pb, ape->frames[ape->currentframe].pos, SEEK_SET) < 0) - return AVERROR(EIO); + ret64 = avio_seek(s->pb, ape->frames[ape->currentframe].pos, SEEK_SET); + if (ret64 < 0) + return ret64; /* Calculate how many blocks there are in this frame */ if (ape->currentframe == (ape->totalframes - 1)) @@ -409,8 +411,9 @@ static int ape_read_packet(AVFormatContext * s, AVPacket * pkt) return AVERROR(EIO); } - if (av_new_packet(pkt, ape->frames[ape->currentframe].size + extra_size) < 0) - return AVERROR(ENOMEM); + ret = av_new_packet(pkt, ape->frames[ape->currentframe].size + extra_size); + if (ret < 0) + return ret; AV_WL32(pkt->data , nblocks); AV_WL32(pkt->data + 4, ape->frames[ape->currentframe].skip); @@ -447,12 +450,13 @@ static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp AVStream *st = s->streams[stream_index]; APEContext *ape = s->priv_data; int index = av_index_search_timestamp(st, timestamp, flags); + int64_t ret; if (index < 0) return -1; - if (avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET) < 0) - return -1; + if ((ret = avio_seek(s->pb, st->index_entries[index].pos, SEEK_SET)) < 0) + return ret; ape->currentframe = index; return 0; } diff --git a/libavformat/apetag.c b/libavformat/apetag.c index 4e19f49bf1..454c6c688b 100644 --- a/libavformat/apetag.c +++ b/libavformat/apetag.c @@ -96,8 +96,8 @@ static int ape_tag_read_field(AVFormatContext *s) st->attached_pic.stream_index = st->index; st->attached_pic.flags |= AV_PKT_FLAG_KEY; } else { - if (ff_get_extradata(s, st->codecpar, s->pb, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, s->pb, size)) < 0) + return ret; st->codecpar->codec_type = AVMEDIA_TYPE_ATTACHMENT; } } else { diff --git a/libavformat/apngdec.c b/libavformat/apngdec.c index c8db9c6e1f..0f1d04a365 100644 --- a/libavformat/apngdec.c +++ b/libavformat/apngdec.c @@ -127,13 +127,14 @@ static int append_extradata(AVCodecParameters *par, AVIOContext *pb, int len) int new_size, ret; uint8_t *new_extradata; - if (previous_size > INT_MAX - len) + if (previous_size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - len) return AVERROR_INVALIDDATA; new_size = previous_size + len; 
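/*
 * Editor's note -- illustrative sketch, not part of the patch above. The
 * demuxer hunks in this area follow two recurring rules: propagate the real
 * error code from av_new_packet()/ff_alloc_extradata()/ff_get_extradata()
 * instead of a hard-coded AVERROR(ENOMEM) or AVERROR(EIO), and, when sizing
 * extradata by hand, reject lengths that would overflow once
 * AV_INPUT_BUFFER_PADDING_SIZE is added, then zero that padding after the
 * (re)allocation. A stand-alone version of the second rule, with a
 * hypothetical helper name and assuming libavcodec/avcodec.h,
 * libavutil/mem.h, limits.h and string.h are available, could look like this:
 */
static int grow_extradata_sketch(AVCodecParameters *par, const uint8_t *buf, int len)
{
    int old_size = par->extradata_size;
    uint8_t *tmp;

    /* old_size + len + padding must remain representable as an int */
    if (len < 0 || old_size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - len)
        return AVERROR_INVALIDDATA;

    tmp = av_realloc(par->extradata, old_size + len + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!tmp)
        return AVERROR(ENOMEM);

    memcpy(tmp + old_size, buf, len);
    /* libavcodec requires the padding bytes to be zeroed */
    memset(tmp + old_size + len, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    par->extradata      = tmp;
    par->extradata_size = old_size + len;
    return 0;
}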
new_extradata = av_realloc(par->extradata, new_size + AV_INPUT_BUFFER_PADDING_SIZE); if (!new_extradata) return AVERROR(ENOMEM); + memset(new_extradata + new_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); par->extradata = new_extradata; par->extradata_size = new_size; @@ -177,10 +178,9 @@ static int apng_read_header(AVFormatContext *s) return ret; /* extradata will contain every chunk up to the first fcTL (excluded) */ - st->codecpar->extradata = av_malloc(len + 12 + AV_INPUT_BUFFER_PADDING_SIZE); - if (!st->codecpar->extradata) - return AVERROR(ENOMEM); - st->codecpar->extradata_size = len + 12; + ret = ff_alloc_extradata(st->codecpar, len + 12); + if (ret < 0) + return ret; AV_WB32(st->codecpar->extradata, len); AV_WL32(st->codecpar->extradata+4, tag); AV_WB32(st->codecpar->extradata+8, st->codecpar->width); @@ -241,10 +241,6 @@ static int apng_read_header(AVFormatContext *s) } fail: - if (st->codecpar->extradata_size) { - av_freep(&st->codecpar->extradata); - st->codecpar->extradata_size = 0; - } return ret; } diff --git a/libavformat/asfenc.c b/libavformat/asfenc.c index 9f54173bf9..73afb13200 100644 --- a/libavformat/asfenc.c +++ b/libavformat/asfenc.c @@ -357,12 +357,12 @@ static int asf_write_markers(AVFormatContext *s) int64_t pres_time = av_rescale_q(c->start, c->time_base, scale); uint64_t offset; int32_t send_time = get_send_time(asf, pres_time, &offset); - int len = 0; + int len = 0, ret; uint8_t *buf; AVIOContext *dyn_buf; if (t) { - if (avio_open_dyn_buf(&dyn_buf) < 0) - return AVERROR(ENOMEM); + if ((ret = avio_open_dyn_buf(&dyn_buf)) < 0) + return ret; avio_put_str16le(dyn_buf, t->value); len = avio_close_dyn_buf(dyn_buf, &buf); } @@ -579,12 +579,12 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, /* title and other info */ if (has_title) { - int len; + int len, ret; uint8_t *buf; AVIOContext *dyn_buf; - if (avio_open_dyn_buf(&dyn_buf) < 0) - return AVERROR(ENOMEM); + if ((ret = avio_open_dyn_buf(&dyn_buf)) < 0) + return ret; hpos = put_header(pb, &ff_asf_comment_header); @@ -714,10 +714,10 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, if (desc) { AVIOContext *dyn_buf; uint8_t *buf; - int len; + int len, ret; - if (avio_open_dyn_buf(&dyn_buf) < 0) - return AVERROR(ENOMEM); + if ((ret = avio_open_dyn_buf(&dyn_buf)) < 0) + return ret; avio_put_str16le(dyn_buf, desc); len = avio_close_dyn_buf(dyn_buf, &buf); @@ -801,8 +801,6 @@ static int asf_write_header(AVFormatContext *s) return -1; } - avio_flush(s->pb); - asf->packet_nb_payloads = 0; asf->packet_timestamp_start = -1; asf->packet_timestamp_end = -1; @@ -894,7 +892,8 @@ static void flush_packet(AVFormatContext *s) avio_write(s->pb, asf->packet_buf, s->packet_size - packet_hdr_size); - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); + asf->nb_packets++; asf->packet_nb_payloads = 0; asf->packet_timestamp_start = -1; @@ -1132,7 +1131,6 @@ static int asf_write_trailer(AVFormatContext *s) return ret; asf_write_index(s, asf->index_ptr, asf->maximum_packet, asf->next_start_sec); } - avio_flush(s->pb); if (asf->is_streamed || !(s->pb->seekable & AVIO_SEEKABLE_NORMAL)) { put_chunk(s, 0x4524, 0, 0); /* end of stream */ diff --git a/libavformat/assenc.c b/libavformat/assenc.c index 12aadca171..68c3396e5a 100644 --- a/libavformat/assenc.c +++ b/libavformat/assenc.c @@ -77,7 +77,6 @@ static int write_header(AVFormatContext *s) avio_printf(s->pb, "[Events]\r\nFormat: %s, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n", ass->ssa_mode 
? "Marked" : "Layer"); } - avio_flush(s->pb); return 0; } diff --git a/libavformat/astenc.c b/libavformat/astenc.c index 578e658891..e0b94b8b63 100644 --- a/libavformat/astenc.c +++ b/libavformat/astenc.c @@ -101,8 +101,6 @@ static int ast_write_header(AVFormatContext *s) avio_wb64(pb, 0); avio_wb32(pb, 0); - avio_flush(pb); - return 0; } @@ -180,7 +178,6 @@ static int ast_write_trailer(AVFormatContext *s) } avio_seek(pb, file_size, SEEK_SET); - avio_flush(pb); } return 0; } diff --git a/libavformat/au.c b/libavformat/au.c index cb48e67feb..4afee85a94 100644 --- a/libavformat/au.c +++ b/libavformat/au.c @@ -311,7 +311,6 @@ static int au_write_header(AVFormatContext *s) } else { avio_wb64(pb, 0); /* annotation field */ } - avio_flush(pb); return 0; } @@ -327,7 +326,6 @@ static int au_write_trailer(AVFormatContext *s) avio_seek(pb, 8, SEEK_SET); avio_wb32(pb, (uint32_t)(file_size - au->header_size)); avio_seek(pb, file_size, SEEK_SET); - avio_flush(pb); } return 0; diff --git a/libavformat/av1.c b/libavformat/av1.c index 132f4e987b..5ad9222900 100644 --- a/libavformat/av1.c +++ b/libavformat/av1.c @@ -257,7 +257,7 @@ static int parse_sequence_header(AV1SequenceParameters *seq_params, const uint8_ if (!reduced_still_picture_header) { int enable_order_hint, seq_force_screen_content_tools; - skip_bits(&gb, 4); // enable_intraintra_compound (1), enable_masked_compound (1) + skip_bits(&gb, 4); // enable_interintra_compound (1), enable_masked_compound (1) // enable_warped_motion (1), enable_dual_filter (1) enable_order_hint = get_bits1(&gb); diff --git a/libavformat/avformat.h b/libavformat/avformat.h index d4d9a3b06e..9b9b634ec3 100644 --- a/libavformat/avformat.h +++ b/libavformat/avformat.h @@ -715,8 +715,7 @@ typedef struct AVInputFormat { * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a * background thread). * @return 0 on success, < 0 on error. - * When returning an error, pkt must not have been allocated - * or must be freed before returning + * Upon returning an error, pkt must be unreferenced by the caller. 
*/ int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); diff --git a/libavformat/avidec.c b/libavformat/avidec.c index 5cf67a4771..ae0c227bb9 100644 --- a/libavformat/avidec.c +++ b/libavformat/avidec.c @@ -770,10 +770,11 @@ FF_ENABLE_DEPRECATION_WARNINGS st->codecpar->extradata_size = size - 10 * 4; if (st->codecpar->extradata) { av_log(s, AV_LOG_WARNING, "New extradata in strf chunk, freeing previous one.\n"); - av_freep(&st->codecpar->extradata); } - if (ff_get_extradata(s, st->codecpar, pb, st->codecpar->extradata_size) < 0) - return AVERROR(ENOMEM); + ret = ff_get_extradata(s, st->codecpar, pb, + st->codecpar->extradata_size); + if (ret < 0) + return ret; } // FIXME: check if the encoder really did this correctly @@ -930,10 +931,9 @@ FF_ENABLE_DEPRECATION_WARNINGS if (size<(1<<30)) { if (st->codecpar->extradata) { av_log(s, AV_LOG_WARNING, "New extradata in strd chunk, freeing previous one.\n"); - av_freep(&st->codecpar->extradata); } - if (ff_get_extradata(s, st->codecpar, pb, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, pb, size)) < 0) + return ret; } if (st->codecpar->extradata_size & 1) //FIXME check if the encoder really did this correctly diff --git a/libavformat/avienc.c b/libavformat/avienc.c index 940ea33510..d6cfb1b783 100644 --- a/libavformat/avienc.c +++ b/libavformat/avienc.c @@ -580,8 +580,6 @@ static int avi_write_header(AVFormatContext *s) avi->movi_list = ff_start_tag(pb, "LIST"); ffio_wfourcc(pb, "movi"); - avio_flush(pb); - return 0; } @@ -593,7 +591,6 @@ static void update_odml_entry(AVFormatContext *s, int stream_index, int64_t ix, int64_t pos; int au_byterate, au_ssize, au_scale; - avio_flush(pb); pos = avio_tell(pb); /* Updating one entry in the AVI OpenDML master index */ diff --git a/libavformat/aviobuf.c b/libavformat/aviobuf.c index 70e1d2ca10..384c3a4cb6 100644 --- a/libavformat/aviobuf.c +++ b/libavformat/aviobuf.c @@ -1197,6 +1197,8 @@ int ffio_open_whitelist(AVIOContext **s, const char *filename, int flags, URLContext *h; int err; + *s = NULL; + err = ffurl_open_whitelist(&h, filename, flags, int_cb, options, whitelist, blacklist, NULL); if (err < 0) return err; diff --git a/libavformat/avisynth.c b/libavformat/avisynth.c index 5dfe94ae0c..55a2efd884 100644 --- a/libavformat/avisynth.c +++ b/libavformat/avisynth.c @@ -640,7 +640,7 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, AVS_VideoFrame *frame; unsigned char *dst_p; const unsigned char *src_p; - int n, i, plane, rowsize, planeheight, pitch, bits; + int n, i, plane, rowsize, planeheight, pitch, bits, ret; const char *error; int avsplus av_unused; @@ -676,8 +676,8 @@ static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, if (!pkt->size) return AVERROR_UNKNOWN; - if (av_new_packet(pkt, pkt->size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, pkt->size)) < 0) + return ret; pkt->pts = n; pkt->dts = n; @@ -739,7 +739,7 @@ static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, { AviSynthContext *avs = s->priv_data; AVRational fps, samplerate; - int samples; + int samples, ret; int64_t n; const char *error; @@ -782,8 +782,8 @@ static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, if (!pkt->size) return AVERROR_UNKNOWN; - if (av_new_packet(pkt, pkt->size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, pkt->size)) < 0) + return ret; pkt->pts = n; pkt->dts = n; diff --git a/libavformat/avs.c b/libavformat/avs.c index 47fa41017d..3f8780d42d 100644 
--- a/libavformat/avs.c +++ b/libavformat/avs.c @@ -224,11 +224,6 @@ static int avs_read_packet(AVFormatContext * s, AVPacket * pkt) } } -static int avs_read_close(AVFormatContext * s) -{ - return 0; -} - AVInputFormat ff_avs_demuxer = { .name = "avs", .long_name = NULL_IF_CONFIG_SMALL("Argonaut Games Creature Shock"), @@ -236,5 +231,4 @@ AVInputFormat ff_avs_demuxer = { .read_probe = avs_probe, .read_header = avs_read_header, .read_packet = avs_read_packet, - .read_close = avs_read_close, }; diff --git a/libavformat/bink.c b/libavformat/bink.c index 631b8c4d7d..a1422e3f35 100644 --- a/libavformat/bink.c +++ b/libavformat/bink.c @@ -150,8 +150,8 @@ static int read_header(AVFormatContext *s) vst->codecpar->codec_id = AV_CODEC_ID_NONE; } - if (ff_get_extradata(s, vst->codecpar, pb, 4) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, vst->codecpar, pb, 4)) < 0) + return ret; bink->num_audio_tracks = avio_rl32(pb); @@ -190,8 +190,8 @@ static int read_header(AVFormatContext *s) ast->codecpar->channels = 1; ast->codecpar->channel_layout = AV_CH_LAYOUT_MONO; } - if (ff_alloc_extradata(ast->codecpar, 4)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(ast->codecpar, 4)) < 0) + return ret; AV_WL32(ast->codecpar->extradata, vst->codecpar->codec_tag); } @@ -302,13 +302,15 @@ static int read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, in { BinkDemuxContext *bink = s->priv_data; AVStream *vst = s->streams[0]; + int64_t ret; if (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL)) return -1; /* seek to the first frame */ - if (avio_seek(s->pb, vst->index_entries[0].pos + bink->smush_size, SEEK_SET) < 0) - return -1; + ret = avio_seek(s->pb, vst->index_entries[0].pos + bink->smush_size, SEEK_SET); + if (ret < 0) + return ret; bink->video_pts = 0; memset(bink->audio_pts, 0, sizeof(bink->audio_pts)); diff --git a/libavformat/bintext.c b/libavformat/bintext.c index 61b89f56d6..7dab5f377d 100644 --- a/libavformat/bintext.c +++ b/libavformat/bintext.c @@ -177,14 +177,14 @@ static int bintext_read_header(AVFormatContext *s) { BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; - + int ret; AVStream *st = init_stream(s); if (!st) return AVERROR(ENOMEM); st->codecpar->codec_id = AV_CODEC_ID_BINTEXT; - if (ff_alloc_extradata(st->codecpar, 2)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 2)) < 0) + return ret; st->codecpar->extradata[0] = 16; st->codecpar->extradata[1] = 0; @@ -222,7 +222,7 @@ static int xbin_read_header(AVFormatContext *s) BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; char fontheight, flags; - + int ret; AVStream *st = init_stream(s); if (!st) return AVERROR(ENOMEM); @@ -241,8 +241,9 @@ static int xbin_read_header(AVFormatContext *s) st->codecpar->extradata_size += fontheight * (flags & 0x10 ? 512 : 256); st->codecpar->codec_id = flags & 4 ? 
AV_CODEC_ID_XBIN : AV_CODEC_ID_BINTEXT; - if (ff_alloc_extradata(st->codecpar, st->codecpar->extradata_size)) - return AVERROR(ENOMEM); + ret = ff_alloc_extradata(st->codecpar, st->codecpar->extradata_size); + if (ret < 0) + return ret; st->codecpar->extradata[0] = fontheight; st->codecpar->extradata[1] = flags; if (avio_read(pb, st->codecpar->extradata + 2, st->codecpar->extradata_size - 2) < 0) @@ -264,6 +265,7 @@ static int adf_read_header(AVFormatContext *s) BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; + int ret; if (avio_r8(pb) != 1) return AVERROR_INVALIDDATA; @@ -273,8 +275,8 @@ static int adf_read_header(AVFormatContext *s) return AVERROR(ENOMEM); st->codecpar->codec_id = AV_CODEC_ID_BINTEXT; - if (ff_alloc_extradata(st->codecpar, 2 + 48 + 4096)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 2 + 48 + 4096)) < 0) + return ret; st->codecpar->extradata[0] = 16; st->codecpar->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; @@ -318,7 +320,7 @@ static int idf_read_header(AVFormatContext *s) BinDemuxContext *bin = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; - int got_width = 0; + int got_width = 0, ret; if (!(pb->seekable & AVIO_SEEKABLE_NORMAL)) return AVERROR(EIO); @@ -328,8 +330,8 @@ static int idf_read_header(AVFormatContext *s) return AVERROR(ENOMEM); st->codecpar->codec_id = AV_CODEC_ID_IDF; - if (ff_alloc_extradata(st->codecpar, 2 + 48 + 4096)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 2 + 48 + 4096)) < 0) + return ret; st->codecpar->extradata[0] = 16; st->codecpar->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; diff --git a/libavformat/bit.c b/libavformat/bit.c index 0aacfc7c38..2dc7d4f3f7 100644 --- a/libavformat/bit.c +++ b/libavformat/bit.c @@ -94,8 +94,8 @@ static int read_packet(AVFormatContext *s, if(ret != 8 * packet_size * sizeof(uint16_t)) return AVERROR(EIO); - if (av_new_packet(pkt, packet_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, packet_size)) < 0) + return ret; init_put_bits(&pbo, pkt->data, packet_size); for(j=0; j < packet_size; j++) diff --git a/libavformat/bmv.c b/libavformat/bmv.c index ac567c21eb..9f03fba058 100644 --- a/libavformat/bmv.c +++ b/libavformat/bmv.c @@ -96,8 +96,8 @@ static int bmv_read_packet(AVFormatContext *s, AVPacket *pkt) audio_size, c->size); return AVERROR_INVALIDDATA; } - if (av_new_packet(pkt, audio_size) < 0) - return AVERROR(ENOMEM); + if ((err = av_new_packet(pkt, audio_size)) < 0) + return err; memcpy(pkt->data, c->packet + 1, pkt->size); pkt->stream_index = 1; pkt->pts = c->audio_pos; @@ -108,8 +108,8 @@ static int bmv_read_packet(AVFormatContext *s, AVPacket *pkt) } else break; } - if (av_new_packet(pkt, c->size + 1) < 0) - return AVERROR(ENOMEM); + if ((err = av_new_packet(pkt, c->size + 1)) < 0) + return err; pkt->stream_index = 0; c->get_next = 1; memcpy(pkt->data, c->packet, pkt->size); diff --git a/libavformat/brstm.c b/libavformat/brstm.c index e8a1eaa022..1470690731 100644 --- a/libavformat/brstm.c +++ b/libavformat/brstm.c @@ -403,8 +403,8 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) (32 + 4 + size) > (INT_MAX / par->channels) || (32 + 4 + size) * par->channels > INT_MAX - 8) return AVERROR_INVALIDDATA; - if (av_new_packet(pkt, 8 + (32 + 4 + size) * par->channels) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, 8 + (32 + 4 + size) * par->channels)) < 0) + return ret; dst = pkt->data; if (par->codec_id == AV_CODEC_ID_ADPCM_THP_LE) { bytestream_put_le32(&dst, size * par->channels); 
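/*
 * Editor's note -- illustrative sketch, not part of the patch above. The
 * bit/bmv/brstm hunks return whatever av_new_packet() actually failed with
 * rather than a blanket AVERROR(ENOMEM), and brstm additionally validates the
 * per-channel size arithmetic before allocating. With a hypothetical payload
 * layout, a read_packet() built on that pattern is roughly:
 */
static int read_packet_sketch(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecParameters *par = s->streams[0]->codecpar;
    int size = avio_rb32(s->pb);   /* hypothetical per-channel payload size */
    int ret;

    /* reject counts whose per-channel expansion would overflow an int */
    if (size < 0 || par->channels <= 0 || size > (INT_MAX - 8) / par->channels)
        return AVERROR_INVALIDDATA;

    if ((ret = av_new_packet(pkt, 8 + size * par->channels)) < 0)
        return ret;                /* propagate the real error, not ENOMEM */

    ret = avio_read(s->pb, pkt->data, pkt->size);
    if (ret < pkt->size)           /* short read or I/O error */
        return ret < 0 ? ret : AVERROR_INVALIDDATA;

    pkt->stream_index = 0;
    /* per the avformat.h comment change above, the caller unreferences pkt
     * whenever a negative value is returned */
    return 0;
}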
diff --git a/libavformat/cafdec.c b/libavformat/cafdec.c index 86228595c9..d0f942f3e4 100644 --- a/libavformat/cafdec.c +++ b/libavformat/cafdec.c @@ -100,6 +100,7 @@ static int read_kuki_chunk(AVFormatContext *s, int64_t size) { AVIOContext *pb = s->pb; AVStream *st = s->streams[0]; + int ret; if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) return -1; @@ -134,9 +135,8 @@ static int read_kuki_chunk(AVFormatContext *s, int64_t size) return AVERROR_INVALIDDATA; } - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, ALAC_HEADER)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, ALAC_HEADER)) < 0) + return ret; /* For the old style cookie, we skip 12 bytes, then read 36 bytes. * The new style cookie only contains the last 24 bytes of what was @@ -174,10 +174,8 @@ static int read_kuki_chunk(AVFormatContext *s, int64_t size) return AVERROR_PATCHWELCOME; } avio_skip(pb, size); - } else { - av_freep(&st->codecpar->extradata); - if (ff_get_extradata(s, st->codecpar, pb, size) < 0) - return AVERROR(ENOMEM); + } else if ((ret = ff_get_extradata(s, st->codecpar, pb, size)) < 0) { + return ret; } return 0; diff --git a/libavformat/cafenc.c b/libavformat/cafenc.c index 0f7c4ebbb3..98d4d9212f 100644 --- a/libavformat/cafenc.c +++ b/libavformat/cafenc.c @@ -203,7 +203,6 @@ static int caf_write_header(AVFormatContext *s) avio_wb64(pb, -1); //< mChunkSize avio_wb32(pb, 0); //< mEditCount - avio_flush(pb); return 0; } @@ -259,7 +258,6 @@ static int caf_write_trailer(AVFormatContext *s) avio_write(pb, caf->pkt_sizes, caf->size_entries_used); caf->size_buffer_size = 0; } - avio_flush(pb); } av_freep(&caf->pkt_sizes); return 0; diff --git a/libavformat/cdxl.c b/libavformat/cdxl.c index e675b2c8f1..31f7cb823a 100644 --- a/libavformat/cdxl.c +++ b/libavformat/cdxl.c @@ -202,8 +202,8 @@ static int cdxl_read_packet(AVFormatContext *s, AVPacket *pkt) avpriv_set_pts_info(st, 64, 1, cdxl->sample_rate); } - if (av_new_packet(pkt, video_size + CDXL_HEADER_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, video_size + CDXL_HEADER_SIZE)) < 0) + return ret; memcpy(pkt->data, cdxl->header, CDXL_HEADER_SIZE); ret = avio_read(pb, pkt->data + CDXL_HEADER_SIZE, video_size); if (ret < 0) { diff --git a/libavformat/concatdec.c b/libavformat/concatdec.c index b80294efbf..e826821241 100644 --- a/libavformat/concatdec.c +++ b/libavformat/concatdec.c @@ -171,10 +171,6 @@ static int copy_stream_props(AVStream *st, AVStream *source_st) if (st->codecpar->codec_id || !source_st->codecpar->codec_id) { if (st->codecpar->extradata_size < source_st->codecpar->extradata_size) { - if (st->codecpar->extradata) { - av_freep(&st->codecpar->extradata); - st->codecpar->extradata_size = 0; - } ret = ff_alloc_extradata(st->codecpar, source_st->codecpar->extradata_size); if (ret < 0) diff --git a/libavformat/dashdec.c b/libavformat/dashdec.c index 72ba9605f0..15e79fd51a 100644 --- a/libavformat/dashdec.c +++ b/libavformat/dashdec.c @@ -1851,7 +1851,7 @@ static int save_avio_options(AVFormatContext *s) { DASHContext *c = s->priv_data; const char *opts[] = { - "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", NULL }; + "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", "icy", NULL }; const char **opt = opts; uint8_t *buf = NULL; int ret = 0; diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c index 8c28fb6b6e..3d8515d9ca 100644 --- a/libavformat/dashenc.c +++ b/libavformat/dashenc.c @@ -31,6 +31,7 @@ #include 
"libavutil/intreadwrite.h" #include "libavutil/mathematics.h" #include "libavutil/opt.h" +#include "libavutil/parseutils.h" #include "libavutil/rational.h" #include "libavutil/time.h" #include "libavutil/time_internal.h" @@ -57,6 +58,17 @@ typedef enum { SEGMENT_TYPE_NB } SegmentType; +enum { + FRAG_TYPE_NONE = 0, + FRAG_TYPE_EVERY_FRAME, + FRAG_TYPE_DURATION, + FRAG_TYPE_PFRAMES, + FRAG_TYPE_NB +}; + +#define MPD_PROFILE_DASH 1 +#define MPD_PROFILE_DVB 2 + typedef struct Segment { char file[1024]; int64_t start_pos; @@ -70,24 +82,37 @@ typedef struct Segment { typedef struct AdaptationSet { char id[10]; char *descriptor; + int64_t seg_duration; + int64_t frag_duration; + int frag_type; enum AVMediaType media_type; AVDictionary *metadata; AVRational min_frame_rate, max_frame_rate; int ambiguous_frame_rate; + int64_t max_frag_duration; + int max_width, max_height; + int nb_streams; + AVRational par; } AdaptationSet; typedef struct OutputStream { AVFormatContext *ctx; int ctx_inited, as_idx; AVIOContext *out; + AVCodecParserContext *parser; + AVCodecContext *parser_avctx; int packets_written; char initfile[1024]; int64_t init_start_pos, pos; int init_range_length; int nb_segments, segments_size, segment_index; + int64_t seg_duration; + int64_t frag_duration; + int64_t last_duration; Segment **segments; int64_t first_pts, start_pts, max_pts; int64_t last_dts, last_pts; + int last_flags; int bit_rate; SegmentType segment_type; /* segment type selected for this particular stream */ const char *format_name; @@ -102,8 +127,14 @@ typedef struct OutputStream { char full_path[1024]; char temp_path[1024]; double availability_time_offset; + int64_t producer_reference_time; + char producer_reference_time_str[100]; int total_pkt_size; + int64_t total_pkt_duration; int muxer_overhead; + int frag_type; + int64_t gop_size; + AVRational sar; } OutputStream; typedef struct DASHContext { @@ -117,6 +148,7 @@ typedef struct DASHContext { int min_seg_duration; #endif int64_t seg_duration; + int64_t frag_duration; int remove_at_exit; int use_template; int use_timeline; @@ -127,6 +159,7 @@ typedef struct DASHContext { int64_t total_duration; char availability_start_time[100]; time_t start_time_s; + int64_t presentation_time_offset; char dirname[1024]; const char *single_file_name; /* file names as specified in options */ const char *init_seg_name; @@ -134,6 +167,7 @@ typedef struct DASHContext { const char *utc_timing_url; const char *method; const char *user_agent; + char *http_opts; int hls_playlist; int http_persistent; int master_playlist_created; @@ -142,14 +176,21 @@ typedef struct DASHContext { int streaming; int64_t timeout; int index_correction; - char *format_options_str; + AVDictionary *format_options; int global_sidx; SegmentType segment_type_option; /* segment type as specified in options */ int ignore_io_errors; int lhls; + int ldash; int master_publish_rate; int nr_of_streams_to_flush; int nr_of_streams_flushed; + int frag_type; + int write_prft; + int64_t max_gop_size; + int profile; + int64_t target_latency; + int target_latency_refid; } DASHContext; static struct codec_string { @@ -438,6 +479,8 @@ static void set_http_options(AVDictionary **options, DASHContext *c) { if (c->method) av_dict_set(options, "method", c->method, 0); + if (c->http_opts) + av_dict_parse_string(options, c->http_opts, "=", ",", 0); if (c->user_agent) av_dict_set(options, "user_agent", c->user_agent, 0); if (c->http_persistent) @@ -589,6 +632,8 @@ static void dash_free(AVFormatContext *s) } ff_format_io_close(s, &os->out); 
avformat_free_context(os->ctx); + avcodec_free_context(&os->parser_avctx); + av_parser_close(os->parser); for (j = 0; j < os->nb_segments; j++) av_free(os->segments[j]); av_free(os->segments); @@ -613,12 +658,19 @@ static void output_segment_list(OutputStream *os, AVIOContext *out, AVFormatCont int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : AV_TIME_BASE; avio_printf(out, "\t\t\t\tuse_timeline) { - avio_printf(out, "duration=\"%"PRId64"\" ", c->seg_duration); + avio_printf(out, "duration=\"%"PRId64"\" ", os->seg_duration); if (c->streaming && os->availability_time_offset) avio_printf(out, "availabilityTimeOffset=\"%.3f\" ", os->availability_time_offset); } - avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\">\n", os->init_seg_name, os->media_seg_name, c->use_timeline ? start_number : 1); + if (c->ldash && !final && os->frag_type != FRAG_TYPE_NONE && + (os->frag_type != FRAG_TYPE_DURATION || os->frag_duration != os->seg_duration)) + avio_printf(out, "availabilityTimeComplete=\"false\" "); + + avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\"", os->init_seg_name, os->media_seg_name, c->use_timeline ? start_number : 1); + if (c->presentation_time_offset) + avio_printf(out, " presentationTimeOffset=\"%"PRId64"\"", c->presentation_time_offset); + avio_printf(out, ">\n"); if (c->use_timeline) { int64_t cur_time = 0; avio_printf(out, "\t\t\t\t\t\n"); @@ -646,7 +698,7 @@ static void output_segment_list(OutputStream *os, AVIOContext *out, AVFormatCont avio_printf(out, "\t\t\t\t\n"); } else if (c->single_file) { avio_printf(out, "\t\t\t\t%s\n", os->initfile); - avio_printf(out, "\t\t\t\t\n", AV_TIME_BASE, c->last_duration, start_number); + avio_printf(out, "\t\t\t\t\n", AV_TIME_BASE, FFMIN(os->seg_duration, os->last_duration), start_number); avio_printf(out, "\t\t\t\t\t\n", os->init_start_pos, os->init_start_pos + os->init_range_length - 1); for (i = start_index; i < os->nb_segments; i++) { Segment *seg = os->segments[i]; @@ -657,7 +709,7 @@ static void output_segment_list(OutputStream *os, AVIOContext *out, AVFormatCont } avio_printf(out, "\t\t\t\t\n"); } else { - avio_printf(out, "\t\t\t\t\n", AV_TIME_BASE, c->last_duration, start_number); + avio_printf(out, "\t\t\t\t\n", AV_TIME_BASE, FFMIN(os->seg_duration, os->last_duration), start_number); avio_printf(out, "\t\t\t\t\t\n", os->initfile); for (i = start_index; i < os->nb_segments; i++) { Segment *seg = os->segments[i]; @@ -727,10 +779,9 @@ static void write_time(AVIOContext *out, int64_t time) avio_printf(out, "%d.%dS", seconds, fractions / (AV_TIME_BASE / 10)); } -static void format_date_now(char *buf, int size) +static void format_date(char *buf, int size, int64_t time_us) { struct tm *ptm, tmbuf; - int64_t time_us = av_gettime(); int64_t time_ms = time_us / 1000; const time_t time_s = time_ms / 1000; int millisec = time_ms - (time_s * 1000); @@ -758,17 +809,26 @@ static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_ind as->id, as->media_type == AVMEDIA_TYPE_VIDEO ? 
"video" : "audio"); if (as->media_type == AVMEDIA_TYPE_VIDEO && as->max_frame_rate.num && !as->ambiguous_frame_rate && av_cmp_q(as->min_frame_rate, as->max_frame_rate) < 0) avio_printf(out, " maxFrameRate=\"%d/%d\"", as->max_frame_rate.num, as->max_frame_rate.den); + else if (as->media_type == AVMEDIA_TYPE_VIDEO && as->max_frame_rate.num && !as->ambiguous_frame_rate && !av_cmp_q(as->min_frame_rate, as->max_frame_rate)) + avio_printf(out, " frameRate=\"%d/%d\"", as->max_frame_rate.num, as->max_frame_rate.den); + if (as->media_type == AVMEDIA_TYPE_VIDEO) { + avio_printf(out, " maxWidth=\"%d\" maxHeight=\"%d\"", as->max_width, as->max_height); + avio_printf(out, " par=\"%d:%d\"", as->par.num, as->par.den); + } lang = av_dict_get(as->metadata, "language", NULL, 0); if (lang) avio_printf(out, " lang=\"%s\"", lang->value); avio_printf(out, ">\n"); + if (!final && c->ldash && as->max_frag_duration) + avio_printf(out, "\t\t\t\n", as->max_frag_duration); role = av_dict_get(as->metadata, "role", NULL, 0); if (role) avio_printf(out, "\t\t\t\n", role->value); if (as->descriptor) avio_printf(out, "\t\t\t%s\n", as->descriptor); for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; OutputStream *os = &c->streams[i]; char bandwidth_str[64] = {'\0'}; @@ -780,10 +840,14 @@ static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_ind os->bit_rate); if (as->media_type == AVMEDIA_TYPE_VIDEO) { - AVStream *st = s->streams[i]; avio_printf(out, "\t\t\tformat_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->width, s->streams[i]->codecpar->height); - if (st->avg_frame_rate.num) + if (st->codecpar->field_order == AV_FIELD_UNKNOWN) + avio_printf(out, " scanType=\"unknown\""); + else if (st->codecpar->field_order != AV_FIELD_PROGRESSIVE) + avio_printf(out, " scanType=\"interlaced\""); + avio_printf(out, " sar=\"%d:%d\"", os->sar.num, os->sar.den); + if (st->avg_frame_rate.num && av_cmp_q(as->min_frame_rate, as->max_frame_rate) < 0) avio_printf(out, " frameRate=\"%d/%d\"", st->avg_frame_rate.num, st->avg_frame_rate.den); avio_printf(out, ">\n"); } else { @@ -792,6 +856,15 @@ static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_ind avio_printf(out, "\t\t\t\t\n", s->streams[i]->codecpar->channels); } + if (!final && c->write_prft && os->producer_reference_time_str[0]) { + avio_printf(out, "\t\t\t\t\n", + i, os->producer_reference_time_str, c->presentation_time_offset); + avio_printf(out, "\t\t\t\t\t\n", c->utc_timing_url); + avio_printf(out, "\t\t\t\t\n"); + } + if (!final && c->ldash && os->gop_size && os->frag_type != FRAG_TYPE_NONE && + (os->frag_type != FRAG_TYPE_DURATION || os->frag_duration != os->seg_duration)) + avio_printf(out, "\t\t\t\t\n", os->gop_size); output_segment_list(os, out, s, i, final); avio_printf(out, "\t\t\t\n"); } @@ -803,8 +876,13 @@ static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_ind static int add_adaptation_set(AVFormatContext *s, AdaptationSet **as, enum AVMediaType type) { DASHContext *c = s->priv_data; + void *mem; - void *mem = av_realloc(c->as, sizeof(*c->as) * (c->nb_as + 1)); + if (c->profile & MPD_PROFILE_DVB && (c->nb_as + 1) > 16) { + av_log(s, AV_LOG_ERROR, "DVB-DASH profile allows a max of 16 Adaptation Sets\n"); + return AVERROR(EINVAL); + } + mem = av_realloc(c->as, sizeof(*c->as) * (c->nb_as + 1)); if (!mem) return AVERROR(ENOMEM); c->as = mem; @@ -813,6 +891,7 @@ static int add_adaptation_set(AVFormatContext *s, AdaptationSet **as, enum AVMed *as = &c->as[c->nb_as - 1]; 
memset(*as, 0, sizeof(**as)); (*as)->media_type = type; + (*as)->frag_type = -1; return 0; } @@ -830,7 +909,12 @@ static int adaptation_set_add_stream(AVFormatContext *s, int as_idx, int i) av_log(s, AV_LOG_ERROR, "Stream %d is already assigned to an AdaptationSet\n", i); return AVERROR(EINVAL); } + if (c->profile & MPD_PROFILE_DVB && (as->nb_streams + 1) > 16) { + av_log(s, AV_LOG_ERROR, "DVB-DASH profile allows a max of 16 Representations per Adaptation Set\n"); + return AVERROR(EINVAL); + } os->as_idx = as_idx; + ++as->nb_streams; return 0; } @@ -839,7 +923,7 @@ static int parse_adaptation_sets(AVFormatContext *s) { DASHContext *c = s->priv_data; const char *p = c->adaptation_sets; - enum { new_set, parse_id, parsing_streams, parse_descriptor } state; + enum { new_set, parse_default, parsing_streams, parse_seg_duration, parse_frag_duration } state; AdaptationSet *as; int i, n, ret; @@ -851,14 +935,18 @@ static int parse_adaptation_sets(AVFormatContext *s) snprintf(as->id, sizeof(as->id), "%d", i); c->streams[i].as_idx = c->nb_as; + ++as->nb_streams; } goto end; } // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on // option id=0,descriptor=descriptor_str,streams=0,1,2 and so on + // option id=0,seg_duration=2.5,frag_duration=0.5,streams=0,1,2 and so on // descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015 // descriptor_str should be a self-closing xml tag. + // seg_duration and frag_duration have the same syntax as the global options of + // the same name, and the former have precedence over them if set. state = new_set; while (*p) { if (*p == ' ') { @@ -875,8 +963,55 @@ static int parse_adaptation_sets(AVFormatContext *s) p += n; if (*p) p++; - state = parse_id; - } else if (state == parse_id && av_strstart(p, "descriptor=", &p)) { + state = parse_default; + } else if (state != new_set && av_strstart(p, "seg_duration=", &p)) { + state = parse_seg_duration; + } else if (state != new_set && av_strstart(p, "frag_duration=", &p)) { + state = parse_frag_duration; + } else if (state == parse_seg_duration || state == parse_frag_duration) { + char str[32]; + int64_t usecs = 0; + + n = strcspn(p, ","); + snprintf(str, sizeof(str), "%.*s", n, p); + p += n; + if (*p) + p++; + + ret = av_parse_time(&usecs, str, 1); + if (ret < 0) { + av_log(s, AV_LOG_ERROR, "Unable to parse option value \"%s\" as duration\n", str); + return ret; + } + + if (state == parse_seg_duration) + as->seg_duration = usecs; + else + as->frag_duration = usecs; + state = parse_default; + } else if (state != new_set && av_strstart(p, "frag_type=", &p)) { + char type_str[16]; + + n = strcspn(p, ","); + snprintf(type_str, sizeof(type_str), "%.*s", n, p); + p += n; + if (*p) + p++; + + if (!strcmp(type_str, "duration")) + as->frag_type = FRAG_TYPE_DURATION; + else if (!strcmp(type_str, "pframes")) + as->frag_type = FRAG_TYPE_PFRAMES; + else if (!strcmp(type_str, "every_frame")) + as->frag_type = FRAG_TYPE_EVERY_FRAME; + else if (!strcmp(type_str, "none")) + as->frag_type = FRAG_TYPE_NONE; + else { + av_log(s, AV_LOG_ERROR, "Unable to parse option value \"%s\" as fragment type\n", type_str); + return ret; + } + state = parse_default; + } else if (state != new_set && av_strstart(p, "descriptor=", &p)) { n = strcspn(p, ">") + 1; //followed by one comma, so plus 1 if (n < strlen(p)) { as->descriptor = av_strndup(p, n); @@ -887,8 +1022,8 @@ static int parse_adaptation_sets(AVFormatContext *s) p += n; if (*p) p++; - state = parse_descriptor; - } else if ((state == parse_id || state == parse_descriptor) && 
av_strstart(p, "streams=", &p)) { //descriptor is optional + state = parse_default; + } else if ((state != new_set) && av_strstart(p, "streams=", &p)) { //descriptor and durations are optional state = parsing_streams; } else if (state == parsing_streams) { AdaptationSet *as = &c->as[c->nb_as - 1]; @@ -977,8 +1112,13 @@ static int write_manifest(AVFormatContext *s, int final) "\txmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n" "\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\n" "\txsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd\"\n" - "\tprofiles=\"urn:mpeg:dash:profile:isoff-live:2011\"\n" - "\ttype=\"%s\"\n", final ? "static" : "dynamic"); + "\tprofiles=\""); + if (c->profile & MPD_PROFILE_DASH) + avio_printf(out, "%s%s", "urn:mpeg:dash:profile:isoff-live:2011", c->profile & MPD_PROFILE_DVB ? "," : "\"\n"); + if (c->profile & MPD_PROFILE_DVB) + avio_printf(out, "%s", "urn:dvb:dash:profile:dvb-dash:2014\"\n"); + avio_printf(out, "\ttype=\"%s\"\n", + final ? "static" : "dynamic"); if (final) { avio_printf(out, "\tmediaPresentationDuration=\""); write_time(out, c->total_duration); @@ -989,10 +1129,11 @@ static int write_manifest(AVFormatContext *s, int final) if (c->use_template && !c->use_timeline) update_period = 500; avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period); - avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE); + if (!c->ldash) + avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE); if (c->availability_start_time[0]) avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time); - format_date_now(now_str, sizeof(now_str)); + format_date(now_str, sizeof(now_str), av_gettime()); if (now_str[0]) avio_printf(out, "\tpublishTime=\"%s\"\n", now_str); if (c->window_size && c->use_template) { @@ -1002,7 +1143,7 @@ static int write_manifest(AVFormatContext *s, int final) } } avio_printf(out, "\tminBufferTime=\""); - write_time(out, c->last_duration * 2); + write_time(out, c->ldash && c->max_gop_size ? c->max_gop_size : c->last_duration * 2); avio_printf(out, "\">\n"); avio_printf(out, "\t\n"); if (title) { @@ -1011,6 +1152,14 @@ static int write_manifest(AVFormatContext *s, int final) av_free(escaped); } avio_printf(out, "\t\n"); + if (!final && c->target_latency && c->target_latency_refid >= 0) { + avio_printf(out, "\t\n"); + avio_printf(out, "\t\ttarget_latency / 1000); + if (s->nb_streams > 1) + avio_printf(out, " referenceId=\"%d\"", c->target_latency_refid); + avio_printf(out, "/>\n"); + avio_printf(out, "\t\n"); + } if (c->window_size && s->nb_streams > 0 && c->streams[0].nb_segments > 0 && !c->use_template) { OutputStream *os = &c->streams[0]; @@ -1148,6 +1297,10 @@ static int dash_init(AVFormatContext *s) if (c->single_file) c->use_template = 0; + if (!c->profile) { + av_log(s, AV_LOG_ERROR, "At least one profile must be enabled.\n"); + return AVERROR(EINVAL); + } #if FF_API_DASH_MIN_SEG_DURATION if (c->min_seg_duration != 5000000) { av_log(s, AV_LOG_WARNING, "The min_seg_duration option is deprecated and will be removed. 
Please use the -seg_duration\n"); @@ -1170,6 +1323,16 @@ static int dash_init(AVFormatContext *s) c->lhls = 0; } + if (c->ldash && !c->streaming) { + av_log(s, AV_LOG_WARNING, "LDash option will be ignored as streaming is not enabled\n"); + c->ldash = 0; + } + + if (c->target_latency && !c->streaming) { + av_log(s, AV_LOG_WARNING, "Target latency option will be ignored as streaming is not enabled\n"); + c->target_latency = 0; + } + if (c->global_sidx && !c->single_file) { av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as single_file is not enabled\n"); c->global_sidx = 0; @@ -1179,6 +1342,25 @@ static int dash_init(AVFormatContext *s) av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as streaming is enabled\n"); c->global_sidx = 0; } + if (c->frag_type == FRAG_TYPE_NONE && c->streaming) { + av_log(s, AV_LOG_VERBOSE, "Changing frag_type from none to every_frame as streaming is enabled\n"); + c->frag_type = FRAG_TYPE_EVERY_FRAME; + } + + if (c->write_prft && !c->utc_timing_url) { + av_log(s, AV_LOG_WARNING, "Producer Reference Time element option will be ignored as utc_timing_url is not set\n"); + c->write_prft = 0; + } + + if (c->write_prft && !c->streaming) { + av_log(s, AV_LOG_WARNING, "Producer Reference Time element option will be ignored as streaming is not enabled\n"); + c->write_prft = 0; + } + + if (c->target_latency && !c->write_prft) { + av_log(s, AV_LOG_WARNING, "Target latency option will be ignored as Producer Reference Time element will not be written\n"); + c->target_latency = 0; + } av_strlcpy(c->dirname, s->url, sizeof(c->dirname)); ptr = strrchr(c->dirname, '/'); @@ -1225,10 +1407,6 @@ static int dash_init(AVFormatContext *s) dict_copy_entry(&as->metadata, s->streams[i]->metadata, "language"); dict_copy_entry(&as->metadata, s->streams[i]->metadata, "role"); - ctx = avformat_alloc_context(); - if (!ctx) - return AVERROR(ENOMEM); - if (c->init_seg_name) { os->init_seg_name = av_strireplace(c->init_seg_name, "$ext$", os->extension_name); if (!os->init_seg_name) @@ -1261,10 +1439,13 @@ static int dash_init(AVFormatContext *s) } } + os->ctx = ctx = avformat_alloc_context(); + if (!ctx) + return AVERROR(ENOMEM); + ctx->oformat = av_guess_format(os->format_name, NULL, NULL); if (!ctx->oformat) return AVERROR_MUXER_NOT_FOUND; - os->ctx = ctx; ctx->interrupt_callback = s->interrupt_callback; ctx->opaque = s->opaque; ctx->io_close = s->io_close; @@ -1280,6 +1461,18 @@ static int dash_init(AVFormatContext *s) ctx->avoid_negative_ts = s->avoid_negative_ts; ctx->flags = s->flags; + os->parser = av_parser_init(st->codecpar->codec_id); + if (os->parser) { + os->parser_avctx = avcodec_alloc_context3(NULL); + if (!os->parser_avctx) + return AVERROR(ENOMEM); + ret = avcodec_parameters_to_context(os->parser_avctx, st->codecpar); + if (ret < 0) + return ret; + // We only want to parse frame headers + os->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; + } + if (c->single_file) { if (os->single_file_name) ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->single_file_name, i, 0, os->bit_rate, 0); @@ -1303,24 +1496,55 @@ static int dash_init(AVFormatContext *s) return ret; os->init_start_pos = 0; - if (c->format_options_str) { - ret = av_dict_parse_string(&opts, c->format_options_str, "=", ":", 0); - if (ret < 0) - return ret; + av_dict_copy(&opts, c->format_options, 0); + if (!as->seg_duration) + as->seg_duration = c->seg_duration; + if (!as->frag_duration) + as->frag_duration = c->frag_duration; + if (as->frag_type < 0) + as->frag_type = c->frag_type; + 
os->seg_duration = as->seg_duration; + os->frag_duration = as->frag_duration; + os->frag_type = as->frag_type; + + if (c->profile & MPD_PROFILE_DVB && (os->seg_duration > 15000000 || os->seg_duration < 960000)) { + av_log(s, AV_LOG_ERROR, "Segment duration %"PRId64" is outside the allowed range for DVB-DASH profile\n", os->seg_duration); + return AVERROR(EINVAL); + } + + if (os->frag_type == FRAG_TYPE_DURATION && !os->frag_duration) { + av_log(s, AV_LOG_WARNING, "frag_type set to duration for stream %d but no frag_duration set\n", i); + os->frag_type = c->streaming ? FRAG_TYPE_EVERY_FRAME : FRAG_TYPE_NONE; + } + if (os->frag_type == FRAG_TYPE_DURATION && os->frag_duration > os->seg_duration) { + av_log(s, AV_LOG_ERROR, "Fragment duration %"PRId64" is longer than Segment duration %"PRId64"\n", os->frag_duration, os->seg_duration); + return AVERROR(EINVAL); + } + if (os->frag_type == FRAG_TYPE_PFRAMES && (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO || !os->parser)) { + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !os->parser) + av_log(s, AV_LOG_WARNING, "frag_type set to P-Frame reordering, but no parser found for stream %d\n", i); + os->frag_type = c->streaming ? FRAG_TYPE_EVERY_FRAME : FRAG_TYPE_NONE; } if (os->segment_type == SEGMENT_TYPE_MP4) { if (c->streaming) - // frag_every_frame : Allows lower latency streaming // skip_sidx : Reduce bitrate overhead // skip_trailer : Avoids growing memory usage with time - av_dict_set(&opts, "movflags", "frag_every_frame+dash+delay_moov+skip_sidx+skip_trailer", 0); + av_dict_set(&opts, "movflags", "+dash+delay_moov+skip_sidx+skip_trailer", AV_DICT_APPEND); else { if (c->global_sidx) - av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov+global_sidx+skip_trailer", 0); + av_dict_set(&opts, "movflags", "+dash+delay_moov+global_sidx+skip_trailer", AV_DICT_APPEND); else - av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov+skip_trailer", 0); + av_dict_set(&opts, "movflags", "+dash+delay_moov+skip_trailer", AV_DICT_APPEND); } + if (os->frag_type == FRAG_TYPE_EVERY_FRAME) + av_dict_set(&opts, "movflags", "+frag_every_frame", AV_DICT_APPEND); + else + av_dict_set(&opts, "movflags", "+frag_custom", AV_DICT_APPEND); + if (os->frag_type == FRAG_TYPE_DURATION) + av_dict_set_int(&opts, "frag_duration", os->frag_duration, 0); + if (c->write_prft) + av_dict_set(&opts, "write_prft", "wallclock", 0); } else { av_dict_set_int(&opts, "cluster_time_limit", c->seg_duration / 1000, 0); av_dict_set_int(&opts, "cluster_size_limit", 5 * 1024 * 1024, 0); // set a large cluster size limit @@ -1344,6 +1568,7 @@ static int dash_init(AVFormatContext *s) s->avoid_negative_ts = ctx->avoid_negative_ts; if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { AVRational avg_frame_rate = s->streams[i]->avg_frame_rate; + AVRational par; if (avg_frame_rate.num > 0) { if (av_cmp_q(avg_frame_rate, as->min_frame_rate) < 0) as->min_frame_rate = avg_frame_rate; @@ -1352,6 +1577,27 @@ static int dash_init(AVFormatContext *s) } else { as->ambiguous_frame_rate = 1; } + + if (st->codecpar->width > as->max_width) + as->max_width = st->codecpar->width; + if (st->codecpar->height > as->max_height) + as->max_height = st->codecpar->height; + + if (st->sample_aspect_ratio.num) + os->sar = st->sample_aspect_ratio; + else + os->sar = (AVRational){1,1}; + av_reduce(&par.num, &par.den, + st->codecpar->width * (int64_t)os->sar.num, + st->codecpar->height * (int64_t)os->sar.den, + 1024 * 1024); + + if (as->par.num && av_cmp_q(par, as->par)) { + av_log(s, AV_LOG_ERROR, "Conflicting 
stream par values in Adaptation Set %d\n", os->as_idx); + return AVERROR(EINVAL); + } + as->par = par; + c->has_video = 1; } @@ -1370,8 +1616,11 @@ static int dash_init(AVFormatContext *s) av_log(s, AV_LOG_WARNING, "no video stream and no seg duration set\n"); return AVERROR(EINVAL); } + if (!c->has_video && c->frag_type == FRAG_TYPE_PFRAMES) + av_log(s, AV_LOG_WARNING, "no video stream and P-frame fragmentation set\n"); c->nr_of_streams_flushed = 0; + c->target_latency_refid = -1; return 0; } @@ -1581,7 +1830,7 @@ static int dash_flush(AVFormatContext *s, int final, int stream) c->streams[stream].first_pts, s->streams[stream]->time_base, AV_TIME_BASE_Q); - next_exp_index = (pts_diff / c->seg_duration) + 1; + next_exp_index = (pts_diff / c->streams[stream].seg_duration) + 1; } } @@ -1597,6 +1846,9 @@ static int dash_flush(AVFormatContext *s, int final, int stream) // Flush all audio streams as well, in sync with video keyframes, // but not the other video streams. if (stream >= 0 && i != stream) { + if (s->streams[stream]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO && + s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) + continue; if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) continue; // Make sure we don't flush audio streams multiple times, when @@ -1629,12 +1881,17 @@ static int dash_flush(AVFormatContext *s, int final, int stream) } } + os->last_duration = FFMAX(os->last_duration, av_rescale_q(os->max_pts - os->start_pts, + st->time_base, + AV_TIME_BASE_Q)); + if (!os->muxer_overhead) os->muxer_overhead = ((int64_t) (range_length - os->total_pkt_size) * 8 * AV_TIME_BASE) / av_rescale_q(os->max_pts - os->start_pts, st->time_base, AV_TIME_BASE_Q); os->total_pkt_size = 0; + os->total_pkt_duration = 0; if (!os->bit_rate) { // calculate average bitrate of first segment @@ -1700,6 +1957,7 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt) DASHContext *c = s->priv_data; AVStream *st = s->streams[pkt->stream_index]; OutputStream *os = &c->streams[pkt->stream_index]; + AdaptationSet *as = &c->as[os->as_idx - 1]; int64_t seg_end_duration, elapsed_duration; int ret; @@ -1725,38 +1983,60 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt) pkt->dts = 0; } - if (os->first_pts == AV_NOPTS_VALUE) + if (os->first_pts == AV_NOPTS_VALUE) { + int side_data_size; + AVProducerReferenceTime *prft = (AVProducerReferenceTime *)av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, + &side_data_size); + if (prft && side_data_size == sizeof(AVProducerReferenceTime) && !prft->flags) { + os->producer_reference_time = prft->wallclock; + if (c->target_latency_refid < 0) + c->target_latency_refid = pkt->stream_index; + } os->first_pts = pkt->pts; + } os->last_pts = pkt->pts; if (!c->availability_start_time[0]) { int64_t start_time_us = av_gettime(); c->start_time_s = start_time_us / 1000000; - format_date_now(c->availability_start_time, - sizeof(c->availability_start_time)); + format_date(c->availability_start_time, + sizeof(c->availability_start_time), start_time_us); } - if (!os->availability_time_offset && pkt->duration) { - int64_t frame_duration = av_rescale_q(pkt->duration, st->time_base, - AV_TIME_BASE_Q); - os->availability_time_offset = ((double) c->seg_duration - + if (!os->packets_written) + os->availability_time_offset = 0; + + if (!os->availability_time_offset && + ((os->frag_type == FRAG_TYPE_DURATION && os->seg_duration != os->frag_duration) || + (os->frag_type == FRAG_TYPE_EVERY_FRAME && pkt->duration))) { + int64_t frame_duration = 0; + + switch 
(os->frag_type) { + case FRAG_TYPE_DURATION: + frame_duration = os->frag_duration; + break; + case FRAG_TYPE_EVERY_FRAME: + frame_duration = av_rescale_q(pkt->duration, st->time_base, AV_TIME_BASE_Q); + break; + } + + os->availability_time_offset = ((double) os->seg_duration - frame_duration) / AV_TIME_BASE; + as->max_frag_duration = FFMAX(frame_duration, as->max_frag_duration); } if (c->use_template && !c->use_timeline) { elapsed_duration = pkt->pts - os->first_pts; - seg_end_duration = (int64_t) os->segment_index * c->seg_duration; + seg_end_duration = (int64_t) os->segment_index * os->seg_duration; } else { elapsed_duration = pkt->pts - os->start_pts; - seg_end_duration = c->seg_duration; + seg_end_duration = os->seg_duration; } - if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) && - pkt->flags & AV_PKT_FLAG_KEY && os->packets_written && + if (pkt->flags & AV_PKT_FLAG_KEY && os->packets_written && av_compare_ts(elapsed_duration, st->time_base, seg_end_duration, AV_TIME_BASE_Q) >= 0) { - int64_t prev_duration = c->last_duration; - + if (!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { c->last_duration = av_rescale_q(pkt->pts - os->start_pts, st->time_base, AV_TIME_BASE_Q); @@ -1764,14 +2044,20 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt) st->time_base, AV_TIME_BASE_Q); - if ((!c->use_timeline || !c->use_template) && prev_duration) { - if (c->last_duration < prev_duration*9/10 || - c->last_duration > prev_duration*11/10) { + if ((!c->use_timeline || !c->use_template) && os->last_duration) { + if (c->last_duration < os->last_duration*9/10 || + c->last_duration > os->last_duration*11/10) { av_log(s, AV_LOG_WARNING, "Segment durations differ too much, enable use_timeline " "and use_template, or keep a stricter keyframe interval\n"); } } + } + + if (c->write_prft && os->producer_reference_time && !os->producer_reference_time_str[0]) + format_date(os->producer_reference_time_str, + sizeof(os->producer_reference_time_str), + os->producer_reference_time); if ((ret = dash_flush(s, 0, pkt->stream_index)) < 0) return ret; @@ -1790,11 +2076,49 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt) os->max_pts = pkt->pts + pkt->duration; else os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration); - os->packets_written++; - os->total_pkt_size += pkt->size; + + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && + os->frag_type == FRAG_TYPE_PFRAMES && + os->packets_written) { + uint8_t *data; + int size; + + av_assert0(os->parser); + av_parser_parse2(os->parser, os->parser_avctx, + &data, &size, pkt->data, pkt->size, + pkt->pts, pkt->dts, pkt->pos); + + if ((os->parser->pict_type == AV_PICTURE_TYPE_P && + st->codecpar->video_delay && + !(os->last_flags & AV_PKT_FLAG_KEY)) || + pkt->flags & AV_PKT_FLAG_KEY) { + ret = av_write_frame(os->ctx, NULL); + if (ret < 0) + return ret; + + if (!os->availability_time_offset) { + int64_t frag_duration = av_rescale_q(os->total_pkt_duration, st->time_base, + AV_TIME_BASE_Q); + os->availability_time_offset = ((double) os->seg_duration - + frag_duration) / AV_TIME_BASE; + as->max_frag_duration = FFMAX(frag_duration, as->max_frag_duration); + } + } + } + + if (pkt->flags & AV_PKT_FLAG_KEY && (os->packets_written || os->nb_segments) && !os->gop_size) { + os->gop_size = os->last_duration + av_rescale_q(os->total_pkt_duration, st->time_base, AV_TIME_BASE_Q); + c->max_gop_size = FFMAX(c->max_gop_size, os->gop_size); + } + if ((ret = ff_write_chained(os->ctx, 0, pkt, s, 0)) < 0) return ret; + 
os->packets_written++; + os->total_pkt_size += pkt->size; + os->total_pkt_duration += pkt->duration; + os->last_flags = pkt->flags; + if (!os->init_range_length) flush_init_segment(s, os); @@ -1916,6 +2240,12 @@ static const AVOption options[] = { { "min_seg_duration", "minimum segment duration (in microseconds) (will be deprecated)", OFFSET(min_seg_duration), AV_OPT_TYPE_INT, { .i64 = 5000000 }, 0, INT_MAX, E }, #endif { "seg_duration", "segment duration (in seconds, fractional value can be set)", OFFSET(seg_duration), AV_OPT_TYPE_DURATION, { .i64 = 5000000 }, 0, INT_MAX, E }, + { "frag_duration", "fragment duration (in seconds, fractional value can be set)", OFFSET(frag_duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, E }, + { "frag_type", "set type of interval for fragments", OFFSET(frag_type), AV_OPT_TYPE_INT, {.i64 = FRAG_TYPE_NONE }, 0, FRAG_TYPE_NB - 1, E, "frag_type"}, + { "none", "one fragment per segment", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_NONE }, 0, UINT_MAX, E, "frag_type"}, + { "every_frame", "fragment at every frame", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_EVERY_FRAME }, 0, UINT_MAX, E, "frag_type"}, + { "duration", "fragment at specific time intervals", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_DURATION }, 0, UINT_MAX, E, "frag_type"}, + { "pframes", "fragment at keyframes and following P-Frame reordering (Video only, experimental)", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_PFRAMES }, 0, UINT_MAX, E, "frag_type"}, { "remove_at_exit", "remove all segments when finished", OFFSET(remove_at_exit), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "use_template", "Use SegmentTemplate instead of SegmentList", OFFSET(use_template), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E }, { "use_timeline", "Use SegmentTimeline in SegmentTemplate", OFFSET(use_timeline), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E }, @@ -1931,7 +2261,7 @@ static const AVOption options[] = { { "streaming", "Enable/Disable streaming mode of output. Each frame will be moof fragment", OFFSET(streaming), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "timeout", "set timeout for socket I/O operations", OFFSET(timeout), AV_OPT_TYPE_DURATION, { .i64 = -1 }, -1, INT_MAX, .flags = E }, { "index_correction", "Enable/Disable segment index correction logic", OFFSET(index_correction), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, - { "format_options","set list of options for the container format (mp4/webm) used for dash", OFFSET(format_options_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E}, + { "format_options","set list of options for the container format (mp4/webm) used for dash", OFFSET(format_options), AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, E}, { "global_sidx", "Write global SIDX atom. Applicable only for single file, mp4 output, non-streaming mode", OFFSET(global_sidx), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "dash_segment_type", "set dash segment files type", OFFSET(segment_type_option), AV_OPT_TYPE_INT, {.i64 = SEGMENT_TYPE_AUTO }, 0, SEGMENT_TYPE_NB - 1, E, "segment_type"}, { "auto", "select segment file format based on codec", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_AUTO }, 0, UINT_MAX, E, "segment_type"}, @@ -1939,7 +2269,14 @@ static const AVOption options[] = { { "webm", "make segment file in WebM format", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_WEBM }, 0, UINT_MAX, E, "segment_type"}, { "ignore_io_errors", "Ignore IO errors during open and write. 
Useful for long-duration runs with network output", OFFSET(ignore_io_errors), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "lhls", "Enable Low-latency HLS(Experimental). Adds #EXT-X-PREFETCH tag with current segment's URI", OFFSET(lhls), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, + { "ldash", "Enable Low-latency dash. Constrains the value of a few elements", OFFSET(ldash), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E }, { "master_m3u8_publish_rate", "Publish master playlist every after this many segment intervals", OFFSET(master_publish_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT_MAX, E}, + { "write_prft", "Write producer reference time element", OFFSET(write_prft), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, E}, + { "mpd_profile", "Set profiles. Elements and values used in the manifest may be constrained by them", OFFSET(profile), AV_OPT_TYPE_FLAGS, {.i64 = MPD_PROFILE_DASH }, 0, UINT_MAX, E, "mpd_profile"}, + { "dash", "MPEG-DASH ISO Base media file format live profile", 0, AV_OPT_TYPE_CONST, {.i64 = MPD_PROFILE_DASH }, 0, UINT_MAX, E, "mpd_profile"}, + { "dvb_dash", "DVB-DASH profile", 0, AV_OPT_TYPE_CONST, {.i64 = MPD_PROFILE_DVB }, 0, UINT_MAX, E, "mpd_profile"}, + { "http_opts", "HTTP protocol options", OFFSET(http_opts), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E }, + { "target_latency", "Set desired target latency for Low-latency dash", OFFSET(target_latency), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, E }, { NULL }, }; diff --git a/libavformat/dfa.c b/libavformat/dfa.c index a5ecfd9b15..d667cd6277 100644 --- a/libavformat/dfa.c +++ b/libavformat/dfa.c @@ -40,7 +40,7 @@ static int dfa_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; AVStream *st; - int frames; + int frames, ret; int version; uint32_t mspf; @@ -69,8 +69,8 @@ static int dfa_read_header(AVFormatContext *s) avio_skip(pb, 128 - 16); // padding st->duration = frames; - if (ff_alloc_extradata(st->codecpar, 2)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 2)) < 0) + return ret; AV_WL16(st->codecpar->extradata, version); if (version == 0x100) st->sample_aspect_ratio = (AVRational){2, 1}; diff --git a/libavformat/dhav.c b/libavformat/dhav.c index 120b8e4318..5e9abdb611 100644 --- a/libavformat/dhav.c +++ b/libavformat/dhav.c @@ -40,6 +40,7 @@ typedef struct DHAVContext { int audio_codec; int sample_rate; int64_t last_good_pos; + int64_t duration; int video_stream_index; int audio_stream_index; @@ -47,6 +48,7 @@ typedef struct DHAVContext { typedef struct DHAVStream { int64_t last_timestamp; + int64_t last_time; int64_t pts; } DHAVStream; @@ -66,77 +68,6 @@ static int dhav_probe(const AVProbeData *p) return 0; } -static int dhav_read_header(AVFormatContext *s) -{ - DHAVContext *dhav = s->priv_data; - uint8_t signature[5]; - - ffio_ensure_seekback(s->pb, 5); - avio_read(s->pb, signature, sizeof(signature)); - if (!memcmp(signature, "DAHUA", 5)) { - avio_skip(s->pb, 0x400 - 5); - dhav->last_good_pos = avio_tell(s->pb); - } else { - if (!memcmp(signature, "DHAV", 4)) { - avio_seek(s->pb, -5, SEEK_CUR); - dhav->last_good_pos = avio_tell(s->pb); - } else if (s->pb->seekable) { - avio_seek(s->pb, avio_size(s->pb) - 8, SEEK_SET); - while (avio_rl32(s->pb) == MKTAG('d','h','a','v')) { - int seek_back; - - seek_back = avio_rl32(s->pb) + 8; - dhav->last_good_pos = avio_tell(s->pb); - if (dhav->last_good_pos < seek_back) - break; - avio_seek(s->pb, -seek_back, SEEK_CUR); - } - } - } - - s->ctx_flags |= AVFMTCTX_NOHEADER; - dhav->video_stream_index = -1; - dhav->audio_stream_index = -1; - - return 0; -} - 
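
The new dash muxer options introduced in the dashenc.c changes above (frag_type, frag_duration, ldash, write_prft, mpd_profile, target_latency, http_opts) can also be driven programmatically. The following is a minimal, hedged sketch and is not part of this patch: it assumes a libavformat build that already contains these changes, and the output name "live.mpd", the timing URL and the chosen values are purely illustrative.

#include <libavformat/avformat.h>
#include <libavutil/opt.h>

int main(void)
{
    AVFormatContext *oc = NULL;

    if (avformat_alloc_output_context2(&oc, NULL, "dash", "live.mpd") < 0)
        return 1;

    /* pre-existing dashenc options that the new ones build on */
    av_opt_set(oc->priv_data, "streaming",      "1", 0);
    av_opt_set(oc->priv_data, "seg_duration",   "2", 0);              /* 2 s segments */
    av_opt_set(oc->priv_data, "utc_timing_url",
               "https://time.akamai.com/?iso", 0);                    /* illustrative URL */

    /* options introduced by this patch */
    av_opt_set(oc->priv_data, "frag_type",      "duration", 0);
    av_opt_set(oc->priv_data, "frag_duration",  "0.5", 0);            /* 0.5 s fragments */
    av_opt_set(oc->priv_data, "ldash",          "1", 0);
    av_opt_set(oc->priv_data, "write_prft",     "1", 0);
    av_opt_set(oc->priv_data, "target_latency", "3", 0);              /* 3 s target */

    /* ... add streams, call avformat_write_header() and mux as usual ... */

    avformat_free_context(oc);
    return 0;
}

Per the dash_init() checks added above, ldash and target_latency are ignored unless streaming is enabled, and write_prft additionally requires utc_timing_url, which is why those options are set together in this sketch.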
-static int64_t get_pts(AVFormatContext *s, DHAVStream *st) -{ - DHAVContext *dhav = s->priv_data; - /* - int year, month, day, hour, min, sec; - struct tm timeinfo; - - sec = dhav->date & 0x3F; - min = (dhav->date >> 6) & 0x3F; - hour = (dhav->date >> 12) & 0x1F; - day = (dhav->date >> 17) & 0x1F; - month = (dhav->date >> 22) & 0x0F; - year = ((dhav->date >> 26) & 0x3F) + 2000; - - timeinfo.tm_year = year - 1900; - timeinfo.tm_mon = month - 1; - timeinfo.tm_mday = day; - timeinfo.tm_hour = hour; - timeinfo.tm_min = min; - timeinfo.tm_sec = sec;*/ - - if (st->last_timestamp == AV_NOPTS_VALUE) { - st->last_timestamp = dhav->timestamp; - } - - if (st->last_timestamp <= dhav->timestamp) { - st->pts += dhav->timestamp - st->last_timestamp; - } else { - st->pts += 65535 + dhav->timestamp - st->last_timestamp; - } - - st->last_timestamp = dhav->timestamp; - - return st->pts; -} - static const uint32_t sample_rates[] = { 8000, 4000, 8000, 11025, 16000, 20000, 22050, 32000, 44100, 48000, @@ -264,15 +195,15 @@ static int read_chunk(AVFormatContext *s) dhav->frame_subnumber = avio_r8(s->pb); dhav->frame_number = avio_rl32(s->pb); frame_length = avio_rl32(s->pb); + dhav->date = avio_rl32(s->pb); if (frame_length < 24) return AVERROR_INVALIDDATA; if (dhav->type == 0xf1) { - ret = avio_skip(s->pb, frame_length - 16); + ret = avio_skip(s->pb, frame_length - 20); return ret < 0 ? ret : 0; } - dhav->date = avio_rl32(s->pb); dhav->timestamp = avio_rl16(s->pb); ext_length = avio_r8(s->pb); avio_skip(s->pb, 1); // checksum @@ -286,10 +217,125 @@ static int read_chunk(AVFormatContext *s) return frame_length - 8 - (end - start); } +static void get_timeinfo(unsigned date, struct tm *timeinfo) +{ + int year, month, day, hour, min, sec; + + sec = date & 0x3F; + min = (date >> 6) & 0x3F; + hour = (date >> 12) & 0x1F; + day = (date >> 17) & 0x1F; + month = (date >> 22) & 0x0F; + year = ((date >> 26) & 0x3F) + 2000; + + timeinfo->tm_year = year - 1900; + timeinfo->tm_mon = month - 1; + timeinfo->tm_mday = day; + timeinfo->tm_hour = hour; + timeinfo->tm_min = min; + timeinfo->tm_sec = sec; +} + +static int64_t get_duration(AVFormatContext *s) +{ + DHAVContext *dhav = s->priv_data; + int64_t start_pos = avio_tell(s->pb); + int64_t start = 0, end = 0; + struct tm timeinfo; + + if (!s->pb->seekable) + return 0; + + avio_seek(s->pb, avio_size(s->pb) - 8, SEEK_SET); + if (avio_rl32(s->pb) == MKTAG('d','h','a','v')) { + int seek_back = avio_rl32(s->pb); + + avio_seek(s->pb, -seek_back, SEEK_CUR); + read_chunk(s); + get_timeinfo(dhav->date, &timeinfo); + end = av_timegm(&timeinfo) * 1000LL; + } else { + avio_seek(s->pb, start_pos, SEEK_SET); + return 0; + } + + avio_seek(s->pb, start_pos, SEEK_SET); + + read_chunk(s); + get_timeinfo(dhav->date, &timeinfo); + start = av_timegm(&timeinfo) * 1000LL; + + avio_seek(s->pb, start_pos, SEEK_SET); + + return end - start; +} + +static int dhav_read_header(AVFormatContext *s) +{ + DHAVContext *dhav = s->priv_data; + uint8_t signature[5]; + + ffio_ensure_seekback(s->pb, 5); + avio_read(s->pb, signature, sizeof(signature)); + if (!memcmp(signature, "DAHUA", 5)) { + avio_skip(s->pb, 0x400 - 5); + dhav->last_good_pos = avio_tell(s->pb); + } else { + if (!memcmp(signature, "DHAV", 4)) { + avio_seek(s->pb, -5, SEEK_CUR); + dhav->last_good_pos = avio_tell(s->pb); + } else if (s->pb->seekable) { + avio_seek(s->pb, avio_size(s->pb) - 8, SEEK_SET); + while (avio_rl32(s->pb) == MKTAG('d','h','a','v')) { + int seek_back; + + seek_back = avio_rl32(s->pb) + 8; + dhav->last_good_pos = 
avio_tell(s->pb); + avio_seek(s->pb, -seek_back, SEEK_CUR); + } + avio_seek(s->pb, dhav->last_good_pos, SEEK_SET); + } + } + + dhav->duration = get_duration(s); + dhav->last_good_pos = avio_tell(s->pb); + s->ctx_flags |= AVFMTCTX_NOHEADER; + dhav->video_stream_index = -1; + dhav->audio_stream_index = -1; + + return 0; +} + +static int64_t get_pts(AVFormatContext *s, int stream_index) +{ + DHAVStream *dst = s->streams[stream_index]->priv_data; + DHAVContext *dhav = s->priv_data; + struct tm timeinfo; + time_t t; + + get_timeinfo(dhav->date, &timeinfo); + + t = av_timegm(&timeinfo); + if (dst->last_time == t) { + int64_t diff = dhav->timestamp - dst->last_timestamp; + + if (diff < 0) + diff += 65535; + dst->pts += diff; + } else { + dst->pts = t * 1000LL; + } + + dst->last_time = t; + dst->last_timestamp = dhav->timestamp; + + return dst->pts; +} + static int dhav_read_packet(AVFormatContext *s, AVPacket *pkt) { DHAVContext *dhav = s->priv_data; - int ret, stream_index; + int size, ret, stream_index; retry: while ((ret = read_chunk(s)) == 0) @@ -315,6 +361,7 @@ retry: case 0xc: st->codecpar->codec_id = AV_CODEC_ID_HEVC; break; default: avpriv_request_sample(s, "Unknown video codec %X\n", dhav->video_codec); } + st->duration = dhav->duration; st->codecpar->width = dhav->width; st->codecpar->height = dhav->height; st->avg_frame_rate.num = dhav->frame_rate; @@ -322,7 +369,7 @@ retry: st->priv_data = dst = av_mallocz(sizeof(DHAVStream)); if (!st->priv_data) return AVERROR(ENOMEM); - dst->last_timestamp = AV_NOPTS_VALUE; + dst->last_time = AV_NOPTS_VALUE; dhav->video_stream_index = st->index; avpriv_set_pts_info(st, 64, 1, 1000); @@ -347,12 +394,13 @@ retry: case 0x0d: st->codecpar->codec_id = AV_CODEC_ID_ADPCM_MS; break; default: avpriv_request_sample(s, "Unknown audio codec %X\n", dhav->audio_codec); } + st->duration = dhav->duration; st->codecpar->channels = dhav->audio_channels; st->codecpar->sample_rate = dhav->sample_rate; st->priv_data = dst = av_mallocz(sizeof(DHAVStream)); if (!st->priv_data) return AVERROR(ENOMEM); - dst->last_timestamp = AV_NOPTS_VALUE; + dst->last_time = AV_NOPTS_VALUE; dhav->audio_stream_index = st->index; avpriv_set_pts_info(st, 64, 1, 1000); @@ -366,15 +414,16 @@ retry: goto retry; } - ret = av_get_packet(s->pb, pkt, ret); + size = ret; + ret = av_get_packet(s->pb, pkt, size); if (ret < 0) return ret; pkt->stream_index = stream_index; if (dhav->type != 0xfc) pkt->flags |= AV_PKT_FLAG_KEY; - if (pkt->stream_index >= 0) - pkt->pts = get_pts(s, s->streams[pkt->stream_index]->priv_data); pkt->duration = 1; + if (pkt->stream_index >= 0) + pkt->pts = get_pts(s, pkt->stream_index); pkt->pos = dhav->last_good_pos; if (avio_rl32(s->pb) == MKTAG('d','h','a','v')) avio_skip(s->pb, 4); @@ -402,7 +451,7 @@ static int dhav_read_seek(AVFormatContext *s, int stream_index, DHAVStream *dst = st->priv_data; dst->pts = pts; - dst->last_timestamp = AV_NOPTS_VALUE; + dst->last_time = AV_NOPTS_VALUE; } dhav->last_good_pos = avio_tell(s->pb); @@ -418,5 +467,5 @@ AVInputFormat ff_dhav_demuxer = { .read_packet = dhav_read_packet, .read_seek = dhav_read_seek, .extensions = "dav", - .flags = AVFMT_GENERIC_INDEX | AVFMT_NO_BYTE_SEEK, + .flags = AVFMT_GENERIC_INDEX | AVFMT_NO_BYTE_SEEK | AVFMT_TS_DISCONT | AVFMT_TS_NONSTRICT, }; diff --git a/libavformat/dsfdec.c b/libavformat/dsfdec.c index 9be206af84..52cddab2c8 100644 --- a/libavformat/dsfdec.c +++ b/libavformat/dsfdec.c @@ -169,8 +169,8 @@ static int dsf_read_packet(AVFormatContext *s, AVPacket *pkt) if (packet_size <= 0 || skip_size <= 0) 
return AVERROR_INVALIDDATA; - if (av_new_packet(pkt, packet_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, packet_size)) < 0) + return ret; dst = pkt->data; for (ch = 0; ch < st->codecpar->channels; ch++) { ret = avio_read(pb, dst, packet_size / st->codecpar->channels); diff --git a/libavformat/dxa.c b/libavformat/dxa.c index 298cda05d7..994078e633 100644 --- a/libavformat/dxa.c +++ b/libavformat/dxa.c @@ -179,8 +179,8 @@ static int dxa_read_packet(AVFormatContext *s, AVPacket *pkt) tag = AV_RL32(buf); switch (tag) { case MKTAG('N', 'U', 'L', 'L'): - if(av_new_packet(pkt, 4 + pal_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, 4 + pal_size)) < 0) + return ret; pkt->stream_index = 0; if(pal_size) memcpy(pkt->data, pal, pal_size); memcpy(pkt->data + pal_size, buf, 4); @@ -204,8 +204,9 @@ static int dxa_read_packet(AVFormatContext *s, AVPacket *pkt) size); return AVERROR_INVALIDDATA; } - if(av_new_packet(pkt, size + DXA_EXTRA_SIZE + pal_size) < 0) - return AVERROR(ENOMEM); + ret = av_new_packet(pkt, size + DXA_EXTRA_SIZE + pal_size); + if (ret < 0) + return ret; memcpy(pkt->data + pal_size, buf, DXA_EXTRA_SIZE); ret = avio_read(s->pb, pkt->data + DXA_EXTRA_SIZE + pal_size, size); if(ret != size){ diff --git a/libavformat/ffmetaenc.c b/libavformat/ffmetaenc.c index a9adbb1d19..800fb1887c 100644 --- a/libavformat/ffmetaenc.c +++ b/libavformat/ffmetaenc.c @@ -54,7 +54,6 @@ static int write_header(AVFormatContext *s) avio_write(s->pb, ID_STRING, sizeof(ID_STRING) - 1); avio_w8(s->pb, '1'); // version avio_w8(s->pb, '\n'); - avio_flush(s->pb); return 0; } diff --git a/libavformat/fifo.c b/libavformat/fifo.c index b403ba717b..7b37fff6da 100644 --- a/libavformat/fifo.c +++ b/libavformat/fifo.c @@ -36,7 +36,6 @@ typedef struct FifoContext { AVFormatContext *avf; char *format; - char *format_options_str; AVDictionary *format_options; int queue_size; @@ -490,16 +489,6 @@ static int fifo_init(AVFormatContext *avf) return AVERROR(EINVAL); } - if (fifo->format_options_str) { - ret = av_dict_parse_string(&fifo->format_options, fifo->format_options_str, - "=", ":", 0); - if (ret < 0) { - av_log(avf, AV_LOG_ERROR, "Could not parse format options list '%s'\n", - fifo->format_options_str); - return ret; - } - } - oformat = av_guess_format(fifo->format, avf->url, NULL); if (!oformat) { ret = AVERROR_MUXER_NOT_FOUND; @@ -604,7 +593,6 @@ static void fifo_deinit(AVFormatContext *avf) { FifoContext *fifo = avf->priv_data; - av_dict_free(&fifo->format_options); avformat_free_context(fifo->avf); av_thread_message_queue_free(&fifo->queue); if (fifo->overflow_flag_lock_initialized) @@ -619,8 +607,8 @@ static const AVOption options[] = { {"queue_size", "Size of fifo queue", OFFSET(queue_size), AV_OPT_TYPE_INT, {.i64 = FIFO_DEFAULT_QUEUE_SIZE}, 1, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, - {"format_opts", "Options to be passed to underlying muxer", OFFSET(format_options_str), - AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM}, + {"format_opts", "Options to be passed to underlying muxer", OFFSET(format_options), + AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM}, {"drop_pkts_on_overflow", "Drop packets on fifo queue overflow not to block encoder", OFFSET(drop_pkts_on_overflow), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM}, diff --git a/libavformat/fitsdec.c b/libavformat/fitsdec.c index c7d1edec60..30e34fc4d5 100644 --- a/libavformat/fitsdec.c +++ b/libavformat/fitsdec.c @@ -157,11 +157,11 @@ static int 
fits_read_packet(AVFormatContext *s, AVPacket *pkt) av_bprint_init(&avbuf, FITS_BLOCK_SIZE, AV_BPRINT_SIZE_UNLIMITED); while ((ret = is_image(s, fits, &header, &avbuf, &size)) == 0) { + av_bprint_finalize(&avbuf, NULL); pos = avio_skip(s->pb, size); if (pos < 0) return pos; - av_bprint_finalize(&avbuf, NULL); av_bprint_init(&avbuf, FITS_BLOCK_SIZE, AV_BPRINT_SIZE_UNLIMITED); avpriv_fits_header_init(&header, STATE_XTENSION); } diff --git a/libavformat/flacenc.c b/libavformat/flacenc.c index abbed38f89..1aae0c97e0 100644 --- a/libavformat/flacenc.c +++ b/libavformat/flacenc.c @@ -349,7 +349,6 @@ static int flac_write_trailer(struct AVFormatContext *s) avio_seek(pb, 8, SEEK_SET); avio_write(pb, streaminfo, FLAC_STREAMINFO_SIZE); avio_seek(pb, file_size, SEEK_SET); - avio_flush(pb); } else { av_log(s, AV_LOG_WARNING, "unable to rewrite FLAC header.\n"); } diff --git a/libavformat/flic.c b/libavformat/flic.c index d7844ce04f..d2a5cf995c 100644 --- a/libavformat/flic.c +++ b/libavformat/flic.c @@ -89,7 +89,7 @@ static int flic_read_header(AVFormatContext *s) AVIOContext *pb = s->pb; unsigned char header[FLIC_HEADER_SIZE]; AVStream *st, *ast; - int speed; + int speed, ret; int magic_number; unsigned char preamble[FLIC_PREAMBLE_SIZE]; @@ -125,8 +125,8 @@ static int flic_read_header(AVFormatContext *s) } /* send over the whole 128-byte FLIC header */ - if (ff_alloc_extradata(st->codecpar, FLIC_HEADER_SIZE)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, FLIC_HEADER_SIZE)) < 0) + return ret; memcpy(st->codecpar->extradata, header, FLIC_HEADER_SIZE); /* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */ @@ -175,9 +175,8 @@ static int flic_read_header(AVFormatContext *s) avio_seek(pb, 12, SEEK_SET); /* send over abbreviated FLIC header chunk */ - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, 12)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 12)) < 0) + return ret; memcpy(st->codecpar->extradata, header, 12); } else if (magic_number == FLIC_FILE_MAGIC_1) { @@ -216,10 +215,9 @@ static int flic_read_packet(AVFormatContext *s, magic = AV_RL16(&preamble[4]); if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) { - if (av_new_packet(pkt, size)) { - ret = AVERROR(EIO); - break; - } + if ((ret = av_new_packet(pkt, size)) < 0) + return ret; + pkt->stream_index = flic->video_stream_index; pkt->pts = flic->frame_number++; pkt->pos = avio_tell(pb); @@ -232,10 +230,8 @@ static int flic_read_packet(AVFormatContext *s, } packet_read = 1; } else if (magic == FLIC_TFTD_CHUNK_AUDIO) { - if (av_new_packet(pkt, size)) { - ret = AVERROR(EIO); - break; - } + if ((ret = av_new_packet(pkt, size)) < 0) + return ret; /* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */ avio_skip(pb, 10); diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c index 6bfe6248d6..7c3e5b06c6 100644 --- a/libavformat/flvdec.c +++ b/libavformat/flvdec.c @@ -795,12 +795,12 @@ static int flv_read_close(AVFormatContext *s) static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size) { + int ret; if (!size) return 0; - av_freep(&st->codecpar->extradata); - if (ff_get_extradata(s, st->codecpar, s->pb, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, s->pb, size)) < 0) + return ret; st->internal->need_context_update = 1; return 0; } diff --git a/libavformat/flvenc.c b/libavformat/flvenc.c index 
872050a74f..1aaf0333ca 100644 --- a/libavformat/flvenc.c +++ b/libavformat/flvenc.c @@ -887,7 +887,7 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) unsigned ts; int size = pkt->size; uint8_t *data = NULL; - int flags = -1, flags_size, ret; + int flags = -1, flags_size, ret = 0; int64_t cur_offset = avio_tell(pb); if (par->codec_type == AVMEDIA_TYPE_AUDIO && !pkt->size) { @@ -908,14 +908,10 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) int side_size = 0; uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size); if (side && side_size > 0 && (side_size != par->extradata_size || memcmp(side, par->extradata, side_size))) { - av_free(par->extradata); - par->extradata = av_mallocz(side_size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!par->extradata) { - par->extradata_size = 0; - return AVERROR(ENOMEM); - } + ret = ff_alloc_extradata(par, side_size); + if (ret < 0) + return ret; memcpy(par->extradata, side, side_size); - par->extradata_size = side_size; flv_write_codec_header(s, par, pkt->dts); } } @@ -996,7 +992,8 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) if (size + flags_size >= 1<<24) { av_log(s, AV_LOG_ERROR, "Too large packet with size %u >= %u\n", size + flags_size, 1<<24); - return AVERROR(EINVAL); + ret = AVERROR(EINVAL); + goto fail; } avio_wb24(pb, size + flags_size); @@ -1061,15 +1058,17 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) case AVMEDIA_TYPE_VIDEO: flv->videosize += (avio_tell(pb) - cur_offset); flv->lasttimestamp = flv->acurframeindex / flv->framerate; + flv->acurframeindex++; if (pkt->flags & AV_PKT_FLAG_KEY) { - double ts = flv->acurframeindex / flv->framerate; + double ts = flv->lasttimestamp; int64_t pos = cur_offset; - flv->lastkeyframetimestamp = flv->acurframeindex / flv->framerate; + flv->lastkeyframetimestamp = ts; flv->lastkeyframelocation = pos; - flv_append_keyframe_info(s, flv, ts, pos); + ret = flv_append_keyframe_info(s, flv, ts, pos); + if (ret < 0) + goto fail; } - flv->acurframeindex++; break; case AVMEDIA_TYPE_AUDIO: @@ -1081,10 +1080,10 @@ static int flv_write_packet(AVFormatContext *s, AVPacket *pkt) break; } } - +fail: av_free(data); - return pb->error; + return ret; } static int flv_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt) diff --git a/libavformat/framehash.c b/libavformat/framehash.c index 3ae9092c61..8d90793d7c 100644 --- a/libavformat/framehash.c +++ b/libavformat/framehash.c @@ -45,7 +45,6 @@ int ff_framehash_write_header(AVFormatContext *s) avio_printf(s->pb, "#sar %d: %d/%d\n", i, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den); break; } - avio_flush(s->pb); } return 0; } diff --git a/libavformat/fsb.c b/libavformat/fsb.c index faad6b16fd..fd3e484371 100644 --- a/libavformat/fsb.c +++ b/libavformat/fsb.c @@ -41,6 +41,7 @@ static int fsb_read_header(AVFormatContext *s) int64_t offset; AVCodecParameters *par; AVStream *st = avformat_new_stream(s, NULL); + int ret; avio_skip(pb, 3); // "FSB" version = avio_r8(pb) - '0'; @@ -86,9 +87,9 @@ static int fsb_read_header(AVFormatContext *s) par->block_align = 8 * par->channels; if (par->channels > INT_MAX / 32) return AVERROR_INVALIDDATA; - ff_alloc_extradata(par, 32 * par->channels); - if (!par->extradata) - return AVERROR(ENOMEM); + ret = ff_alloc_extradata(par, 32 * par->channels); + if (ret < 0) + return ret; avio_seek(pb, 0x68, SEEK_SET); for (c = 0; c < par->channels; c++) { avio_read(pb, par->extradata + 32 * c, 32); @@ -130,18 +131,18 @@ static int 
fsb_read_header(AVFormatContext *s) switch (par->codec_id) { case AV_CODEC_ID_XMA2: - ff_alloc_extradata(par, 34); - if (!par->extradata) - return AVERROR(ENOMEM); + ret = ff_alloc_extradata(par, 34); + if (ret < 0) + return ret; memset(par->extradata, 0, 34); par->block_align = 2048; break; case AV_CODEC_ID_ADPCM_THP: if (par->channels > INT_MAX / 32) return AVERROR_INVALIDDATA; - ff_alloc_extradata(par, 32 * par->channels); - if (!par->extradata) - return AVERROR(ENOMEM); + ret = ff_alloc_extradata(par, 32 * par->channels); + if (ret < 0) + return ret; avio_seek(pb, 0x80, SEEK_SET); for (c = 0; c < par->channels; c++) { avio_read(pb, par->extradata + 32 * c, 32); diff --git a/libavformat/gxfenc.c b/libavformat/gxfenc.c index ad9ddea887..e09b8d7625 100644 --- a/libavformat/gxfenc.c +++ b/libavformat/gxfenc.c @@ -834,7 +834,6 @@ static int gxf_write_header(AVFormatContext *s) gxf->packet_count = 3; - avio_flush(pb); return 0; } @@ -864,13 +863,11 @@ static int gxf_write_trailer(AVFormatContext *s) return ret; gxf_write_flt_packet(s); gxf_write_umf_packet(s); - avio_flush(pb); /* update duration in all map packets */ for (i = 1; i < gxf->map_offsets_nb; i++) { avio_seek(pb, gxf->map_offsets[i], SEEK_SET); if ((ret = gxf_write_map_packet(s, 1)) < 0) return ret; - avio_flush(pb); } avio_seek(pb, end, SEEK_SET); diff --git a/libavformat/hashenc.c b/libavformat/hashenc.c index 34a8fd1f50..ce609f6efa 100644 --- a/libavformat/hashenc.c +++ b/libavformat/hashenc.c @@ -152,7 +152,6 @@ static int hash_write_trailer(struct AVFormatContext *s) av_hash_final_hex(c->hashes[i], buf + strlen(buf), sizeof(buf) - strlen(buf)); av_strlcatf(buf, sizeof(buf), "\n"); avio_write(s->pb, buf, strlen(buf)); - avio_flush(s->pb); } return 0; @@ -325,7 +324,6 @@ static int framehash_write_packet(struct AVFormatContext *s, AVPacket *pkt) } avio_printf(s->pb, "\n"); - avio_flush(s->pb); return 0; } diff --git a/libavformat/hls.c b/libavformat/hls.c index 21353bbad7..538af0dbf8 100644 --- a/libavformat/hls.c +++ b/libavformat/hls.c @@ -1663,7 +1663,7 @@ static int save_avio_options(AVFormatContext *s) { HLSContext *c = s->priv_data; static const char * const opts[] = { - "headers", "http_proxy", "user_agent", "cookies", "referer", "rw_timeout", NULL }; + "headers", "http_proxy", "user_agent", "cookies", "referer", "rw_timeout", "icy", NULL }; const char * const * opt = opts; uint8_t *buf; int ret = 0; @@ -2201,9 +2201,8 @@ static int hls_read_packet(AVFormatContext *s, AVPacket *pkt) ist = pls->ctx->streams[pls->pkt.stream_index]; st = pls->main_streams[pls->pkt.stream_index]; - *pkt = pls->pkt; + av_packet_move_ref(pkt, &pls->pkt); pkt->stream_index = st->index; - reset_packet(&c->playlists[minplaylist]->pkt); if (pkt->dts != AV_NOPTS_VALUE) c->cur_timestamp = av_rescale_q(pkt->dts, diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c index 6f36a23cbe..d130f03ea6 100644 --- a/libavformat/hlsenc.c +++ b/libavformat/hlsenc.c @@ -199,7 +199,6 @@ typedef struct HLSContext { int64_t max_seg_size; // every segment file max size char *baseurl; - char *format_options_str; char *vtt_format_options_str; char *subtitle_filename; AVDictionary *format_options; @@ -460,7 +459,6 @@ static int flush_dynbuf(VariantStream *vs, int *range_length) // flush av_write_frame(ctx, NULL); - avio_flush(ctx->pb); // write out to file *range_length = avio_close_dyn_buf(ctx->pb, &vs->temp_buffer); @@ -835,47 +833,41 @@ static int hls_mux_init(AVFormatContext *s, VariantStream *vs) vs->packets_written = 0; vs->init_range_length = 0; - 
set_http_options(s, &options, hls); + if ((ret = avio_open_dyn_buf(&oc->pb)) < 0) return ret; if (hls->segment_type == SEGMENT_TYPE_FMP4) { + set_http_options(s, &options, hls); if (byterange_mode) { ret = hlsenc_io_open(s, &vs->out, vs->basename, &options); } else { ret = hlsenc_io_open(s, &vs->out, vs->base_output_dirname, &options); } + av_dict_free(&options); } - av_dict_free(&options); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Failed to open segment '%s'\n", vs->fmp4_init_filename); return ret; } - if (hls->format_options_str) { - ret = av_dict_parse_string(&hls->format_options, hls->format_options_str, "=", ":", 0); - if (ret < 0) { - av_log(s, AV_LOG_ERROR, "Could not parse format options list '%s'\n", - hls->format_options_str); - return ret; - } - } - - av_dict_copy(&options, hls->format_options, 0); if (hls->segment_type == SEGMENT_TYPE_FMP4) { + int remaining_options; + + av_dict_copy(&options, hls->format_options, 0); av_dict_set(&options, "fflags", "-autobsf", 0); av_dict_set(&options, "movflags", "+frag_custom+dash+delay_moov", AV_DICT_APPEND); ret = avformat_init_output(oc, &options); + remaining_options = av_dict_count(options); + av_dict_free(&options); if (ret < 0) return ret; - if (av_dict_count(options)) { - av_log(s, AV_LOG_ERROR, "Some of the provided format options in '%s' are not recognized\n", hls->format_options_str); - av_dict_free(&options); + if (remaining_options) { + av_log(s, AV_LOG_ERROR, "Some of the provided format options are not recognized\n"); return AVERROR(EINVAL); } } avio_flush(oc->pb); - av_dict_free(&options); return 0; } @@ -1650,8 +1642,6 @@ static int hls_start(AVFormatContext *s, VariantStream *vs) } vs->number++; - set_http_options(s, &options, c); - proto = avio_find_protocol_name(oc->url); use_temp_file = proto && !strcmp(proto, "file") && (c->flags & HLS_TEMP_FILE); @@ -1702,6 +1692,7 @@ static int hls_start(AVFormatContext *s, VariantStream *vs) av_opt_set(oc->priv_data, "pat_period", period, 0); } if (c->flags & HLS_SINGLE_FILE) { + set_http_options(s, &options, c); if ((err = hlsenc_io_open(s, &vs->out, oc->url, &options)) < 0) { if (c->ignore_io_errors) err = 0; @@ -1873,7 +1864,7 @@ static int parse_variant_stream_mapstring(AVFormatContext *s) VariantStream *vs; int stream_index, i, j; enum AVMediaType codec_type; - int nb_varstreams, nb_streams; + int nb_varstreams = 0, nb_streams; char *p, *q, *saveptr1, *saveptr2, *varstr, *keyval; const char *val; @@ -1898,13 +1889,14 @@ static int parse_variant_stream_mapstring(AVFormatContext *s) q = p; while (av_strtok(q, " \t", &saveptr1)) { q = NULL; - hls->nb_varstreams++; + nb_varstreams++; } av_freep(&p); - hls->var_streams = av_mallocz(sizeof(*hls->var_streams) * hls->nb_varstreams); + hls->var_streams = av_mallocz(sizeof(*hls->var_streams) * nb_varstreams); if (!hls->var_streams) return AVERROR(ENOMEM); + hls->nb_varstreams = nb_varstreams; p = hls->var_stream_map; nb_varstreams = 0; @@ -1934,6 +1926,7 @@ static int parse_variant_stream_mapstring(AVFormatContext *s) while (keyval = av_strtok(varstr, ",", &saveptr2)) { varstr = NULL; if (av_strstart(keyval, "language:", &val)) { + av_free(vs->language); vs->language = av_strdup(val); if (!vs->language) return AVERROR(ENOMEM); @@ -1944,16 +1937,19 @@ static int parse_variant_stream_mapstring(AVFormatContext *s) hls->has_default_key = 1; continue; } else if (av_strstart(keyval, "name:", &val)) { + av_free(vs->varname); vs->varname = av_strdup(val); if (!vs->varname) return AVERROR(ENOMEM); continue; } else if (av_strstart(keyval, "agroup:", 
&val)) { + av_free(vs->agroup); vs->agroup = av_strdup(val); if (!vs->agroup) return AVERROR(ENOMEM); continue; } else if (av_strstart(keyval, "ccgroup:", &val)) { + av_free(vs->ccgroup); vs->ccgroup = av_strdup(val); if (!vs->ccgroup) return AVERROR(ENOMEM); @@ -2009,7 +2005,7 @@ static int parse_variant_stream_mapstring(AVFormatContext *s) static int parse_cc_stream_mapstring(AVFormatContext *s) { HLSContext *hls = s->priv_data; - int nb_ccstreams; + int nb_ccstreams = 0; char *p, *q, *ccstr, *keyval; char *saveptr1 = NULL, *saveptr2 = NULL; const char *val; @@ -2022,13 +2018,14 @@ static int parse_cc_stream_mapstring(AVFormatContext *s) q = p; while (av_strtok(q, " \t", &saveptr1)) { q = NULL; - hls->nb_ccstreams++; + nb_ccstreams++; } av_freep(&p); - hls->cc_streams = av_mallocz(sizeof(*hls->cc_streams) * hls->nb_ccstreams); + hls->cc_streams = av_mallocz(sizeof(*hls->cc_streams) * nb_ccstreams); if (!hls->cc_streams) return AVERROR(ENOMEM); + hls->nb_ccstreams = nb_ccstreams; p = hls->cc_stream_map; nb_ccstreams = 0; @@ -2044,14 +2041,17 @@ static int parse_cc_stream_mapstring(AVFormatContext *s) ccstr = NULL; if (av_strstart(keyval, "ccgroup:", &val)) { + av_free(ccs->ccgroup); ccs->ccgroup = av_strdup(val); if (!ccs->ccgroup) return AVERROR(ENOMEM); } else if (av_strstart(keyval, "instreamid:", &val)) { + av_free(ccs->instreamid); ccs->instreamid = av_strdup(val); if (!ccs->instreamid) return AVERROR(ENOMEM); } else if (av_strstart(keyval, "language:", &val)) { + av_free(ccs->language); ccs->language = av_strdup(val); if (!ccs->language) return AVERROR(ENOMEM); @@ -2079,7 +2079,7 @@ static int parse_cc_stream_mapstring(AVFormatContext *s) return AVERROR(EINVAL); } } else { - av_log(s, AV_LOG_ERROR, "Invalid instream ID %s, supported are CCn or SERIVICEn\n", + av_log(s, AV_LOG_ERROR, "Invalid instream ID %s, supported are CCn or SERVICEn\n", ccs->instreamid); return AVERROR(EINVAL); } @@ -2104,18 +2104,16 @@ static int update_variant_stream_info(AVFormatContext *s) return parse_variant_stream_mapstring(s); } else { //By default, a single variant stream with all the codec streams is created - hls->nb_varstreams = 1; - hls->var_streams = av_mallocz(sizeof(*hls->var_streams) * - hls->nb_varstreams); + hls->var_streams = av_mallocz(sizeof(*hls->var_streams)); if (!hls->var_streams) return AVERROR(ENOMEM); + hls->nb_varstreams = 1; hls->var_streams[0].var_stream_idx = 0; hls->var_streams[0].nb_streams = s->nb_streams; hls->var_streams[0].streams = av_mallocz(sizeof(AVStream *) * hls->var_streams[0].nb_streams); if (!hls->var_streams[0].streams) { - av_freep(&hls->var_streams); return AVERROR(ENOMEM); } @@ -2123,7 +2121,6 @@ static int update_variant_stream_info(AVFormatContext *s) if (hls->nb_ccstreams) { hls->var_streams[0].ccgroup = av_strdup(hls->cc_streams[0].ccgroup); if (!hls->var_streams[0].ccgroup) { - av_freep(&hls->var_streams); return AVERROR(ENOMEM); } } @@ -2239,7 +2236,6 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) int use_temp_file = 0; uint8_t *buffer = NULL; VariantStream *vs = NULL; - AVDictionary *options = NULL; char *old_filename = NULL; for (i = 0; i < hls->nb_varstreams; i++) { @@ -2341,11 +2337,6 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) use_temp_file = proto && !strcmp(proto, "file") && (hls->flags & HLS_TEMP_FILE); } - // look to rename the asset name - if (use_temp_file) { - av_dict_set(&options, "mpegts_flags", "resend_headers", 0); - } - if (hls->flags & HLS_SINGLE_FILE) { ret = flush_dynbuf(vs, &range_length); 
av_freep(&vs->temp_buffer); @@ -2354,8 +2345,8 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) } vs->size = range_length; } else { - set_http_options(s, &options, hls); if ((hls->max_seg_size > 0 && (vs->size >= hls->max_seg_size)) || !byterange_mode) { + AVDictionary *options = NULL; char *filename = NULL; if (hls->key_info_file || hls->encrypt) { av_dict_set(&options, "encryption_key", hls->key_string, 0); @@ -2365,12 +2356,21 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) filename = av_asprintf("%s", oc->url); } if (!filename) { + av_dict_free(&options); return AVERROR(ENOMEM); } + + // look to rename the asset name + if (use_temp_file) + av_dict_set(&options, "mpegts_flags", "resend_headers", 0); + + set_http_options(s, &options, hls); + ret = hlsenc_io_open(s, &vs->out, filename, &options); if (ret < 0) { av_log(s, hls->ignore_io_errors ? AV_LOG_WARNING : AV_LOG_ERROR, "Failed to open file '%s'\n", filename); + av_dict_free(&options); return hls->ignore_io_errors ? 0 : ret; } if (hls->segment_type == SEGMENT_TYPE_FMP4) { @@ -2378,6 +2378,7 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) } ret = flush_dynbuf(vs, &range_length); if (ret < 0) { + av_dict_free(&options); return ret; } ret = hlsenc_io_close(s, &vs->out, filename); @@ -2389,6 +2390,7 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) reflush_dynbuf(vs, &range_length); ret = hlsenc_io_close(s, &vs->out, filename); } + av_dict_free(&options); av_freep(&vs->temp_buffer); av_freep(&filename); } @@ -2545,7 +2547,6 @@ static int hls_write_trailer(struct AVFormatContext *s) if (!vs->init_range_length) { uint8_t *buffer = NULL; av_write_frame(oc, NULL); /* Flush any buffered data */ - avio_flush(oc->pb); range_length = avio_close_dyn_buf(oc->pb, &buffer); avio_write(vs->out, buffer, range_length); @@ -2631,7 +2632,6 @@ failed: } ffio_free_dyn_buf(&oc->pb); - vs->avf = NULL; av_free(old_filename); } @@ -2732,7 +2732,7 @@ static int hls_init(AVFormatContext *s) char b[15]; struct tm *p, tmbuf; if (!(p = localtime_r(&t, &tmbuf))) - return AVERROR(ENOMEM); + return AVERROR(errno); if (!strftime(b, sizeof(b), "%Y%m%d%H%M%S", p)) return AVERROR(ENOMEM); hls->start_sequence = strtoll(b, NULL, 10); @@ -2766,13 +2766,6 @@ static int hls_init(AVFormatContext *s) time(&now0); vs->initial_prog_date_time = now0; } - if (hls->format_options_str) { - ret = av_dict_parse_string(&hls->format_options, hls->format_options_str, "=", ":", 0); - if (ret < 0) { - av_log(s, AV_LOG_ERROR, "Could not parse format options list '%s'\n", hls->format_options_str); - goto fail; - } - } for (j = 0; j < vs->nb_streams; j++) { vs->has_video += vs->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO; @@ -2799,7 +2792,7 @@ static int hls_init(AVFormatContext *s) if (vs->has_subtitle) { vs->vtt_oformat = av_guess_format("webvtt", NULL, NULL); - if (!vs->oformat) { + if (!vs->vtt_oformat) { ret = AVERROR_MUXER_NOT_FOUND; goto fail; } @@ -2907,22 +2900,21 @@ static int hls_init(AVFormatContext *s) ret = AVERROR(ENOMEM); goto fail; } - vs->vtt_m3u8_name = av_malloc(vtt_basename_size); - if (!vs->vtt_m3u8_name ) { - ret = AVERROR(ENOMEM); - goto fail; - } av_strlcpy(vs->vtt_basename, vs->m3u8_name, vtt_basename_size); p = strrchr(vs->vtt_basename, '.'); if (p) *p = '\0'; if ( hls->subtitle_filename ) { - av_freep(&vs->vtt_m3u8_name); ret = format_name(hls->subtitle_filename, &vs->vtt_m3u8_name, i, vs->varname); if (ret < 0) goto fail; } else { + vs->vtt_m3u8_name = av_malloc(vtt_basename_size); + if 
(!vs->vtt_m3u8_name) { + ret = AVERROR(ENOMEM); + goto fail; + } strcpy(vs->vtt_m3u8_name, vs->vtt_basename); av_strlcat(vs->vtt_m3u8_name, "_vtt.m3u8", vtt_basename_size); } @@ -2981,7 +2973,7 @@ static const AVOption options[] = { {"hls_init_time", "set segment length in seconds at init list", OFFSET(init_time), AV_OPT_TYPE_FLOAT, {.dbl = 0}, 0, FLT_MAX, E}, {"hls_list_size", "set maximum number of playlist entries", OFFSET(max_nb_segments), AV_OPT_TYPE_INT, {.i64 = 5}, 0, INT_MAX, E}, {"hls_delete_threshold", "set number of unreferenced segments to keep before deleting", OFFSET(hls_delete_threshold), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, E}, - {"hls_ts_options","set hls mpegts list of options for the container format used for hls", OFFSET(format_options_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E}, + {"hls_ts_options","set hls mpegts list of options for the container format used for hls", OFFSET(format_options), AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, E}, {"hls_vtt_options","set hls vtt list of options for the container format used for hls", OFFSET(vtt_format_options_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E}, #if FF_API_HLS_WRAP {"hls_wrap", "set number after which the index wraps (will be deprecated)", OFFSET(wrap), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E}, diff --git a/libavformat/icecast.c b/libavformat/icecast.c index d2198b78ec..7d8f92fe73 100644 --- a/libavformat/icecast.c +++ b/libavformat/icecast.c @@ -89,7 +89,7 @@ static int icecast_open(URLContext *h, const char *uri, int flags) // URI part variables char h_url[1024], host[1024], auth[1024], path[1024]; - char *headers = NULL, *user = NULL; + char *headers, *user = NULL; int port, ret; AVBPrint bp; @@ -105,15 +105,16 @@ static int icecast_open(URLContext *h, const char *uri, int flags) cat_header(&bp, "Ice-Genre", s->genre); cat_header(&bp, "Ice-Public", s->public ? "1" : "0"); if (!av_bprint_is_complete(&bp)) { - ret = AVERROR(ENOMEM); - goto cleanup; + av_bprint_finalize(&bp, NULL); + return AVERROR(ENOMEM); } - av_bprint_finalize(&bp, &headers); + if ((ret = av_bprint_finalize(&bp, &headers)) < 0) + return ret; // Set options av_dict_set(&opt_dict, "method", s->legacy_icecast ? "SOURCE" : "PUT", 0); av_dict_set(&opt_dict, "auth_type", "basic", 0); - av_dict_set(&opt_dict, "headers", headers, 0); + av_dict_set(&opt_dict, "headers", headers, AV_DICT_DONT_STRDUP_VAL); av_dict_set(&opt_dict, "chunked_post", "0", 0); av_dict_set(&opt_dict, "send_expect_100", s->legacy_icecast ? 
"-1" : "1", 0); if (NOT_EMPTY(s->content_type)) @@ -169,7 +170,6 @@ static int icecast_open(URLContext *h, const char *uri, int flags) cleanup: av_freep(&user); - av_freep(&headers); av_dict_free(&opt_dict); return ret; diff --git a/libavformat/icoenc.c b/libavformat/icoenc.c index 975c3466bf..a7df8b72bc 100644 --- a/libavformat/icoenc.c +++ b/libavformat/icoenc.c @@ -106,8 +106,6 @@ static int ico_write_header(AVFormatContext *s) if (!ico->images) return AVERROR(ENOMEM); - avio_flush(pb); - return 0; } diff --git a/libavformat/id3v1.c b/libavformat/id3v1.c index 19be42121d..eb66098f51 100644 --- a/libavformat/id3v1.c +++ b/libavformat/id3v1.c @@ -92,7 +92,7 @@ const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1] = { [64] = "Native American", [65] = "Cabaret", [66] = "New Wave", - [67] = "Psychadelic", /* sic, the misspelling is used in the specification */ + [67] = "Psychedelic", [68] = "Rave", [69] = "Showtunes", [70] = "Trailer", @@ -110,7 +110,7 @@ const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1] = { [82] = "National Folk", [83] = "Swing", [84] = "Fast Fusion", - [85] = "Bebob", + [85] = "Bebop", [86] = "Latin", [87] = "Revival", [88] = "Celtic", @@ -148,20 +148,20 @@ const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1] = { [120] = "Duet", [121] = "Punk Rock", [122] = "Drum Solo", - [123] = "A capella", + [123] = "A Cappella", [124] = "Euro-House", [125] = "Dance Hall", [126] = "Goa", [127] = "Drum & Bass", [128] = "Club-House", - [129] = "Hardcore", + [129] = "Hardcore Techno", [130] = "Terror", [131] = "Indie", [132] = "BritPop", [133] = "Negerpunk", [134] = "Polsk Punk", [135] = "Beat", - [136] = "Christian Gangsta", + [136] = "Christian Gangsta Rap", [137] = "Heavy Metal", [138] = "Black Metal", [139] = "Crossover", @@ -171,8 +171,52 @@ const char * const ff_id3v1_genre_str[ID3v1_GENRE_MAX + 1] = { [143] = "Salsa", [144] = "Thrash Metal", [145] = "Anime", - [146] = "JPop", - [147] = "SynthPop", + [146] = "Jpop", + [147] = "Synthpop", + [148] = "Abstract", + [149] = "Art Rock", + [150] = "Baroque", + [151] = "Bhangra", + [152] = "Big Beat", + [153] = "Breakbeat", + [154] = "Chillout", + [155] = "Downtempo", + [156] = "Dub", + [157] = "EBM", + [158] = "Eclectic", + [159] = "Electro", + [160] = "Electroclash", + [161] = "Emo", + [162] = "Experimental", + [163] = "Garage", + [164] = "Global", + [165] = "IDM", + [166] = "Illbient", + [167] = "Industro-Goth", + [168] = "Jam Band", + [169] = "Krautrock", + [170] = "Leftfield", + [171] = "Lounge", + [172] = "Math Rock", + [173] = "New Romantic", + [174] = "Nu-Breakz", + [175] = "Post-Punk", + [176] = "Post-Rock", + [177] = "Psytrance", + [178] = "Shoegaze", + [179] = "Space Rock", + [180] = "Trop Rock", + [181] = "World Music", + [182] = "Neoclassical", + [183] = "Audiobook", + [184] = "Audio Theatre", + [185] = "Neue Deutsche Welle", + [186] = "Podcast", + [187] = "Indie Rock", + [188] = "G-Funk", + [189] = "Dubstep", + [190] = "Garage Rock", + [191] = "Psybient" }; static void get_string(AVFormatContext *s, const char *key, diff --git a/libavformat/id3v1.h b/libavformat/id3v1.h index d5dca35873..b3ad16df6c 100644 --- a/libavformat/id3v1.h +++ b/libavformat/id3v1.h @@ -26,7 +26,7 @@ #define ID3v1_TAG_SIZE 128 -#define ID3v1_GENRE_MAX 147 +#define ID3v1_GENRE_MAX 191 /** * ID3v1 genres diff --git a/libavformat/id3v2enc.c b/libavformat/id3v2enc.c index 9040501869..5d821ea4db 100644 --- a/libavformat/id3v2enc.c +++ b/libavformat/id3v2enc.c @@ -65,11 +65,11 @@ static void id3v2_encode_string(AVIOContext *pb, const 
uint8_t *str, static int id3v2_put_ttag(ID3v2EncContext *id3, AVIOContext *avioc, const char *str1, const char *str2, uint32_t tag, enum ID3v2Encoding enc) { - int len; + int len, ret; uint8_t *pb; AVIOContext *dyn_buf; - if (avio_open_dyn_buf(&dyn_buf) < 0) - return AVERROR(ENOMEM); + if ((ret = avio_open_dyn_buf(&dyn_buf)) < 0) + return ret; /* check if the strings are ASCII-only and use UTF16 only if * they're not */ @@ -103,7 +103,7 @@ static int id3v2_put_ttag(ID3v2EncContext *id3, AVIOContext *avioc, const char * */ static int id3v2_put_priv(ID3v2EncContext *id3, AVIOContext *avioc, const char *key, const char *data) { - int len; + int len, ret; uint8_t *pb; AVIOContext *dyn_buf; @@ -111,8 +111,8 @@ static int id3v2_put_priv(ID3v2EncContext *id3, AVIOContext *avioc, const char * return 0; } - if (avio_open_dyn_buf(&dyn_buf) < 0) - return AVERROR(ENOMEM); + if ((ret = avio_open_dyn_buf(&dyn_buf)) < 0) + return ret; // owner + null byte. avio_write(dyn_buf, key, strlen(key) + 1); @@ -268,15 +268,15 @@ static int write_ctoc(AVFormatContext *s, ID3v2EncContext *id3, int enc) if ((ret = avio_open_dyn_buf(&dyn_bc)) < 0) return ret; - id3->len += avio_put_str(dyn_bc, "toc"); + avio_put_str(dyn_bc, "toc"); avio_w8(dyn_bc, 0x03); avio_w8(dyn_bc, s->nb_chapters); for (int i = 0; i < s->nb_chapters; i++) { snprintf(name, 122, "ch%d", i); - id3->len += avio_put_str(dyn_bc, name); + avio_put_str(dyn_bc, name); } len = avio_get_dyn_buf(dyn_bc, &dyn_buf); - id3->len += 16 + ID3v2_HEADER_SIZE; + id3->len += len + ID3v2_HEADER_SIZE; avio_wb32(s->pb, MKBETAG('C', 'T', 'O', 'C')); avio_wb32(s->pb, len); @@ -359,7 +359,7 @@ int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt) const char *mimetype = NULL, *desc = ""; int enc = id3->version == 3 ? 
ID3v2_ENCODING_UTF16BOM : ID3v2_ENCODING_UTF8; - int i, len, type = 0; + int i, len, type = 0, ret; /* get the mimetype*/ while (mime->id != AV_CODEC_ID_NONE) { @@ -393,8 +393,8 @@ int ff_id3v2_write_apic(AVFormatContext *s, ID3v2EncContext *id3, AVPacket *pkt) enc = ID3v2_ENCODING_ISO8859; /* start writing */ - if (avio_open_dyn_buf(&dyn_buf) < 0) - return AVERROR(ENOMEM); + if ((ret = avio_open_dyn_buf(&dyn_buf)) < 0) + return ret; avio_w8(dyn_buf, enc); avio_put_str(dyn_buf, mimetype); diff --git a/libavformat/idroqdec.c b/libavformat/idroqdec.c index 1db4cce6f0..16aa2a146e 100644 --- a/libavformat/idroqdec.c +++ b/libavformat/idroqdec.c @@ -205,8 +205,9 @@ static int roq_read_packet(AVFormatContext *s, } /* load up the packet */ - if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE)) - return AVERROR(EIO); + ret = av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE); + if (ret < 0) + return ret; /* copy over preamble */ memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE); diff --git a/libavformat/idroqenc.c b/libavformat/idroqenc.c index 8122efef83..261f21939c 100644 --- a/libavformat/idroqenc.c +++ b/libavformat/idroqenc.c @@ -55,7 +55,6 @@ static int roq_write_header(struct AVFormatContext *s) } avio_write(s->pb, header, 8); - avio_flush(s->pb); return 0; } diff --git a/libavformat/iff.c b/libavformat/iff.c index 2a3729f97e..9cee31a86b 100644 --- a/libavformat/iff.c +++ b/libavformat/iff.c @@ -525,10 +525,10 @@ static int iff_read_header(AVFormatContext *s) data_size); return AVERROR_INVALIDDATA; } - st->codecpar->extradata_size = data_size + IFF_EXTRA_VIDEO_SIZE; - st->codecpar->extradata = av_malloc(data_size + IFF_EXTRA_VIDEO_SIZE + AV_INPUT_BUFFER_PADDING_SIZE); - if (!st->codecpar->extradata) - return AVERROR(ENOMEM); + res = ff_alloc_extradata(st->codecpar, + data_size + IFF_EXTRA_VIDEO_SIZE); + if (res < 0) + return res; if (avio_read(pb, st->codecpar->extradata + IFF_EXTRA_VIDEO_SIZE, data_size) < 0) { av_freep(&st->codecpar->extradata); st->codecpar->extradata_size = 0; @@ -771,10 +771,9 @@ static int iff_read_header(AVFormatContext *s) iff->transparency = transparency; if (!st->codecpar->extradata) { - st->codecpar->extradata_size = IFF_EXTRA_VIDEO_SIZE; - st->codecpar->extradata = av_malloc(IFF_EXTRA_VIDEO_SIZE + AV_INPUT_BUFFER_PADDING_SIZE); - if (!st->codecpar->extradata) - return AVERROR(ENOMEM); + int ret = ff_alloc_extradata(st->codecpar, IFF_EXTRA_VIDEO_SIZE); + if (ret < 0) + return ret; } av_assert0(st->codecpar->extradata_size >= IFF_EXTRA_VIDEO_SIZE); buf = st->codecpar->extradata; diff --git a/libavformat/ilbc.c b/libavformat/ilbc.c index ba11953b59..01c7112ad1 100644 --- a/libavformat/ilbc.c +++ b/libavformat/ilbc.c @@ -49,7 +49,6 @@ static int ilbc_write_header(AVFormatContext *s) av_log(s, AV_LOG_ERROR, "Unsupported mode\n"); return AVERROR(EINVAL); } - avio_flush(pb); return 0; } diff --git a/libavformat/img2.h b/libavformat/img2.h index 0e5b374a6b..5fd8ff77fc 100644 --- a/libavformat/img2.h +++ b/libavformat/img2.h @@ -61,6 +61,7 @@ typedef struct VideoDemuxData { int start_number_range; int frame_size; int ts_from_file; + int export_path_metadata; /**< enabled when set to 1. 
*/ } VideoDemuxData; typedef struct IdStrMap { diff --git a/libavformat/img2dec.c b/libavformat/img2dec.c index f8b4a655a5..37ee1bb746 100644 --- a/libavformat/img2dec.c +++ b/libavformat/img2dec.c @@ -374,6 +374,33 @@ int ff_img_read_header(AVFormatContext *s1) return 0; } +/** + * Add this frame's source path and basename to packet's sidedata + * as a dictionary, so it can be used by filters like 'drawtext'. + */ +static int add_filename_as_pkt_side_data(char *filename, AVPacket *pkt) { + uint8_t* metadata; + int metadata_len; + AVDictionary *d = NULL; + char *packed_metadata = NULL; + + av_dict_set(&d, "lavf.image2dec.source_path", filename, 0); + av_dict_set(&d, "lavf.image2dec.source_basename", av_basename(filename), 0); + + packed_metadata = av_packet_pack_dictionary(d, &metadata_len); + av_dict_free(&d); + if (!packed_metadata) + return AVERROR(ENOMEM); + if (!(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, metadata_len))) { + av_freep(&packed_metadata); + return AVERROR(ENOMEM); + } + memcpy(metadata, packed_metadata, metadata_len); + av_freep(&packed_metadata); + + return 0; +} + int ff_img_read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoDemuxData *s = s1->priv_data; @@ -486,6 +513,17 @@ int ff_img_read_packet(AVFormatContext *s1, AVPacket *pkt) if (s->is_pipe) pkt->pos = avio_tell(f[0]); + /* + * export_path_metadata must be explicitly enabled via + * command line options for path metadata to be exported + * as packet side_data. + */ + if (!s->is_pipe && s->export_path_metadata == 1) { + res = add_filename_as_pkt_side_data(filename, pkt); + if (res < 0) + goto fail; + } + pkt->size = 0; for (i = 0; i < 3; i++) { if (f[i]) { @@ -585,6 +623,7 @@ const AVOption ff_img_options[] = { { "none", "none", 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 2, DEC, "ts_type" }, { "sec", "second precision", 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 2, DEC, "ts_type" }, { "ns", "nano second precision", 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 2, DEC, "ts_type" }, + { "export_path_metadata", "enable metadata containing input path information", OFFSET(export_path_metadata), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, DEC }, \ COMMON_OPTIONS }; diff --git a/libavformat/img2enc.c b/libavformat/img2enc.c index bec4bf81dd..5ee99890ce 100644 --- a/libavformat/img2enc.c +++ b/libavformat/img2enc.c @@ -35,7 +35,6 @@ typedef struct VideoMuxData { const AVClass *class; /**< Class for private options. */ int img_number; - int is_pipe; int split_planes; /**< use independent file for each Y, U, V plane */ char path[1024]; char tmp[4][1024]; @@ -55,12 +54,6 @@ static int write_header(AVFormatContext *s) av_strlcpy(img->path, s->url, sizeof(img->path)); - /* find format */ - if (s->oformat->flags & AVFMT_NOFILE) - img->is_pipe = 0; - else - img->is_pipe = 1; - if (st->codecpar->codec_id == AV_CODEC_ID_GIF) { img->muxer = "gif"; } else if (st->codecpar->codec_id == AV_CODEC_ID_FITS) { @@ -78,60 +71,109 @@ static int write_header(AVFormatContext *s) return 0; } +static int write_muxed_file(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt) +{ + VideoMuxData *img = s->priv_data; + AVCodecParameters *par = s->streams[pkt->stream_index]->codecpar; + AVStream *st; + AVPacket pkt2 = {0}; + AVFormatContext *fmt = NULL; + int ret; + + /* URL is not used directly as we are overriding the IO context later. 
*/ + ret = avformat_alloc_output_context2(&fmt, NULL, img->muxer, s->url); + if (ret < 0) + return ret; + st = avformat_new_stream(fmt, NULL); + if (!st) { + avformat_free_context(fmt); + return AVERROR(ENOMEM); + } + st->id = pkt->stream_index; + + fmt->pb = pb; + + ret = av_packet_ref(&pkt2, pkt); + if (ret < 0) + goto out; + pkt2.stream_index = 0; + + if ((ret = avcodec_parameters_copy(st->codecpar, par)) < 0 || + (ret = avformat_write_header(fmt, NULL)) < 0 || + (ret = av_interleaved_write_frame(fmt, &pkt2)) < 0 || + (ret = av_write_trailer(fmt))) {} + +out: + av_packet_unref(&pkt2); + avformat_free_context(fmt); + return ret; +} + +static int write_packet_pipe(AVFormatContext *s, AVPacket *pkt) +{ + VideoMuxData *img = s->priv_data; + if (img->muxer) { + int ret = write_muxed_file(s, s->pb, pkt); + if (ret < 0) + return ret; + } else { + avio_write(s->pb, pkt->data, pkt->size); + } + img->img_number++; + return 0; +} + static int write_packet(AVFormatContext *s, AVPacket *pkt) { VideoMuxData *img = s->priv_data; - AVIOContext *pb[4]; + AVIOContext *pb[4] = {0}; char filename[1024]; AVCodecParameters *par = s->streams[pkt->stream_index]->codecpar; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(par->format); - int i; + int ret, i; int nb_renames = 0; - if (!img->is_pipe) { - if (img->update) { - av_strlcpy(filename, img->path, sizeof(filename)); - } else if (img->use_strftime) { - time_t now0; - struct tm *tm, tmpbuf; - time(&now0); - tm = localtime_r(&now0, &tmpbuf); - if (!strftime(filename, sizeof(filename), img->path, tm)) { - av_log(s, AV_LOG_ERROR, "Could not get frame filename with strftime\n"); - return AVERROR(EINVAL); - } - } else if (img->frame_pts) { - if (av_get_frame_filename2(filename, sizeof(filename), img->path, pkt->pts, AV_FRAME_FILENAME_FLAGS_MULTIPLE) < 0) { - av_log(s, AV_LOG_ERROR, "Cannot write filename by pts of the frames."); - return AVERROR(EINVAL); - } - } else if (av_get_frame_filename2(filename, sizeof(filename), img->path, - img->img_number, - AV_FRAME_FILENAME_FLAGS_MULTIPLE) < 0 && - img->img_number > 1) { - av_log(s, AV_LOG_ERROR, - "Could not get frame filename number %d from pattern '%s'. " - "Use '-frames:v 1' for a single image, or '-update' option, or use a pattern such as %%03d within the filename.\n", - img->img_number, img->path); + if (img->update) { + av_strlcpy(filename, img->path, sizeof(filename)); + } else if (img->use_strftime) { + time_t now0; + struct tm *tm, tmpbuf; + time(&now0); + tm = localtime_r(&now0, &tmpbuf); + if (!strftime(filename, sizeof(filename), img->path, tm)) { + av_log(s, AV_LOG_ERROR, "Could not get frame filename with strftime\n"); return AVERROR(EINVAL); } - for (i = 0; i < 4; i++) { - snprintf(img->tmp[i], sizeof(img->tmp[i]), "%s.tmp", filename); - av_strlcpy(img->target[i], filename, sizeof(img->target[i])); - if (s->io_open(s, &pb[i], img->use_rename ? img->tmp[i] : filename, AVIO_FLAG_WRITE, NULL) < 0) { - av_log(s, AV_LOG_ERROR, "Could not open file : %s\n", img->use_rename ? 
img->tmp[i] : filename); - return AVERROR(EIO); - } - - if (!img->split_planes || i+1 >= desc->nb_components) - break; - filename[strlen(filename) - 1] = "UVAx"[i]; + } else if (img->frame_pts) { + if (av_get_frame_filename2(filename, sizeof(filename), img->path, pkt->pts, AV_FRAME_FILENAME_FLAGS_MULTIPLE) < 0) { + av_log(s, AV_LOG_ERROR, "Cannot write filename by pts of the frames."); + return AVERROR(EINVAL); } - if (img->use_rename) - nb_renames = i + 1; - } else { - pb[0] = s->pb; + } else if (av_get_frame_filename2(filename, sizeof(filename), img->path, + img->img_number, + AV_FRAME_FILENAME_FLAGS_MULTIPLE) < 0 && + img->img_number > 1) { + av_log(s, AV_LOG_ERROR, + "Could not get frame filename number %d from pattern '%s'. " + "Use '-frames:v 1' for a single image, or '-update' option, or use a pattern such as %%03d within the filename.\n", + img->img_number, img->path); + return AVERROR(EINVAL); } + for (i = 0; i < 4; i++) { + snprintf(img->tmp[i], sizeof(img->tmp[i]), "%s.tmp", filename); + av_strlcpy(img->target[i], filename, sizeof(img->target[i])); + if (s->io_open(s, &pb[i], img->use_rename ? img->tmp[i] : filename, AVIO_FLAG_WRITE, NULL) < 0) { + av_log(s, AV_LOG_ERROR, "Could not open file : %s\n", img->use_rename ? img->tmp[i] : filename); + ret = AVERROR(EIO); + goto fail; + } + + if (!img->split_planes || i+1 >= desc->nb_components) + break; + filename[strlen(filename) - 1] = "UVAx"[i]; + } + if (img->use_rename) + nb_renames = i + 1; if (img->split_planes) { int ysize = par->width * par->height; @@ -150,50 +192,28 @@ static int write_packet(AVFormatContext *s, AVPacket *pkt) ff_format_io_close(s, &pb[3]); } } else if (img->muxer) { - int ret; - AVStream *st; - AVPacket pkt2 = {0}; - AVFormatContext *fmt = NULL; - - av_assert0(!img->split_planes); - - ret = avformat_alloc_output_context2(&fmt, NULL, img->muxer, s->url); + ret = write_muxed_file(s, pb[0], pkt); if (ret < 0) - return ret; - st = avformat_new_stream(fmt, NULL); - if (!st) { - avformat_free_context(fmt); - return AVERROR(ENOMEM); - } - st->id = pkt->stream_index; - - fmt->pb = pb[0]; - if ((ret = av_packet_ref(&pkt2, pkt)) < 0 || - (ret = avcodec_parameters_copy(st->codecpar, s->streams[0]->codecpar)) < 0 || - (ret = avformat_write_header(fmt, NULL)) < 0 || - (ret = av_interleaved_write_frame(fmt, &pkt2)) < 0 || - (ret = av_write_trailer(fmt)) < 0) { - av_packet_unref(&pkt2); - avformat_free_context(fmt); - return ret; - } - av_packet_unref(&pkt2); - avformat_free_context(fmt); + goto fail; } else { avio_write(pb[0], pkt->data, pkt->size); } avio_flush(pb[0]); - if (!img->is_pipe) { - ff_format_io_close(s, &pb[0]); - for (i = 0; i < nb_renames; i++) { - int ret = ff_rename(img->tmp[i], img->target[i], s); - if (ret < 0) - return ret; - } + ff_format_io_close(s, &pb[0]); + for (i = 0; i < nb_renames; i++) { + int ret = ff_rename(img->tmp[i], img->target[i], s); + if (ret < 0) + return ret; } img->img_number++; return 0; + +fail: + for (i = 0; i < FF_ARRAY_ELEMS(pb); i++) + if (pb[i]) + ff_format_io_close(s, &pb[i]); + return ret; } static int query_codec(enum AVCodecID id, int std_compliance) @@ -248,7 +268,7 @@ AVOutputFormat ff_image2pipe_muxer = { .priv_data_size = sizeof(VideoMuxData), .video_codec = AV_CODEC_ID_MJPEG, .write_header = write_header, - .write_packet = write_packet, + .write_packet = write_packet_pipe, .query_codec = query_codec, .flags = AVFMT_NOTIMESTAMPS | AVFMT_NODIMENSIONS }; diff --git a/libavformat/internal.h b/libavformat/internal.h index ec9a29907a..4d04a21871 100644 --- 
a/libavformat/internal.h +++ b/libavformat/internal.h @@ -243,7 +243,7 @@ void ff_read_frame_flush(AVFormatContext *s); #define NTP_OFFSET_US (NTP_OFFSET * 1000000ULL) /** Get the current time since NTP epoch in microseconds. */ -uint64_t ff_ntp_time(void); +uint64_t ff_ntp_time(int64_t timestamp); /** * Get the NTP time stamp formatted as per the RFC-5905. diff --git a/libavformat/isom.c b/libavformat/isom.c index edd0d81063..824e811177 100644 --- a/libavformat/isom.c +++ b/libavformat/isom.c @@ -371,6 +371,7 @@ const AVCodecTag ff_codec_movaudio_tags[] = { { AV_CODEC_ID_FLAC, MKTAG('f', 'L', 'a', 'C') }, /* nonstandard */ { AV_CODEC_ID_TRUEHD, MKTAG('m', 'l', 'p', 'a') }, /* mp4ra.org */ { AV_CODEC_ID_OPUS, MKTAG('O', 'p', 'u', 's') }, /* mp4ra.org */ + { AV_CODEC_ID_MPEGH_3D_AUDIO, MKTAG('m', 'h', 'm', '1') }, /* MPEG-H 3D Audio bitstream */ { AV_CODEC_ID_NONE, 0 }, }; diff --git a/libavformat/jacosubenc.c b/libavformat/jacosubenc.c index 0954f5f058..77575c6b3c 100644 --- a/libavformat/jacosubenc.c +++ b/libavformat/jacosubenc.c @@ -25,7 +25,6 @@ static int jacosub_write_header(AVFormatContext *s) if (par->extradata_size) { avio_write(s->pb, par->extradata, par->extradata_size - 1); - avio_flush(s->pb); } return 0; } diff --git a/libavformat/jvdec.c b/libavformat/jvdec.c index 17ada7b0f1..551f8069e6 100644 --- a/libavformat/jvdec.c +++ b/libavformat/jvdec.c @@ -168,6 +168,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) JVDemuxContext *jv = s->priv_data; AVIOContext *pb = s->pb; AVStream *ast = s->streams[0]; + int ret; while (!avio_feof(s->pb) && jv->pts < ast->nb_index_entries) { const AVIndexEntry *e = ast->index_entries + jv->pts; @@ -177,8 +178,8 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) case JV_AUDIO: jv->state++; if (jvf->audio_size) { - if (av_get_packet(s->pb, pkt, jvf->audio_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_get_packet(s->pb, pkt, jvf->audio_size)) < 0) + return ret; pkt->stream_index = 0; pkt->pts = e->timestamp; pkt->flags |= AV_PKT_FLAG_KEY; @@ -187,10 +188,9 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) case JV_VIDEO: jv->state++; if (jvf->video_size || jvf->palette_size) { - int ret; int size = jvf->video_size + jvf->palette_size; - if (av_new_packet(pkt, size + JV_PREAMBLE_SIZE)) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, size + JV_PREAMBLE_SIZE)) < 0) + return ret; AV_WL32(pkt->data, jvf->video_size); pkt->data[4] = jvf->video_type; diff --git a/libavformat/libmodplug.c b/libavformat/libmodplug.c index d4f78d99b1..6a32618e6f 100644 --- a/libavformat/libmodplug.c +++ b/libavformat/libmodplug.c @@ -270,6 +270,7 @@ static void write_text(uint8_t *dst, const char *s, int linesize, int x, int y) static int modplug_read_packet(AVFormatContext *s, AVPacket *pkt) { ModPlugContext *modplug = s->priv_data; + int ret; if (modplug->video_stream) { modplug->video_switch ^= 1; // one video packet for one audio packet @@ -285,8 +286,8 @@ static int modplug_read_packet(AVFormatContext *s, AVPacket *pkt) var_values[VAR_PATTERN] = ModPlug_GetCurrentPattern(modplug->f); var_values[VAR_ROW ] = ModPlug_GetCurrentRow (modplug->f); - if (av_new_packet(pkt, modplug->fsize) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, modplug->fsize)) < 0) + return ret; pkt->stream_index = 1; memset(pkt->data, 0, modplug->fsize); @@ -318,8 +319,8 @@ static int modplug_read_packet(AVFormatContext *s, AVPacket *pkt) } } - if (av_new_packet(pkt, AUDIO_PKT_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = 
av_new_packet(pkt, AUDIO_PKT_SIZE)) < 0) + return ret; if (modplug->video_stream) pkt->pts = pkt->dts = modplug->packet_count++ * modplug->ts_per_packet; diff --git a/libavformat/libsrt.c b/libavformat/libsrt.c index 2fdfe8e203..16975b6d94 100644 --- a/libavformat/libsrt.c +++ b/libavformat/libsrt.c @@ -62,9 +62,11 @@ typedef struct SRTContext { int64_t maxbw; int pbkeylen; char *passphrase; +#if SRT_VERSION_VALUE >= 0x010302 int enforced_encryption; int kmrefreshrate; int kmpreannounce; +#endif int mss; int ffs; int ipttl; @@ -105,9 +107,11 @@ static const AVOption libsrt_options[] = { { "maxbw", "Maximum bandwidth (bytes per second) that the connection can use", OFFSET(maxbw), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, .flags = D|E }, { "pbkeylen", "Crypto key len in bytes {16,24,32} Default: 16 (128-bit)", OFFSET(pbkeylen), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 32, .flags = D|E }, { "passphrase", "Crypto PBKDF2 Passphrase size[0,10..64] 0:disable crypto", OFFSET(passphrase), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = D|E }, - { "enforced_encryption", "Enforces that both connection parties have the same passphrase set ", OFFSET(enforced_encryption), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, .flags = D|E }, +#if SRT_VERSION_VALUE >= 0x010302 + { "enforced_encryption", "Enforces that both connection parties have the same passphrase set", OFFSET(enforced_encryption), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, .flags = D|E }, { "kmrefreshrate", "The number of packets to be transmitted after which the encryption key is switched to a new key", OFFSET(kmrefreshrate), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, { "kmpreannounce", "The interval between when a new encryption key is sent and when switchover occurs", OFFSET(kmpreannounce), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, +#endif { "mss", "The Maximum Segment Size", OFFSET(mss), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1500, .flags = D|E }, { "ffs", "Flight flag size (window size) (in bytes)", OFFSET(ffs), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, { "ipttl", "IP Time To Live", OFFSET(ipttl), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, .flags = D|E }, @@ -327,12 +331,15 @@ static int libsrt_set_options_pre(URLContext *h, int fd) (s->maxbw >= 0 && libsrt_setsockopt(h, fd, SRTO_MAXBW, "SRTO_MAXBW", &s->maxbw, sizeof(s->maxbw)) < 0) || (s->pbkeylen >= 0 && libsrt_setsockopt(h, fd, SRTO_PBKEYLEN, "SRTO_PBKEYLEN", &s->pbkeylen, sizeof(s->pbkeylen)) < 0) || (s->passphrase && libsrt_setsockopt(h, fd, SRTO_PASSPHRASE, "SRTO_PASSPHRASE", s->passphrase, strlen(s->passphrase)) < 0) || - (s->enforced_encryption >= 0 && libsrt_setsockopt(h, fd, SRTO_ENFORCEDENCRYPTION, "SRTO_ENFORCEDENCRYPTION", &s->enforced_encryption, sizeof(s->enforced_encryption)) < 0) || +#if SRT_VERSION_VALUE >= 0x010302 + /* SRTO_STRICTENC == SRTO_ENFORCEDENCRYPTION (53), but for compatibility, we used SRTO_STRICTENC */ + (s->enforced_encryption >= 0 && libsrt_setsockopt(h, fd, SRTO_STRICTENC, "SRTO_STRICTENC", &s->enforced_encryption, sizeof(s->enforced_encryption)) < 0) || (s->kmrefreshrate >= 0 && libsrt_setsockopt(h, fd, SRTO_KMREFRESHRATE, "SRTO_KMREFRESHRATE", &s->kmrefreshrate, sizeof(s->kmrefreshrate)) < 0) || (s->kmpreannounce >= 0 && libsrt_setsockopt(h, fd, SRTO_KMPREANNOUNCE, "SRTO_KMPREANNOUNCE", &s->kmpreannounce, sizeof(s->kmpreannounce)) < 0) || - (s->mss >= 0 && libsrt_setsockopt(h, fd, SRTO_MSS, "SRTO_MMS", &s->mss, sizeof(s->mss)) < 0) || +#endif + (s->mss >= 0 && libsrt_setsockopt(h, fd, SRTO_MSS, "SRTO_MSS", 
&s->mss, sizeof(s->mss)) < 0) || (s->ffs >= 0 && libsrt_setsockopt(h, fd, SRTO_FC, "SRTO_FC", &s->ffs, sizeof(s->ffs)) < 0) || - (s->ipttl >= 0 && libsrt_setsockopt(h, fd, SRTO_IPTTL, "SRTO_UPTTL", &s->ipttl, sizeof(s->ipttl)) < 0) || + (s->ipttl >= 0 && libsrt_setsockopt(h, fd, SRTO_IPTTL, "SRTO_IPTTL", &s->ipttl, sizeof(s->ipttl)) < 0) || (s->iptos >= 0 && libsrt_setsockopt(h, fd, SRTO_IPTOS, "SRTO_IPTOS", &s->iptos, sizeof(s->iptos)) < 0) || (s->latency >= 0 && libsrt_setsockopt(h, fd, SRTO_LATENCY, "SRTO_LATENCY", &latency, sizeof(latency)) < 0) || (s->rcvlatency >= 0 && libsrt_setsockopt(h, fd, SRTO_RCVLATENCY, "SRTO_RCVLATENCY", &rcvlatency, sizeof(rcvlatency)) < 0) || diff --git a/libavformat/matroskadec.c b/libavformat/matroskadec.c index 72624dc3f1..4d7fdab99f 100644 --- a/libavformat/matroskadec.c +++ b/libavformat/matroskadec.c @@ -1599,6 +1599,7 @@ static int matroska_decode_buffer(uint8_t **buf, int *buf_size, #if CONFIG_LZO case MATROSKA_TRACK_ENCODING_COMP_LZO: do { + int insize = isize; olen = pkt_size *= 3; newpktdata = av_realloc(pkt_data, pkt_size + AV_LZO_OUTPUT_PADDING + AV_INPUT_BUFFER_PADDING_SIZE); @@ -1607,7 +1608,7 @@ static int matroska_decode_buffer(uint8_t **buf, int *buf_size, goto failed; } pkt_data = newpktdata; - result = av_lzo1x_decode(pkt_data, &olen, data, &isize); + result = av_lzo1x_decode(pkt_data, &olen, data, &insize); } while (result == AV_LZO_OUTPUT_FULL && pkt_size < 10000000); if (result) { result = AVERROR_INVALIDDATA; @@ -2402,8 +2403,8 @@ static int matroska_parse_tracks(AVFormatContext *s) if (key_id_base64) { /* export encryption key id as base64 metadata tag */ - av_dict_set(&st->metadata, "enc_key_id", key_id_base64, 0); - av_freep(&key_id_base64); + av_dict_set(&st->metadata, "enc_key_id", key_id_base64, + AV_DICT_DONT_STRDUP_VAL); } if (!strcmp(track->codec_id, "V_MS/VFW/FOURCC") && @@ -4110,8 +4111,8 @@ static int webm_dash_manifest_cues(AVFormatContext *s, int64_t init_range) } end += ret; } - av_dict_set(&s->streams[0]->metadata, CUE_TIMESTAMPS, buf, 0); - av_free(buf); + av_dict_set(&s->streams[0]->metadata, CUE_TIMESTAMPS, + buf, AV_DICT_DONT_STRDUP_VAL); return 0; } @@ -4136,8 +4137,8 @@ static int webm_dash_manifest_read_header(AVFormatContext *s) if (!matroska->is_live) { buf = av_asprintf("%g", matroska->duration); if (!buf) return AVERROR(ENOMEM); - av_dict_set(&s->streams[0]->metadata, DURATION, buf, 0); - av_free(buf); + av_dict_set(&s->streams[0]->metadata, DURATION, + buf, AV_DICT_DONT_STRDUP_VAL); // initialization range // 5 is the offset of Cluster ID. diff --git a/libavformat/matroskaenc.c b/libavformat/matroskaenc.c index 570f361351..953421435d 100644 --- a/libavformat/matroskaenc.c +++ b/libavformat/matroskaenc.c @@ -218,7 +218,7 @@ static int ebml_num_size(uint64_t num) * Write a number in EBML variable length format. * * @param bytes The number of bytes that need to be used to write the number. - * If zero, any number of bytes can be used. + * If zero, the minimal number of bytes will be used. */ static void put_ebml_num(AVIOContext *pb, uint64_t num, int bytes) { @@ -228,10 +228,9 @@ static void put_ebml_num(AVIOContext *pb, uint64_t num, int bytes) av_assert0(num < (1ULL << 56) - 1); if (bytes == 0) - // don't care how many bytes are used, so use the min bytes = needed_bytes; - // the bytes needed to write the given size would exceed the bytes - // that we need to use, so write unknown size. This shouldn't happen. + // The bytes needed to write the given size must not exceed + // the bytes that we ought to use. 
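/* Illustrative sketch, not part of the patch: how the EBML length marker used
 * by put_ebml_num() above is assumed to work. A value written on N bytes gets
 * its marker bit at bit position 7*N ("num |= 1ULL << bytes * 7" below), so at
 * most 8 bytes / 56 payload bits are available, which is what the assert on
 * num < (1ULL << 56) - 1 enforces; the minimal byte count is therefore the
 * smallest N with num <= 2^(7*N) - 2, since the all-ones pattern on N bytes is
 * reserved for "unknown size". The helper name is local to this sketch and it
 * assumes <stdint.h>. */
static int ebml_bytes_needed_sketch(uint64_t num)
{
    int bytes = 1;
    while (num + 1 >= 1ULL << bytes * 7) /* all-ones on N bytes is reserved */
        bytes++;
    return bytes;
}
/* e.g. ebml_bytes_needed_sketch(126) == 1, ebml_bytes_needed_sketch(127) == 2,
 * and the one-byte wire form of 5 is 0x85 (marker bit | value). */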
av_assert0(bytes >= needed_bytes); num |= 1ULL << bytes * 7; @@ -749,9 +748,8 @@ static int mkv_write_native_codecprivate(AVFormatContext *s, AVIOContext *pb, return ff_isom_write_avcc(dyn_cp, par->extradata, par->extradata_size); case AV_CODEC_ID_HEVC: - ff_isom_write_hvcc(dyn_cp, par->extradata, - par->extradata_size, 0); - return 0; + return ff_isom_write_hvcc(dyn_cp, par->extradata, + par->extradata_size, 0); case AV_CODEC_ID_AV1: if (par->extradata_size) return ff_isom_write_av1c(dyn_cp, par->extradata, @@ -2002,8 +2000,6 @@ static int mkv_write_header(AVFormatContext *s) mkv->cur_audio_pkt.size = 0; mkv->cluster_pos = -1; - avio_flush(pb); - // start a new cluster every 5 MB or 5 sec, or 32k / 1 sec for streaming or // after 4k and on a keyframe if (pb->seekable & AVIO_SEEKABLE_NORMAL) { @@ -2167,9 +2163,9 @@ static void mkv_write_block(AVFormatContext *s, AVIOContext *pb, av_free(data); if (blockid == MATROSKA_ID_BLOCK && !keyframe) { - put_ebml_sint(pb, MATROSKA_ID_BLOCKREFERENCE, track->last_timestamp); + put_ebml_sint(pb, MATROSKA_ID_BLOCKREFERENCE, track->last_timestamp - ts); } - track->last_timestamp = ts - mkv->cluster_pts; + track->last_timestamp = ts; if (discard_padding) { put_ebml_sint(pb, MATROSKA_ID_DISCARDPADDING, discard_padding); @@ -2239,7 +2235,7 @@ static void mkv_end_cluster(AVFormatContext *s) end_ebml_master_crc32(s->pb, &mkv->cluster_bc, mkv); mkv->cluster_pos = -1; - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); } static int mkv_check_new_extra_data(AVFormatContext *s, AVPacket *pkt) @@ -2264,7 +2260,6 @@ static int mkv_check_new_extra_data(AVFormatContext *s, AVPacket *pkt) return ret; if (!output_sample_rate) output_sample_rate = track->sample_rate; // Space is already reserved, so it's this or a void element. 
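/* Editorial note, not part of the patch: this removal, like the matching ones
 * in mov.c, nuv.c, iff.c and microdvddec.c elsewhere in this series, assumes
 * that ff_alloc_extradata()/ff_get_extradata() first release any extradata
 * already attached to the AVCodecParameters, zero the padding bytes, and leave
 * extradata_size at 0 on failure, so the manual av_freep() becomes redundant.
 * Under that assumption the recurring idiom becomes simply:
 *
 *     if ((ret = ff_alloc_extradata(par, size)) < 0)
 *         return ret;                    // propagate the real error code
 *     memcpy(par->extradata, src, size); // par, src, size from the caller
 *
 * instead of av_freep() + av_malloc() + a blanket AVERROR(ENOMEM). */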
- av_freep(&par->extradata); ret = ff_alloc_extradata(par, side_data_size); if (ret < 0) return ret; diff --git a/libavformat/microdvddec.c b/libavformat/microdvddec.c index ca9086afe9..08e6fca09c 100644 --- a/libavformat/microdvddec.c +++ b/libavformat/microdvddec.c @@ -117,10 +117,11 @@ static int microdvd_read_header(AVFormatContext *s) continue; } if (!st->codecpar->extradata && sscanf(line, "{DEFAULT}{}%c", &c) == 1) { - st->codecpar->extradata = av_strdup(line + 11); - if (!st->codecpar->extradata) - return AVERROR(ENOMEM); - st->codecpar->extradata_size = strlen(st->codecpar->extradata) + 1; + int ret, size = strlen(line + 11); + ret = ff_alloc_extradata(st->codecpar, size); + if (ret < 0) + return ret; + memcpy(st->codecpar->extradata, line + 11, size); continue; } } diff --git a/libavformat/microdvdenc.c b/libavformat/microdvdenc.c index 04f475b645..1cd215d8de 100644 --- a/libavformat/microdvdenc.c +++ b/libavformat/microdvdenc.c @@ -36,7 +36,7 @@ static int microdvd_write_header(struct AVFormatContext *s) if (par->extradata && par->extradata_size > 0) { avio_write(s->pb, "{DEFAULT}{}", 11); avio_write(s->pb, par->extradata, par->extradata_size); - avio_flush(s->pb); + avio_w8(s->pb, '\n'); } avpriv_set_pts_info(s->streams[0], 64, framerate.num, framerate.den); @@ -51,7 +51,7 @@ static int microdvd_write_packet(AVFormatContext *avf, AVPacket *pkt) else avio_printf(avf->pb, "{%"PRId64"}", pkt->pts + pkt->duration); avio_write(avf->pb, pkt->data, pkt->size); - avio_write(avf->pb, "\n", 1); + avio_w8(avf->pb, '\n'); return 0; } diff --git a/libavformat/mm.c b/libavformat/mm.c index 83b3c200c6..d40fd12acc 100644 --- a/libavformat/mm.c +++ b/libavformat/mm.c @@ -142,6 +142,7 @@ static int read_packet(AVFormatContext *s, AVIOContext *pb = s->pb; unsigned char preamble[MM_PREAMBLE_SIZE]; unsigned int type, length; + int ret; while(1) { @@ -161,8 +162,8 @@ static int read_packet(AVFormatContext *s, case MM_TYPE_INTRA_HHV : case MM_TYPE_INTER_HHV : /* output preamble + data */ - if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE)) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, length + MM_PREAMBLE_SIZE)) < 0) + return ret; memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE); if (avio_read(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length) return AVERROR(EIO); @@ -174,8 +175,8 @@ static int read_packet(AVFormatContext *s, return 0; case MM_TYPE_AUDIO : - if (av_get_packet(s->pb, pkt, length)<0) - return AVERROR(ENOMEM); + if ((ret = av_get_packet(s->pb, pkt, length)) < 0) + return ret; pkt->stream_index = 1; pkt->pts = mm->audio_pts++; return 0; diff --git a/libavformat/mmf.c b/libavformat/mmf.c index 917113066a..e4768db064 100644 --- a/libavformat/mmf.c +++ b/libavformat/mmf.c @@ -123,8 +123,6 @@ static int mmf_write_header(AVFormatContext *s) avpriv_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codecpar->sample_rate); - avio_flush(pb); - return 0; } @@ -173,8 +171,6 @@ static int mmf_write_trailer(AVFormatContext *s) avio_write(pb, "\x00\x00\x00\x00", 4); avio_seek(pb, pos, SEEK_SET); - - avio_flush(pb); } return 0; } diff --git a/libavformat/mov.c b/libavformat/mov.c index 7553a7fdfc..589576b529 100644 --- a/libavformat/mov.c +++ b/libavformat/mov.c @@ -1129,8 +1129,8 @@ static int mov_read_ftyp(MOVContext *c, AVIOContext *pb, MOVAtom atom) return ret; } comp_brands_str[comp_brand_size] = 0; - av_dict_set(&c->fc->metadata, "compatible_brands", comp_brands_str, 0); - av_freep(&comp_brands_str); + av_dict_set(&c->fc->metadata, "compatible_brands", + comp_brands_str, 
AV_DICT_DONT_STRDUP_VAL); return 0; } @@ -1838,7 +1838,6 @@ static int mov_read_wave(MOVContext *c, AVIOContext *pb, MOVAtom atom) st->codecpar->codec_id == AV_CODEC_ID_QDMC || st->codecpar->codec_id == AV_CODEC_ID_SPEEX) { // pass all frma atom to codec, needed at least for QDMC and QDM2 - av_freep(&st->codecpar->extradata); ret = ff_get_extradata(c->fc, st->codecpar, pb, atom.size); if (ret < 0) return ret; @@ -1905,7 +1904,6 @@ static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom) av_log(c->fc, AV_LOG_WARNING, "ignoring multiple glbl\n"); return 0; } - av_freep(&st->codecpar->extradata); ret = ff_get_extradata(c->fc, st->codecpar, pb, atom.size); if (ret < 0) return ret; @@ -1938,7 +1936,6 @@ static int mov_read_dvc1(MOVContext *c, AVIOContext *pb, MOVAtom atom) return 0; avio_seek(pb, 6, SEEK_CUR); - av_freep(&st->codecpar->extradata); ret = ff_get_extradata(c->fc, st->codecpar, pb, atom.size - 7); if (ret < 0) return ret; @@ -1966,7 +1963,6 @@ static int mov_read_strf(MOVContext *c, AVIOContext *pb, MOVAtom atom) return AVERROR_INVALIDDATA; avio_skip(pb, 40); - av_freep(&st->codecpar->extradata); ret = ff_get_extradata(c->fc, st->codecpar, pb, atom.size - 40); if (ret < 0) return ret; @@ -2255,7 +2251,7 @@ static int mov_rewrite_dvd_sub_extradata(AVStream *st) { char buf[256] = {0}; uint8_t *src = st->codecpar->extradata; - int i; + int i, ret; if (st->codecpar->extradata_size != 64) return 0; @@ -2275,12 +2271,9 @@ static int mov_rewrite_dvd_sub_extradata(AVStream *st) if (av_strlcat(buf, "\n", sizeof(buf)) >= sizeof(buf)) return 0; - av_freep(&st->codecpar->extradata); - st->codecpar->extradata_size = 0; - st->codecpar->extradata = av_mallocz(strlen(buf) + AV_INPUT_BUFFER_PADDING_SIZE); - if (!st->codecpar->extradata) - return AVERROR(ENOMEM); - st->codecpar->extradata_size = strlen(buf); + ret = ff_alloc_extradata(st->codecpar, strlen(buf)); + if (ret < 0) + return ret; memcpy(st->codecpar->extradata, buf, st->codecpar->extradata_size); return 0; @@ -5801,8 +5794,8 @@ static int mov_read_uuid(MOVContext *c, AVIOContext *pb, MOVAtom atom) return AVERROR_INVALIDDATA; } buffer[len] = '\0'; - av_dict_set(&c->fc->metadata, "xmp", buffer, 0); - av_free(buffer); + av_dict_set(&c->fc->metadata, "xmp", + buffer, AV_DICT_DONT_STRDUP_VAL); } else { // skip all uuid atom, which makes it fast for long uuid-xmp file ret = avio_skip(pb, len); @@ -6676,6 +6669,7 @@ static int cenc_filter(MOVContext *mov, AVStream* st, MOVStreamContext *sc, AVPa static int mov_read_dops(MOVContext *c, AVIOContext *pb, MOVAtom atom) { const int OPUS_SEEK_PREROLL_MS = 80; + int ret; AVStream *st; size_t size; uint16_t pre_skip; @@ -6696,8 +6690,8 @@ static int mov_read_dops(MOVContext *c, AVIOContext *pb, MOVAtom atom) /* OpusSpecificBox size plus magic for Ogg OpusHead header. 
*/ size = atom.size + 8; - if (ff_alloc_extradata(st->codecpar, size)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, size)) < 0) + return ret; AV_WL32(st->codecpar->extradata, MKTAG('O','p','u','s')); AV_WL32(st->codecpar->extradata + 4, MKTAG('H','e','a','d')); diff --git a/libavformat/movenc.c b/libavformat/movenc.c index dd144ae20a..a2cfc59b89 100644 --- a/libavformat/movenc.c +++ b/libavformat/movenc.c @@ -72,6 +72,7 @@ static const AVOption options[] = { { "disable_chpl", "Disable Nero chapter atom", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DISABLE_CHPL}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "default_base_moof", "Set the default-base-is-moof flag in tfhd atoms", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DEFAULT_BASE_MOOF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "dash", "Write DASH compatible fragmented MP4", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DASH}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, + { "cmaf", "Write CMAF compatible fragmented MP4", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_CMAF}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "frag_discont", "Signal that the next fragment is discontinuous from earlier ones", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_FRAG_DISCONT}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "delay_moov", "Delay writing the initial moov until the first fragment is cut, or until the first fragment flush", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_DELAY_MOOV}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, { "global_sidx", "Write a global sidx index at the start of the file", 0, AV_OPT_TYPE_CONST, {.i64 = FF_MOV_FLAG_GLOBAL_SIDX}, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM, "movflags" }, @@ -2493,6 +2494,7 @@ static int mov_write_stbl_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext mov_write_stts_tag(pb, track); if ((track->par->codec_type == AVMEDIA_TYPE_VIDEO || track->par->codec_id == AV_CODEC_ID_TRUEHD || + track->par->codec_id == AV_CODEC_ID_MPEGH_3D_AUDIO || track->par->codec_tag == MKTAG('r','t','p',' ')) && track->has_keyframes && track->has_keyframes < track->entry) mov_write_stss_tag(pb, track, MOV_SYNC_SAMPLE); @@ -2760,10 +2762,28 @@ static int mov_write_minf_tag(AVFormatContext *s, AVIOContext *pb, MOVMuxContext return update_size(pb, pos); } +static int64_t calc_pts_duration(MOVMuxContext *mov, MOVTrack *track) +{ + if (track->tag == MKTAG('t','m','c','d')) { + // tmcd tracks gets track_duration set in mov_write_moov_tag from + // another track's duration, while the end_pts may be left at zero. + // Calculate the pts duration for that track instead. + return av_rescale(calc_pts_duration(mov, &mov->tracks[track->src_track]), + track->timescale, mov->tracks[track->src_track].timescale); + } + if (track->end_pts != AV_NOPTS_VALUE && + track->start_dts != AV_NOPTS_VALUE && + track->start_cts != AV_NOPTS_VALUE) { + return track->end_pts - (track->start_dts + track->start_cts); + } + return track->track_duration; +} + static int mov_write_mdhd_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) { - int version = track->track_duration < INT32_MAX ? 0 : 1; + int64_t duration = calc_pts_duration(mov, track); + int version = duration < INT32_MAX ? 0 : 1; if (track->mode == MODE_ISM) version = 1; @@ -2785,7 +2805,7 @@ static int mov_write_mdhd_tag(AVIOContext *pb, MOVMuxContext *mov, else if (!track->entry) (version == 1) ? avio_wb64(pb, 0) : avio_wb32(pb, 0); else - (version == 1) ? 
avio_wb64(pb, track->track_duration) : avio_wb32(pb, track->track_duration); /* duration */ + (version == 1) ? avio_wb64(pb, duration) : avio_wb32(pb, duration); /* duration */ avio_wb16(pb, track->language); /* language */ avio_wb16(pb, 0); /* reserved (quality) */ @@ -2835,8 +2855,9 @@ static void write_matrix(AVIOContext *pb, int16_t a, int16_t b, int16_t c, static int mov_write_tkhd_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track, AVStream *st) { - int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE, - track->timescale, AV_ROUND_UP); + int64_t duration = av_rescale_rnd(calc_pts_duration(mov, track), + MOV_TIMESCALE, track->timescale, + AV_ROUND_UP); int version = duration < INT32_MAX ? 0 : 1; int flags = MOV_TKHD_FLAG_IN_MOVIE; int rotation = 0; @@ -2982,8 +3003,9 @@ static int mov_write_tapt_tag(AVIOContext *pb, MOVTrack *track) static int mov_write_edts_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) { - int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE, - track->timescale, AV_ROUND_UP); + int64_t duration = av_rescale_rnd(calc_pts_duration(mov, track), + MOV_TIMESCALE, track->timescale, + AV_ROUND_UP); int version = duration < INT32_MAX ? 0 : 1; int entry_size, entry_count, size; int64_t delay, start_ct = track->start_cts; @@ -3269,7 +3291,8 @@ static int mov_write_mvhd_tag(AVIOContext *pb, MOVMuxContext *mov) for (i = 0; i < mov->nb_streams; i++) { if (mov->tracks[i].entry > 0 && mov->tracks[i].timescale) { - int64_t max_track_len_temp = av_rescale_rnd(mov->tracks[i].track_duration, + int64_t max_track_len_temp = av_rescale_rnd( + calc_pts_duration(mov, &mov->tracks[i]), MOV_TIMESCALE, mov->tracks[i].timescale, AV_ROUND_UP); @@ -4209,6 +4232,9 @@ static int mov_write_tfhd_tag(AVIOContext *pb, MOVMuxContext *mov, flags &= ~MOV_TFHD_BASE_DATA_OFFSET; flags |= MOV_TFHD_DEFAULT_BASE_IS_MOOF; } + /* CMAF requires all values to be explicit in tfhd atoms */ + if (mov->flags & FF_MOV_FLAG_CMAF) + flags |= MOV_TFHD_STSD_ID; /* Don't set a default sample size, the silverlight player refuses * to play files with that set. Don't set a default sample duration, @@ -4216,7 +4242,7 @@ static int mov_write_tfhd_tag(AVIOContext *pb, MOVMuxContext *mov, * file format says it MUST NOT be set. */ if (track->mode == MODE_ISM) flags &= ~(MOV_TFHD_DEFAULT_SIZE | MOV_TFHD_DEFAULT_DURATION | - MOV_TFHD_BASE_DATA_OFFSET); + MOV_TFHD_BASE_DATA_OFFSET | MOV_TFHD_STSD_ID); avio_wb32(pb, 0); /* size placeholder */ ffio_wfourcc(pb, "tfhd"); @@ -4226,6 +4252,9 @@ static int mov_write_tfhd_tag(AVIOContext *pb, MOVMuxContext *mov, avio_wb32(pb, track->track_id); /* track-id */ if (flags & MOV_TFHD_BASE_DATA_OFFSET) avio_wb64(pb, moof_offset); + if (flags & MOV_TFHD_STSD_ID) { + avio_wb32(pb, 1); + } if (flags & MOV_TFHD_DEFAULT_DURATION) { track->default_duration = get_cluster_duration(track, 0); avio_wb32(pb, track->default_duration); @@ -4608,6 +4637,7 @@ static int mov_write_prft_tag(AVIOContext *pb, MOVMuxContext *mov, int tracks) { int64_t pos = avio_tell(pb), pts_us, ntp_ts; MOVTrack *first_track; + int flags = 24; /* PRFT should be associated with at most one track. So, choosing only the * first track. 
*/ @@ -4626,7 +4656,11 @@ static int mov_write_prft_tag(AVIOContext *pb, MOVMuxContext *mov, int tracks) } if (mov->write_prft == MOV_PRFT_SRC_WALLCLOCK) { - ntp_ts = ff_get_formatted_ntp_time(ff_ntp_time()); + if (first_track->cluster[0].prft.wallclock) { + ntp_ts = ff_get_formatted_ntp_time(ff_ntp_time(first_track->cluster[0].prft.wallclock)); + flags = first_track->cluster[0].prft.flags; + } else + ntp_ts = ff_get_formatted_ntp_time(ff_ntp_time(av_gettime())); } else if (mov->write_prft == MOV_PRFT_SRC_PTS) { pts_us = av_rescale_q(first_track->cluster[0].pts, first_track->st->time_base, AV_TIME_BASE_Q); @@ -4640,7 +4674,7 @@ static int mov_write_prft_tag(AVIOContext *pb, MOVMuxContext *mov, int tracks) avio_wb32(pb, 0); // Size place holder ffio_wfourcc(pb, "prft"); // Type avio_w8(pb, 1); // Version - avio_wb24(pb, 0); // Flags + avio_wb24(pb, flags); // Flags avio_wb32(pb, first_track->track_id); // reference track ID avio_wb64(pb, ntp_ts); // NTP time stamp avio_wb64(pb, first_track->cluster[0].pts); //media time @@ -4740,27 +4774,11 @@ static int mov_write_mdat_tag(AVIOContext *pb, MOVMuxContext *mov) return 0; } -/* TODO: This needs to be more general */ -static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s) +static void mov_write_ftyp_tag_internal(AVIOContext *pb, AVFormatContext *s, + int has_h264, int has_video, int write_minor) { MOVMuxContext *mov = s->priv_data; - int64_t pos = avio_tell(pb); - int has_h264 = 0, has_video = 0; int minor = 0x200; - int i; - - for (i = 0; i < s->nb_streams; i++) { - AVStream *st = s->streams[i]; - if (is_cover_image(st)) - continue; - if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) - has_video = 1; - if (st->codecpar->codec_id == AV_CODEC_ID_H264) - has_h264 = 1; - } - - avio_wb32(pb, 0); /* size */ - ffio_wfourcc(pb, "ftyp"); if (mov->major_brand && strlen(mov->major_brand) >= 4) ffio_wfourcc(pb, mov->major_brand); @@ -4772,6 +4790,9 @@ static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s) minor = has_h264 ? 0x20000 : 0x10000; } else if (mov->mode == MODE_PSP) ffio_wfourcc(pb, "MSNV"); + else if (mov->mode == MODE_MP4 && mov->flags & FF_MOV_FLAG_FRAGMENT && + mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS) + ffio_wfourcc(pb, "iso6"); // Required when using signed CTS offsets in trun boxes else if (mov->mode == MODE_MP4 && mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) ffio_wfourcc(pb, "iso5"); // Required when using default-base-is-moof else if (mov->mode == MODE_MP4 && mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS) @@ -4787,31 +4808,68 @@ static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s) else ffio_wfourcc(pb, "qt "); - avio_wb32(pb, minor); + if (write_minor) + avio_wb32(pb, minor); +} - if (mov->mode == MODE_MOV) - ffio_wfourcc(pb, "qt "); - else if (mov->mode == MODE_ISM) { - ffio_wfourcc(pb, "piff"); - } else if (!(mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF)) { - ffio_wfourcc(pb, "isom"); - ffio_wfourcc(pb, "iso2"); - if (has_h264) - ffio_wfourcc(pb, "avc1"); +static int mov_write_ftyp_tag(AVIOContext *pb, AVFormatContext *s) +{ + MOVMuxContext *mov = s->priv_data; + int64_t pos = avio_tell(pb); + int has_h264 = 0, has_video = 0; + int i; + + for (i = 0; i < s->nb_streams; i++) { + AVStream *st = s->streams[i]; + if (is_cover_image(st)) + continue; + if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) + has_video = 1; + if (st->codecpar->codec_id == AV_CODEC_ID_H264) + has_h264 = 1; } - // We add tfdt atoms when fragmenting, signal this with the iso6 compatible - // brand. 
This is compatible with users that don't understand tfdt. - if (mov->flags & FF_MOV_FLAG_FRAGMENT && mov->mode != MODE_ISM) - ffio_wfourcc(pb, "iso6"); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "ftyp"); - if (mov->mode == MODE_3GP) - ffio_wfourcc(pb, has_h264 ? "3gp6":"3gp4"); - else if (mov->mode & MODE_3G2) - ffio_wfourcc(pb, has_h264 ? "3g2b":"3g2a"); - else if (mov->mode == MODE_PSP) - ffio_wfourcc(pb, "MSNV"); - else if (mov->mode == MODE_MP4) + // Write major brand + mov_write_ftyp_tag_internal(pb, s, has_h264, has_video, 1); + // Write the major brand as the first compatible brand as well + mov_write_ftyp_tag_internal(pb, s, has_h264, has_video, 0); + + // Write compatible brands, ensuring that we don't write the major brand as a + // compatible brand a second time. + if (mov->mode == MODE_ISM) { + ffio_wfourcc(pb, "piff"); + } else if (mov->mode != MODE_MOV) { + // We add tfdt atoms when fragmenting, signal this with the iso6 compatible + // brand, if not already the major brand. This is compatible with users that + // don't understand tfdt. + if (mov->mode == MODE_MP4) { + if (mov->flags & FF_MOV_FLAG_CMAF) + ffio_wfourcc(pb, "cmfc"); + if (mov->flags & FF_MOV_FLAG_FRAGMENT && !(mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS)) + ffio_wfourcc(pb, "iso6"); + } else { + if (mov->flags & FF_MOV_FLAG_FRAGMENT) + ffio_wfourcc(pb, "iso6"); + if (mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF) + ffio_wfourcc(pb, "iso5"); + else if (mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS) + ffio_wfourcc(pb, "iso4"); + } + // Brands prior to iso5 can't be signaled when using default-base-is-moof + if (!(mov->flags & FF_MOV_FLAG_DEFAULT_BASE_MOOF)) { + // write isom for mp4 only if it it's not the major brand already. + if (mov->mode != MODE_MP4 || mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS) + ffio_wfourcc(pb, "isom"); + ffio_wfourcc(pb, "iso2"); + if (has_h264) + ffio_wfourcc(pb, "avc1"); + } + } + + if (mov->mode == MODE_MP4) ffio_wfourcc(pb, "mp41"); if (mov->flags & FF_MOV_FLAG_DASH && mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) @@ -5129,7 +5187,7 @@ static int mov_flush_fragment(AVFormatContext *s, int force) if (mov->flags & FF_MOV_FLAG_DELAY_MOOV) { if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) mov->reserved_header_pos = avio_tell(s->pb); - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); mov->moov_written = 1; return 0; } @@ -5153,7 +5211,7 @@ static int mov_flush_fragment(AVFormatContext *s, int force) mov->tracks[i].entry = 0; mov->tracks[i].end_reliable = 0; } - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); return 0; } @@ -5218,7 +5276,7 @@ static int mov_flush_fragment(AVFormatContext *s, int force) } if (write_moof) { - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); mov_write_moof_tag(s->pb, mov, moof_tracks, mdat_size); mov->fragments++; @@ -5250,7 +5308,7 @@ static int mov_flush_fragment(AVFormatContext *s, int force) mov->mdat_size = 0; - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); return 0; } @@ -5312,8 +5370,10 @@ int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) AVIOContext *pb = s->pb; MOVTrack *trk = &mov->tracks[pkt->stream_index]; AVCodecParameters *par = trk->par; + AVProducerReferenceTime *prft; unsigned int samples_in_chunk = 0; int size = pkt->size, ret = 0; + int prft_size; uint8_t *reformatted_data = NULL; ret = check_pkt(s, pkt); @@ -5585,6 +5645,13 @@ int 
ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) trk->cluster[trk->entry].flags |= MOV_DISPOSABLE_SAMPLE; trk->has_disposable++; } + + prft = (AVProducerReferenceTime *)av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &prft_size); + if (prft && prft_size == sizeof(AVProducerReferenceTime)) + memcpy(&trk->cluster[trk->entry].prft, prft, prft_size); + else + memset(&trk->cluster[trk->entry].prft, 0, sizeof(AVProducerReferenceTime)); + trk->entry++; trk->sample_count += samples_in_chunk; mov->mdat_size += size; @@ -6146,6 +6213,9 @@ static int mov_init(AVFormatContext *s) if (mov->flags & FF_MOV_FLAG_DASH) mov->flags |= FF_MOV_FLAG_FRAGMENT | FF_MOV_FLAG_EMPTY_MOOV | FF_MOV_FLAG_DEFAULT_BASE_MOOF; + if (mov->flags & FF_MOV_FLAG_CMAF) + mov->flags |= FF_MOV_FLAG_FRAGMENT | FF_MOV_FLAG_EMPTY_MOOV | + FF_MOV_FLAG_DEFAULT_BASE_MOOF | FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS; if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV && s->flags & AVFMT_FLAG_AUTO_BSF) { av_log(s, AV_LOG_VERBOSE, "Empty MOOV enabled; disabling automatic bitstream filtering\n"); @@ -6172,12 +6242,21 @@ static int mov_init(AVFormatContext *s) s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) mov->use_editlist = 0; } + if (mov->flags & FF_MOV_FLAG_CMAF) { + // CMAF Track requires negative cts offsets without edit lists + mov->use_editlist = 0; + } } if (mov->flags & FF_MOV_FLAG_EMPTY_MOOV && !(mov->flags & FF_MOV_FLAG_DELAY_MOOV) && mov->use_editlist) av_log(s, AV_LOG_WARNING, "No meaningful edit list will be written when using empty_moov without delay_moov\n"); - if (!mov->use_editlist && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO) + if (mov->flags & FF_MOV_FLAG_CMAF && mov->use_editlist) { + av_log(s, AV_LOG_WARNING, "Edit list enabled; Assuming writing CMAF Track File\n"); + mov->flags &= ~FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS; + } + if (!mov->use_editlist && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO && + !(mov->flags & FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS)) s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_ZERO; /* Clear the omit_tfhd_offset flag if default_base_moof is set; @@ -6556,7 +6635,6 @@ static int mov_write_header(AVFormatContext *s) !(mov->flags & FF_MOV_FLAG_DELAY_MOOV)) { if ((ret = mov_write_moov_tag(pb, mov, s)) < 0) return ret; - avio_flush(pb); mov->moov_written = 1; if (mov->flags & FF_MOV_FLAG_GLOBAL_SIDX) mov->reserved_header_pos = avio_tell(pb); @@ -6844,38 +6922,39 @@ static const AVCodecTag codec_3gp_tags[] = { }; const AVCodecTag codec_mp4_tags[] = { - { AV_CODEC_ID_MPEG4 , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_H264 , MKTAG('a', 'v', 'c', '1') }, - { AV_CODEC_ID_H264 , MKTAG('a', 'v', 'c', '3') }, - { AV_CODEC_ID_HEVC , MKTAG('h', 'e', 'v', '1') }, - { AV_CODEC_ID_HEVC , MKTAG('h', 'v', 'c', '1') }, - { AV_CODEC_ID_MPEG2VIDEO , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_MPEG1VIDEO , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_MJPEG , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_PNG , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_JPEG2000 , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_VC1 , MKTAG('v', 'c', '-', '1') }, - { AV_CODEC_ID_DIRAC , MKTAG('d', 'r', 'a', 'c') }, - { AV_CODEC_ID_TSCC2 , MKTAG('m', 'p', '4', 'v') }, - { AV_CODEC_ID_VP9 , MKTAG('v', 'p', '0', '9') }, - { AV_CODEC_ID_AV1 , MKTAG('a', 'v', '0', '1') }, - { AV_CODEC_ID_AAC , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_MP4ALS , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_MP3 , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_MP2 , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_AC3 , MKTAG('a', 'c', '-', '3') }, - { AV_CODEC_ID_EAC3 , 
MKTAG('e', 'c', '-', '3') }, - { AV_CODEC_ID_DTS , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_TRUEHD , MKTAG('m', 'l', 'p', 'a') }, - { AV_CODEC_ID_FLAC , MKTAG('f', 'L', 'a', 'C') }, - { AV_CODEC_ID_OPUS , MKTAG('O', 'p', 'u', 's') }, - { AV_CODEC_ID_VORBIS , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_QCELP , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_EVRC , MKTAG('m', 'p', '4', 'a') }, - { AV_CODEC_ID_DVD_SUBTITLE, MKTAG('m', 'p', '4', 's') }, - { AV_CODEC_ID_MOV_TEXT , MKTAG('t', 'x', '3', 'g') }, - { AV_CODEC_ID_BIN_DATA , MKTAG('g', 'p', 'm', 'd') }, - { AV_CODEC_ID_NONE , 0 }, + { AV_CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, + { AV_CODEC_ID_H264, MKTAG('a', 'v', 'c', '3') }, + { AV_CODEC_ID_HEVC, MKTAG('h', 'e', 'v', '1') }, + { AV_CODEC_ID_HEVC, MKTAG('h', 'v', 'c', '1') }, + { AV_CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_MJPEG, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_PNG, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_JPEG2000, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_VC1, MKTAG('v', 'c', '-', '1') }, + { AV_CODEC_ID_DIRAC, MKTAG('d', 'r', 'a', 'c') }, + { AV_CODEC_ID_TSCC2, MKTAG('m', 'p', '4', 'v') }, + { AV_CODEC_ID_VP9, MKTAG('v', 'p', '0', '9') }, + { AV_CODEC_ID_AV1, MKTAG('a', 'v', '0', '1') }, + { AV_CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_MP4ALS, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_MP3, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_MP2, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_AC3, MKTAG('a', 'c', '-', '3') }, + { AV_CODEC_ID_EAC3, MKTAG('e', 'c', '-', '3') }, + { AV_CODEC_ID_DTS, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_TRUEHD, MKTAG('m', 'l', 'p', 'a') }, + { AV_CODEC_ID_FLAC, MKTAG('f', 'L', 'a', 'C') }, + { AV_CODEC_ID_OPUS, MKTAG('O', 'p', 'u', 's') }, + { AV_CODEC_ID_VORBIS, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_QCELP, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_EVRC, MKTAG('m', 'p', '4', 'a') }, + { AV_CODEC_ID_DVD_SUBTITLE, MKTAG('m', 'p', '4', 's') }, + { AV_CODEC_ID_MOV_TEXT, MKTAG('t', 'x', '3', 'g') }, + { AV_CODEC_ID_BIN_DATA, MKTAG('g', 'p', 'm', 'd') }, + { AV_CODEC_ID_MPEGH_3D_AUDIO, MKTAG('m', 'h', 'm', '1') }, + { AV_CODEC_ID_NONE, 0 }, }; const AVCodecTag codec_ism_tags[] = { diff --git a/libavformat/movenc.h b/libavformat/movenc.h index 68d6f23a5a..6ac106c653 100644 --- a/libavformat/movenc.h +++ b/libavformat/movenc.h @@ -56,6 +56,7 @@ typedef struct MOVIentry { #define MOV_PARTIAL_SYNC_SAMPLE 0x0002 #define MOV_DISPOSABLE_SAMPLE 0x0004 uint32_t flags; + AVProducerReferenceTime prft; } MOVIentry; typedef struct HintSample { @@ -258,6 +259,7 @@ typedef struct MOVMuxContext { #define FF_MOV_FLAG_NEGATIVE_CTS_OFFSETS (1 << 19) #define FF_MOV_FLAG_FRAG_EVERY_FRAME (1 << 20) #define FF_MOV_FLAG_SKIP_SIDX (1 << 21) +#define FF_MOV_FLAG_CMAF (1 << 22) int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt); diff --git a/libavformat/mpc8.c b/libavformat/mpc8.c index e452cd6878..dd13bbd0a4 100644 --- a/libavformat/mpc8.c +++ b/libavformat/mpc8.c @@ -212,7 +212,7 @@ static int mpc8_read_header(AVFormatContext *s) MPCContext *c = s->priv_data; AVIOContext *pb = s->pb; AVStream *st; - int tag = 0; + int tag = 0, ret; int64_t size, pos; c->header_pos = avio_tell(pb); @@ -253,8 +253,8 @@ static int mpc8_read_header(AVFormatContext *s) st->codecpar->codec_id = AV_CODEC_ID_MUSEPACK8; st->codecpar->bits_per_coded_sample = 16; - if (ff_get_extradata(s, st->codecpar, pb, 2) < 0) - 
return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, pb, 2)) < 0) + return ret; st->codecpar->channels = (st->codecpar->extradata[1] >> 4) + 1; st->codecpar->sample_rate = mpc8_rate[st->codecpar->extradata[0] >> 5]; @@ -277,7 +277,7 @@ static int mpc8_read_header(AVFormatContext *s) static int mpc8_read_packet(AVFormatContext *s, AVPacket *pkt) { MPCContext *c = s->priv_data; - int tag; + int tag, ret; int64_t pos, size; while(!avio_feof(s->pb)){ @@ -291,8 +291,8 @@ static int mpc8_read_packet(AVFormatContext *s, AVPacket *pkt) if (size < 0) return -1; if(tag == TAG_AUDIOPACKET){ - if(av_get_packet(s->pb, pkt, size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_get_packet(s->pb, pkt, size)) < 0) + return ret; pkt->stream_index = 0; pkt->duration = 1; return 0; diff --git a/libavformat/mpeg.c b/libavformat/mpeg.c index e61851bba5..fad7c7fd55 100644 --- a/libavformat/mpeg.c +++ b/libavformat/mpeg.c @@ -920,7 +920,6 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt) FFDemuxSubtitlesQueue *q; AVIOContext *pb = vobsub->sub_ctx->pb; int ret, psize, total_read = 0, i; - AVPacket idx_pkt = { 0 }; int64_t min_ts = INT64_MAX; int sid = 0; @@ -935,24 +934,22 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt) } } q = &vobsub->q[sid]; - ret = ff_subtitles_queue_read_packet(q, &idx_pkt); + /* The returned packet will have size zero, + * so that it can be directly used with av_grow_packet. */ + ret = ff_subtitles_queue_read_packet(q, pkt); if (ret < 0) return ret; /* compute maximum packet size using the next packet position. This is * useful when the len in the header is non-sense */ if (q->current_sub_idx < q->nb_subs) { - psize = q->subs[q->current_sub_idx].pos - idx_pkt.pos; + psize = q->subs[q->current_sub_idx].pos - pkt->pos; } else { int64_t fsize = avio_size(pb); - psize = fsize < 0 ? 0xffff : fsize - idx_pkt.pos; + psize = fsize < 0 ? 0xffff : fsize - pkt->pos; } - avio_seek(pb, idx_pkt.pos, SEEK_SET); - - av_init_packet(pkt); - pkt->size = 0; - pkt->data = NULL; + avio_seek(pb, pkt->pos, SEEK_SET); do { int n, to_read, startcode; @@ -976,7 +973,7 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt) total_read += pkt_size; /* the current chunk doesn't match the stream index (unlikely) */ - if ((startcode & 0x1f) != s->streams[idx_pkt.stream_index]->id) + if ((startcode & 0x1f) != s->streams[pkt->stream_index]->id) break; ret = av_grow_packet(pkt, to_read); @@ -988,16 +985,10 @@ static int vobsub_read_packet(AVFormatContext *s, AVPacket *pkt) pkt->size -= to_read - n; } while (total_read < psize); - pkt->pts = pkt->dts = idx_pkt.pts; - pkt->pos = idx_pkt.pos; - pkt->stream_index = idx_pkt.stream_index; - - av_packet_unref(&idx_pkt); return 0; fail: av_packet_unref(pkt); - av_packet_unref(&idx_pkt); return ret; } diff --git a/libavformat/mpegenc.c b/libavformat/mpegenc.c index f6980231a2..669ff9d152 100644 --- a/libavformat/mpegenc.c +++ b/libavformat/mpegenc.c @@ -928,7 +928,7 @@ static int flush_packet(AVFormatContext *ctx, int stream_index, for (i = 0; i < zero_trail_bytes; i++) avio_w8(ctx->pb, 0x00); - avio_flush(ctx->pb); + avio_write_marker(ctx->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); s->packet_number++; @@ -957,7 +957,7 @@ static void put_vcd_padding_sector(AVFormatContext *ctx) s->vcd_padding_bytes_written += s->packet_size; - avio_flush(ctx->pb); + avio_write_marker(ctx->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); /* increasing the packet number is correct. 
The SCR of the following packs * is calculated from the packet_number and it has to include the padding @@ -1244,7 +1244,6 @@ static int mpeg_mux_end(AVFormatContext *ctx) * it as it is usually not needed by decoders and because it * complicates MPEG stream concatenation. */ // avio_wb32(ctx->pb, ISO_11172_END_CODE); - // avio_flush(ctx->pb); for (i = 0; i < ctx->nb_streams; i++) { stream = ctx->streams[i]->priv_data; diff --git a/libavformat/mpegts.c b/libavformat/mpegts.c index 587ed33327..5c850bc1e5 100644 --- a/libavformat/mpegts.c +++ b/libavformat/mpegts.c @@ -1845,7 +1845,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type case 0x56: /* DVB teletext descriptor */ { uint8_t *extradata = NULL; - int language_count = desc_len / 5; + int language_count = desc_len / 5, ret; if (desc_len > 0 && desc_len % 5 != 0) return AVERROR_INVALIDDATA; @@ -1855,9 +1855,9 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type av_assert0(language_count <= sizeof(language) / 4); if (st->codecpar->extradata == NULL) { - if (ff_alloc_extradata(st->codecpar, language_count * 2)) { - return AVERROR(ENOMEM); - } + ret = ff_alloc_extradata(st->codecpar, language_count * 2); + if (ret < 0) + return ret; } if (st->codecpar->extradata_size < language_count * 2) @@ -1890,7 +1890,7 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type * subtitling_type (1 byte), * composition_page_id (2 bytes), * ancillary_page_id (2 bytes) */ - int language_count = desc_len / 8; + int language_count = desc_len / 8, ret; if (desc_len > 0 && desc_len % 8 != 0) return AVERROR_INVALIDDATA; @@ -1906,9 +1906,9 @@ int ff_parse_mpeg2_descriptor(AVFormatContext *fc, AVStream *st, int stream_type av_assert0(language_count <= sizeof(language) / 4); if (st->codecpar->extradata == NULL) { - if (ff_alloc_extradata(st->codecpar, language_count * 5)) { - return AVERROR(ENOMEM); - } + ret = ff_alloc_extradata(st->codecpar, language_count * 5); + if (ret < 0) + return ret; } if (st->codecpar->extradata_size < language_count * 5) @@ -3128,8 +3128,8 @@ static int mpegts_raw_read_packet(AVFormatContext *s, AVPacket *pkt) uint8_t pcr_buf[12]; const uint8_t *data; - if (av_new_packet(pkt, TS_PACKET_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, TS_PACKET_SIZE)) < 0) + return ret; ret = read_packet(s, pkt->data, ts->raw_packet_size, &data); pkt->pos = avio_tell(s->pb); if (ret < 0) { diff --git a/libavformat/mpegtsenc.c b/libavformat/mpegtsenc.c index e8dd8b7d56..ccb631d746 100644 --- a/libavformat/mpegtsenc.c +++ b/libavformat/mpegtsenc.c @@ -382,6 +382,8 @@ static int mpegts_write_pmt(AVFormatContext *s, MpegTSService *service) stream_type = STREAM_TYPE_METADATA; break; default: + av_log(s, AV_LOG_WARNING, "Stream %d, codec %s, is muxed as a private data stream " + "and may not be recognized upon reading.\n", i, avcodec_get_name(st->codecpar->codec_id)); stream_type = STREAM_TYPE_PRIVATE_DATA; break; } @@ -1617,7 +1619,7 @@ static int mpegts_write_packet_internal(AVFormatContext *s, AVPacket *pkt) ret = avio_open_dyn_buf(&ts_st->amux->pb); if (ret < 0) - return AVERROR(ENOMEM); + return ret; ret = av_write_frame(ts_st->amux, &pkt2); if (ret < 0) { diff --git a/libavformat/mpjpeg.c b/libavformat/mpjpeg.c index 80f83c5871..0404e86d7f 100644 --- a/libavformat/mpjpeg.c +++ b/libavformat/mpjpeg.c @@ -34,7 +34,6 @@ static int mpjpeg_write_header(AVFormatContext *s) { MPJPEGContext *mpj = s->priv_data; avio_printf(s->pb, "--%s\r\n", 
mpj->boundary_tag); - avio_flush(s->pb); return 0; } @@ -50,11 +49,6 @@ static int mpjpeg_write_packet(AVFormatContext *s, AVPacket *pkt) return 0; } -static int mpjpeg_write_trailer(AVFormatContext *s) -{ - return 0; -} - static const AVOption options[] = { { "boundary_tag", "Boundary tag", offsetof(MPJPEGContext, boundary_tag), AV_OPT_TYPE_STRING, {.str = BOUNDARY_TAG}, .flags = AV_OPT_FLAG_ENCODING_PARAM }, { NULL }, @@ -77,7 +71,6 @@ AVOutputFormat ff_mpjpeg_muxer = { .video_codec = AV_CODEC_ID_MJPEG, .write_header = mpjpeg_write_header, .write_packet = mpjpeg_write_packet, - .write_trailer = mpjpeg_write_trailer, .flags = AVFMT_NOTIMESTAMPS, .priv_class = &mpjpeg_muxer_class, }; diff --git a/libavformat/mvi.c b/libavformat/mvi.c index 9f90faf56b..ff5c08bf51 100644 --- a/libavformat/mvi.c +++ b/libavformat/mvi.c @@ -45,6 +45,7 @@ static int read_header(AVFormatContext *s) AVIOContext *pb = s->pb; AVStream *ast, *vst; unsigned int version, frames_count, msecs_per_frame, player_version; + int ret; ast = avformat_new_stream(s, NULL); if (!ast) @@ -54,8 +55,8 @@ static int read_header(AVFormatContext *s) if (!vst) return AVERROR(ENOMEM); - if (ff_alloc_extradata(vst->codecpar, 2)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(vst->codecpar, 2)) < 0) + return ret; version = avio_r8(pb); vst->codecpar->extradata[0] = avio_r8(pb); diff --git a/libavformat/mxfenc.c b/libavformat/mxfenc.c index e187b3845d..374a83d069 100644 --- a/libavformat/mxfenc.c +++ b/libavformat/mxfenc.c @@ -1936,7 +1936,7 @@ static int mxf_write_partition(AVFormatContext *s, int bodysid, } if(key) - avio_flush(pb); + avio_write_marker(pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); return 0; } @@ -2799,7 +2799,6 @@ static int mxf_write_opatom_packet(AVFormatContext *s, AVPacket *pkt, MXFIndexEn mxf->edit_units_count++; avio_write(pb, pkt->data, pkt->size); mxf->body_offset += pkt->size; - avio_flush(pb); return 0; } @@ -2937,8 +2936,6 @@ static int mxf_write_packet(AVFormatContext *s, AVPacket *pkt) mxf->body_offset += 16+4+pkt->size + klv_fill_size(16+4+pkt->size); } - avio_flush(pb); - return 0; } diff --git a/libavformat/network.h b/libavformat/network.h index 7f467304a8..71347e815b 100644 --- a/libavformat/network.h +++ b/libavformat/network.h @@ -50,6 +50,9 @@ #ifndef EINPROGRESS #define EINPROGRESS WSAEINPROGRESS #endif +#ifndef ENOTCONN +#define ENOTCONN WSAENOTCONN +#endif #define getsockopt(a, b, c, d, e) getsockopt(a, b, c, (char*) d, e) #define setsockopt(a, b, c, d, e) setsockopt(a, b, c, (const char*) d, e) diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c index 2b5adcb111..3779dce2a8 100644 --- a/libavformat/nutdec.c +++ b/libavformat/nutdec.c @@ -427,8 +427,10 @@ static int decode_stream_header(NUTContext *nut) GET_V(st->codecpar->extradata_size, tmp < (1 << 30)); if (st->codecpar->extradata_size) { - if (ff_get_extradata(s, st->codecpar, bc, st->codecpar->extradata_size) < 0) - return AVERROR(ENOMEM); + ret = ff_get_extradata(s, st->codecpar, bc, + st->codecpar->extradata_size); + if (ret < 0) + return ret; } if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { diff --git a/libavformat/nutenc.c b/libavformat/nutenc.c index 46dce7722d..44773924dd 100644 --- a/libavformat/nutenc.c +++ b/libavformat/nutenc.c @@ -768,8 +768,6 @@ static int nut_write_header(AVFormatContext *s) if (s->avoid_negative_ts < 0) s->avoid_negative_ts = 1; - avio_flush(bc); - return 0; } diff --git a/libavformat/nuv.c b/libavformat/nuv.c index a1edbf88df..bef0ae4860 100644 --- a/libavformat/nuv.c +++ 
b/libavformat/nuv.c @@ -74,7 +74,7 @@ static int get_codec_data(AVFormatContext *s, AVIOContext *pb, AVStream *vst, if (!vst && !myth) return 1; // no codec data needed while (!avio_feof(pb)) { - int size, subtype; + int size, subtype, ret; frametype = avio_r8(pb); switch (frametype) { @@ -83,12 +83,8 @@ static int get_codec_data(AVFormatContext *s, AVIOContext *pb, AVStream *vst, avio_skip(pb, 6); size = PKTSIZE(avio_rl32(pb)); if (vst && subtype == 'R') { - if (vst->codecpar->extradata) { - av_freep(&vst->codecpar->extradata); - vst->codecpar->extradata_size = 0; - } - if (ff_get_extradata(NULL, vst->codecpar, pb, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(NULL, vst->codecpar, pb, size)) < 0) + return ret; size = 0; if (!myth) return 0; diff --git a/libavformat/oggenc.c b/libavformat/oggenc.c index d3ae07351d..fe89f23e36 100644 --- a/libavformat/oggenc.c +++ b/libavformat/oggenc.c @@ -133,14 +133,13 @@ static int ogg_write_page(AVFormatContext *s, OGGPage *page, int extra_flags) avio_write(pb, page->data, page->size); ogg_update_checksum(s, pb, crc_offset); - avio_flush(pb); size = avio_close_dyn_buf(pb, &buf); if (size < 0) return size; avio_write(s->pb, buf, size); - avio_flush(s->pb); + avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT); av_free(buf); oggstream->page_count--; return 0; diff --git a/libavformat/oggparsecelt.c b/libavformat/oggparsecelt.c index 9c438a096a..f7a88af616 100644 --- a/libavformat/oggparsecelt.c +++ b/libavformat/oggparsecelt.c @@ -37,6 +37,7 @@ static int celt_header(AVFormatContext *s, int idx) AVStream *st = s->streams[idx]; struct oggcelt_private *priv = os->private; uint8_t *p = os->buf + os->pstart; + int ret; if (os->psize == 60 && !memcmp(p, ff_celt_codec.magic, ff_celt_codec.magicsize)) { @@ -48,9 +49,10 @@ static int celt_header(AVFormatContext *s, int idx) priv = av_malloc(sizeof(struct oggcelt_private)); if (!priv) return AVERROR(ENOMEM); - if (ff_alloc_extradata(st->codecpar, 2 * sizeof(uint32_t)) < 0) { + ret = ff_alloc_extradata(st->codecpar, 2 * sizeof(uint32_t)); + if (ret < 0) { av_free(priv); - return AVERROR(ENOMEM); + return ret; } version = AV_RL32(p + 28); /* unused header size field skipped */ diff --git a/libavformat/oggparseflac.c b/libavformat/oggparseflac.c index b5f1416a3c..4e85b05c67 100644 --- a/libavformat/oggparseflac.c +++ b/libavformat/oggparseflac.c @@ -34,7 +34,7 @@ flac_header (AVFormatContext * s, int idx) struct ogg_stream *os = ogg->streams + idx; AVStream *st = s->streams[idx]; GetBitContext gb; - int mdt; + int mdt, ret; if (os->buf[os->pstart] == 0xff) return 0; @@ -50,7 +50,7 @@ flac_header (AVFormatContext * s, int idx) skip_bits_long(&gb, 4*8); /* "FLAC" */ if(get_bits(&gb, 8) != 1) /* unsupported major version */ return -1; - skip_bits_long(&gb, 8 + 16); /* minor version + header count */ + skip_bits(&gb, 8 + 16); /* minor version + header count */ skip_bits_long(&gb, 4*8); /* "fLaC" */ /* METADATA_BLOCK_HEADER */ @@ -61,8 +61,8 @@ flac_header (AVFormatContext * s, int idx) st->codecpar->codec_id = AV_CODEC_ID_FLAC; st->need_parsing = AVSTREAM_PARSE_HEADERS; - if (ff_alloc_extradata(st->codecpar, FLAC_STREAMINFO_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, FLAC_STREAMINFO_SIZE)) < 0) + return ret; memcpy(st->codecpar->extradata, streaminfo_start, st->codecpar->extradata_size); samplerate = AV_RB24(st->codecpar->extradata + 10) >> 4; diff --git a/libavformat/oggparseogm.c b/libavformat/oggparseogm.c index b07a5d55ba..469b229995 
100644 --- a/libavformat/oggparseogm.c +++ b/libavformat/oggparseogm.c @@ -43,6 +43,7 @@ ogm_header(AVFormatContext *s, int idx) uint64_t time_unit; uint64_t spu; uint32_t size; + int ret; bytestream2_init(&p, os->buf + os->pstart, os->psize); if (!(bytestream2_peek_byte(&p) & 1)) @@ -108,9 +109,8 @@ ogm_header(AVFormatContext *s, int idx) size -= 52; if (bytestream2_get_bytes_left(&p) < size) return AVERROR_INVALIDDATA; - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, size)) < 0) + return ret; bytestream2_get_buffer(&p, st->codecpar->extradata, st->codecpar->extradata_size); } } diff --git a/libavformat/oggparseopus.c b/libavformat/oggparseopus.c index cd34cf23ba..56b53e74e8 100644 --- a/libavformat/oggparseopus.c +++ b/libavformat/oggparseopus.c @@ -42,6 +42,7 @@ static int opus_header(AVFormatContext *avf, int idx) AVStream *st = avf->streams[idx]; struct oggopus_private *priv = os->private; uint8_t *packet = os->buf + os->pstart; + int ret; if (!priv) { priv = os->private = av_mallocz(sizeof(*priv)); @@ -62,9 +63,8 @@ static int opus_header(AVFormatContext *avf, int idx) /*gain = AV_RL16(packet + 16);*/ /*channel_map = AV_RL8 (packet + 18);*/ - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, os->psize)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, os->psize)) < 0) + return ret; memcpy(st->codecpar->extradata, packet, os->psize); diff --git a/libavformat/oggparsespeex.c b/libavformat/oggparsespeex.c index 27fc99247d..c4fee7e076 100644 --- a/libavformat/oggparsespeex.c +++ b/libavformat/oggparsespeex.c @@ -46,6 +46,7 @@ static int speex_header(AVFormatContext *s, int idx) { struct speex_params *spxp = os->private; AVStream *st = s->streams[idx]; uint8_t *p = os->buf + os->pstart; + int ret; if (!spxp) { spxp = av_mallocz(sizeof(*spxp)); @@ -92,8 +93,8 @@ static int speex_header(AVFormatContext *s, int idx) { if (frames_per_packet) spxp->packet_size *= frames_per_packet; - if (ff_alloc_extradata(st->codecpar, os->psize) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, os->psize)) < 0) + return ret; memcpy(st->codecpar->extradata, p, st->codecpar->extradata_size); avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); diff --git a/libavformat/oggparsetheora.c b/libavformat/oggparsetheora.c index b0c0edc7a5..87a676fe48 100644 --- a/libavformat/oggparsetheora.c +++ b/libavformat/oggparsetheora.c @@ -65,7 +65,7 @@ static int theora_header(AVFormatContext *s, int idx) /* 0x80"theora" */ skip_bits_long(&gb, 7 * 8); - thp->version = get_bits_long(&gb, 24); + thp->version = get_bits(&gb, 24); if (thp->version < 0x030100) { av_log(s, AV_LOG_ERROR, "Too old or unsupported Theora (%x)\n", thp->version); @@ -79,8 +79,8 @@ static int theora_header(AVFormatContext *s, int idx) skip_bits(&gb, 100); if (thp->version >= 0x030200) { - int width = get_bits_long(&gb, 24); - int height = get_bits_long(&gb, 24); + int width = get_bits(&gb, 24); + int height = get_bits(&gb, 24); if (width <= st->codecpar->width && width > st->codecpar->width - 16 && height <= st->codecpar->height && height > st->codecpar->height - 16) { st->codecpar->width = width; @@ -99,8 +99,8 @@ static int theora_header(AVFormatContext *s, int idx) } avpriv_set_pts_info(st, 64, timebase.num, timebase.den); - st->sample_aspect_ratio.num = get_bits_long(&gb, 24); - st->sample_aspect_ratio.den = get_bits_long(&gb, 24); + st->sample_aspect_ratio.num = 
get_bits(&gb, 24); + st->sample_aspect_ratio.den = get_bits(&gb, 24); if (thp->version >= 0x030200) skip_bits_long(&gb, 38); diff --git a/libavformat/omadec.c b/libavformat/omadec.c index 60cbf3a87f..9521b6d59e 100644 --- a/libavformat/omadec.c +++ b/libavformat/omadec.c @@ -459,8 +459,8 @@ static int oma_read_header(AVFormatContext *s) /* fake the ATRAC3 extradata * (wav format, makes stream copy to wav work) */ - if (ff_alloc_extradata(st->codecpar, 14)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 14)) < 0) + return ret; edata = st->codecpar->extradata; AV_WL16(&edata[0], 1); // always 1 diff --git a/libavformat/options_table.h b/libavformat/options_table.h index 432818f80d..e26b512440 100644 --- a/libavformat/options_table.h +++ b/libavformat/options_table.h @@ -82,8 +82,8 @@ static const AVOption avformat_options[] = { {"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, D, "err_detect"}, {"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, D, "err_detect"}, {"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, D, "err_detect"}, -{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, D, "err_detect"}, -{"aggressive", "consider things that a sane encoder shouldn't do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, D, "err_detect"}, +{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT | AV_EF_CAREFUL }, INT_MIN, INT_MAX, D, "err_detect"}, +{"aggressive", "consider things that a sane encoder shouldn't do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE | AV_EF_COMPLIANT | AV_EF_CAREFUL}, INT_MIN, INT_MAX, D, "err_detect"}, {"use_wallclock_as_timestamps", "use wallclock as timestamps", OFFSET(use_wallclock_as_timestamps), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, D}, {"skip_initial_bytes", "set number of bytes to skip before reading header and frames", OFFSET(skip_initial_bytes), AV_OPT_TYPE_INT64, {.i64 = 0}, 0, INT64_MAX-1, D}, {"correct_ts_overflow", "correct single timestamp overflows", OFFSET(correct_ts_overflow), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, D}, diff --git a/libavformat/paf.c b/libavformat/paf.c index b3c8e786bc..a31d01502b 100644 --- a/libavformat/paf.c +++ b/libavformat/paf.c @@ -194,7 +194,7 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) PAFDemuxContext *p = s->priv_data; AVIOContext *pb = s->pb; uint32_t count, offset; - int size, i; + int size, i, ret; if (p->current_frame >= p->nb_frames) return AVERROR_EOF; @@ -203,8 +203,8 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) return AVERROR_EOF; if (p->got_audio) { - if (av_new_packet(pkt, p->audio_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, p->audio_size)) < 0) + return ret; memcpy(pkt->data, p->temp_audio_frame, p->audio_size); pkt->duration = PAF_SOUND_SAMPLES * (p->audio_size / PAF_SOUND_FRAME_SIZE); @@ -244,8 +244,8 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) size = p->video_size - p->frames_offset_table[p->current_frame]; - if (av_new_packet(pkt, size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, size)) < 0) + return ret; pkt->stream_index = 0; pkt->duration = 1; diff --git a/libavformat/psxstr.c 
b/libavformat/psxstr.c index ddc17e35d2..678b9f90ac 100644 --- a/libavformat/psxstr.c +++ b/libavformat/psxstr.c @@ -160,7 +160,7 @@ static int str_read_packet(AVFormatContext *s, AVIOContext *pb = s->pb; StrDemuxContext *str = s->priv_data; unsigned char sector[RAW_CD_SECTOR_SIZE]; - int channel; + int channel, ret; AVPacket *pkt; AVStream *st; @@ -213,8 +213,9 @@ static int str_read_packet(AVFormatContext *s, if(pkt->data) av_log(s, AV_LOG_ERROR, "mismatching sector_count\n"); av_packet_unref(pkt); - if (av_new_packet(pkt, sector_count*VIDEO_DATA_CHUNK_SIZE)) - return AVERROR(EIO); + ret = av_new_packet(pkt, sector_count * VIDEO_DATA_CHUNK_SIZE); + if (ret < 0) + return ret; memset(pkt->data, 0, sector_count*VIDEO_DATA_CHUNK_SIZE); pkt->pos= avio_tell(pb) - RAW_CD_SECTOR_SIZE; @@ -267,8 +268,8 @@ static int str_read_packet(AVFormatContext *s, st->start_time = 0; } pkt = ret_pkt; - if (av_new_packet(pkt, 2304)) - return AVERROR(EIO); + if ((ret = av_new_packet(pkt, 2304)) < 0) + return ret; memcpy(pkt->data,sector+24,2304); pkt->stream_index = diff --git a/libavformat/rawdec.c b/libavformat/rawdec.c index 59b49e3f77..fee016cc7f 100644 --- a/libavformat/rawdec.c +++ b/libavformat/rawdec.c @@ -39,8 +39,8 @@ int ff_raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt) size = raw->raw_packet_size; - if (av_new_packet(pkt, size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, size)) < 0) + return ret; pkt->pos= avio_tell(s->pb); pkt->stream_index = 0; diff --git a/libavformat/riffdec.c b/libavformat/riffdec.c index 5523b31adc..070c42eee3 100644 --- a/libavformat/riffdec.c +++ b/libavformat/riffdec.c @@ -145,7 +145,6 @@ int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb, size -= 22; } if (cbSize > 0) { - av_freep(&par->extradata); if (ff_get_extradata(s, par, pb, cbSize) < 0) return AVERROR(ENOMEM); size -= cbSize; @@ -158,7 +157,6 @@ int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb, int nb_streams, i; size -= 4; - av_freep(&par->extradata); if (ff_get_extradata(s, par, pb, size) < 0) return AVERROR(ENOMEM); nb_streams = AV_RL16(par->extradata + 4); diff --git a/libavformat/rl2.c b/libavformat/rl2.c index 07696965c7..9e10155838 100644 --- a/libavformat/rl2.c +++ b/libavformat/rl2.c @@ -127,8 +127,9 @@ static av_cold int rl2_read_header(AVFormatContext *s) if(signature == RLV3_TAG && back_size > 0) st->codecpar->extradata_size += back_size; - if(ff_get_extradata(s, st->codecpar, pb, st->codecpar->extradata_size) < 0) - return AVERROR(ENOMEM); + ret = ff_get_extradata(s, st->codecpar, pb, st->codecpar->extradata_size); + if (ret < 0) + return ret; /** setup audio stream if present */ if(sound_rate){ diff --git a/libavformat/rmdec.c b/libavformat/rmdec.c index 088bd75b69..a36e693ab2 100644 --- a/libavformat/rmdec.c +++ b/libavformat/rmdec.c @@ -87,9 +87,7 @@ static int rm_read_extradata(AVFormatContext *s, AVIOContext *pb, AVCodecParamet av_log(s, AV_LOG_ERROR, "extradata size %u too large\n", size); return -1; } - if (ff_get_extradata(s, par, pb, size) < 0) - return AVERROR(ENOMEM); - return 0; + return ff_get_extradata(s, par, pb, size); } static void rm_read_metadata(AVFormatContext *s, AVIOContext *pb, int wide) @@ -783,8 +781,8 @@ static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb, return -1; } rm->remaining_len -= len; - if(av_new_packet(pkt, len + 9) < 0) - return AVERROR(EIO); + if ((ret = av_new_packet(pkt, len + 9)) < 0) + return ret; pkt->data[0] = 0; AV_WL32(pkt->data + 1, 1); AV_WL32(pkt->data + 5, 0); @@ -806,8 +804,8 @@ static 
int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb, vst->slices = ((hdr & 0x3F) << 1) + 1; vst->videobufsize = len2 + 8*vst->slices + 1; av_packet_unref(&vst->pkt); //FIXME this should be output. - if(av_new_packet(&vst->pkt, vst->videobufsize) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(&vst->pkt, vst->videobufsize)) < 0) + return ret; memset(vst->pkt.data, 0, vst->pkt.size); vst->videobufpos = 8*vst->slices + 1; vst->cur_slice = 0; diff --git a/libavformat/rmenc.c b/libavformat/rmenc.c index 3bff4daf0a..e137dbc44f 100644 --- a/libavformat/rmenc.c +++ b/libavformat/rmenc.c @@ -360,7 +360,6 @@ static int rm_write_header(AVFormatContext *s) if (rv10_write_header(s, 0, 0)) return AVERROR_INVALIDDATA; - avio_flush(s->pb); return 0; } diff --git a/libavformat/rsd.c b/libavformat/rsd.c index 396a431f34..e23c8abae5 100644 --- a/libavformat/rsd.c +++ b/libavformat/rsd.c @@ -97,9 +97,8 @@ static int rsd_read_header(AVFormatContext *s) switch (par->codec_id) { case AV_CODEC_ID_XMA2: par->block_align = 2048; - ff_alloc_extradata(par, 34); - if (!par->extradata) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(par, 34)) < 0) + return ret; memset(par->extradata, 0, 34); break; case AV_CODEC_ID_ADPCM_PSX: diff --git a/libavformat/rsoenc.c b/libavformat/rsoenc.c index e34e2c64e2..3159f0c427 100644 --- a/libavformat/rsoenc.c +++ b/libavformat/rsoenc.c @@ -60,8 +60,6 @@ static int rso_write_header(AVFormatContext *s) avio_wb16(pb, par->sample_rate); avio_wb16(pb, 0x0000); /* play mode ? (0x0000 = don't loop) */ - avio_flush(pb); - return 0; } diff --git a/libavformat/rtpdec.c b/libavformat/rtpdec.c index e75a34cb93..3d5b200099 100644 --- a/libavformat/rtpdec.c +++ b/libavformat/rtpdec.c @@ -415,7 +415,6 @@ void ff_rtp_send_punch_packets(URLContext *rtp_handle) avio_wb32(pb, 0); /* Timestamp */ avio_wb32(pb, 0); /* SSRC */ - avio_flush(pb); len = avio_close_dyn_buf(pb, &buf); if ((len > 0) && buf) ffurl_write(rtp_handle, buf, len); @@ -430,7 +429,6 @@ void ff_rtp_send_punch_packets(URLContext *rtp_handle) avio_wb16(pb, 1); /* length in words - 1 */ avio_wb32(pb, 0); /* our own SSRC */ - avio_flush(pb); len = avio_close_dyn_buf(pb, &buf); if ((len > 0) && buf) ffurl_write(rtp_handle, buf, len); diff --git a/libavformat/rtpdec_ac3.c b/libavformat/rtpdec_ac3.c index 56a379f86c..dd4a4e1054 100644 --- a/libavformat/rtpdec_ac3.c +++ b/libavformat/rtpdec_ac3.c @@ -62,9 +62,9 @@ static int ac3_handle_packet(AVFormatContext *ctx, PayloadContext *data, av_log(ctx, AV_LOG_ERROR, "Invalid AC3 packet data\n"); return AVERROR_INVALIDDATA; } - if (av_new_packet(pkt, len)) { + if ((err = av_new_packet(pkt, len)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); - return AVERROR(ENOMEM); + return err; } pkt->stream_index = st->index; diff --git a/libavformat/rtpdec_amr.c b/libavformat/rtpdec_amr.c index 35d3222811..988b7bddfd 100644 --- a/libavformat/rtpdec_amr.c +++ b/libavformat/rtpdec_amr.c @@ -51,7 +51,7 @@ static int amr_handle_packet(AVFormatContext *ctx, PayloadContext *data, { const uint8_t *frame_sizes = NULL; int frames; - int i; + int i, ret; const uint8_t *speech_data; uint8_t *ptr; @@ -93,9 +93,9 @@ static int amr_handle_packet(AVFormatContext *ctx, PayloadContext *data, speech_data = buf + 1 + frames; /* Everything except the codec mode request byte should be output. 
*/ - if (av_new_packet(pkt, len - 1)) { + if ((ret = av_new_packet(pkt, len - 1)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory\n"); - return AVERROR(ENOMEM); + return ret; } pkt->stream_index = st->index; ptr = pkt->data; diff --git a/libavformat/rtpdec_h263.c b/libavformat/rtpdec_h263.c index 9b71ed7efe..1905b435f8 100644 --- a/libavformat/rtpdec_h263.c +++ b/libavformat/rtpdec_h263.c @@ -30,7 +30,7 @@ int ff_h263_handle_packet(AVFormatContext *ctx, PayloadContext *data, { uint8_t *ptr; uint16_t header; - int startcode, vrc, picture_header; + int startcode, vrc, picture_header, ret; if (len < 2) { av_log(ctx, AV_LOG_ERROR, "Too short H.263 RTP packet\n"); @@ -73,9 +73,9 @@ int ff_h263_handle_packet(AVFormatContext *ctx, PayloadContext *data, return AVERROR_INVALIDDATA; } - if (av_new_packet(pkt, len + startcode)) { + if ((ret = av_new_packet(pkt, len + startcode)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory\n"); - return AVERROR(ENOMEM); + return ret; } pkt->stream_index = st->index; ptr = pkt->data; diff --git a/libavformat/rtpdec_hevc.c b/libavformat/rtpdec_hevc.c index 5a06b2362c..f467104ca5 100644 --- a/libavformat/rtpdec_hevc.c +++ b/libavformat/rtpdec_hevc.c @@ -25,6 +25,7 @@ #include "libavcodec/get_bits.h" #include "avformat.h" +#include "internal.h" #include "rtpdec.h" #include "rtpdec_formats.h" @@ -147,15 +148,9 @@ static av_cold int hevc_parse_sdp_line(AVFormatContext *ctx, int st_index, hevc_sdp_parse_fmtp_config); if (hevc_data->vps_size || hevc_data->sps_size || hevc_data->pps_size || hevc_data->sei_size) { - av_freep(&par->extradata); par->extradata_size = hevc_data->vps_size + hevc_data->sps_size + hevc_data->pps_size + hevc_data->sei_size; - par->extradata = av_malloc(par->extradata_size + - AV_INPUT_BUFFER_PADDING_SIZE); - if (!par->extradata) { - ret = AVERROR(ENOMEM); - par->extradata_size = 0; - } else { + if ((ret = ff_alloc_extradata(par, par->extradata_size)) >= 0) { int pos = 0; memcpy(par->extradata + pos, hevc_data->vps, hevc_data->vps_size); pos += hevc_data->vps_size; @@ -164,8 +159,6 @@ static av_cold int hevc_parse_sdp_line(AVFormatContext *ctx, int st_index, memcpy(par->extradata + pos, hevc_data->pps, hevc_data->pps_size); pos += hevc_data->pps_size; memcpy(par->extradata + pos, hevc_data->sei, hevc_data->sei_size); - pos += hevc_data->sei_size; - memset(par->extradata + pos, 0, AV_INPUT_BUFFER_PADDING_SIZE); } av_freep(&hevc_data->vps); diff --git a/libavformat/rtpdec_latm.c b/libavformat/rtpdec_latm.c index 9087d6bec5..104a00af18 100644 --- a/libavformat/rtpdec_latm.c +++ b/libavformat/rtpdec_latm.c @@ -115,9 +115,8 @@ static int parse_fmtp_config(AVStream *st, const char *value) ret = AVERROR_PATCHWELCOME; goto end; } - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, (get_bits_left(&gb) + 7)/8)) { - ret = AVERROR(ENOMEM); + ret = ff_alloc_extradata(st->codecpar, (get_bits_left(&gb) + 7)/8); + if (ret < 0) { goto end; } for (i = 0; i < st->codecpar->extradata_size; i++) diff --git a/libavformat/rtpdec_mpa_robust.c b/libavformat/rtpdec_mpa_robust.c index f4716edf74..c0355edec2 100644 --- a/libavformat/rtpdec_mpa_robust.c +++ b/libavformat/rtpdec_mpa_robust.c @@ -90,9 +90,9 @@ static int mpa_robust_parse_packet(AVFormatContext *ctx, PayloadContext *data, return AVERROR_INVALIDDATA; } - if (av_new_packet(pkt, adu_size)) { + if ((err = av_new_packet(pkt, adu_size)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); - return AVERROR(ENOMEM); + return err; } pkt->stream_index = st->index; @@ -120,9 +120,9 @@ static int 
mpa_robust_parse_packet(AVFormatContext *ctx, PayloadContext *data, if (!continuation && adu_size <= len) { /* One or more complete frames */ - if (av_new_packet(pkt, adu_size)) { + if ((err = av_new_packet(pkt, adu_size)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); - return AVERROR(ENOMEM); + return err; } pkt->stream_index = st->index; diff --git a/libavformat/rtpdec_mpeg12.c b/libavformat/rtpdec_mpeg12.c index 43d9d5854c..e640220ebe 100644 --- a/libavformat/rtpdec_mpeg12.c +++ b/libavformat/rtpdec_mpeg12.c @@ -29,6 +29,7 @@ static int mpeg_parse_packet(AVFormatContext *ctx, PayloadContext *data, int flags) { unsigned int h; + int ret; if (len <= 4) return AVERROR_INVALIDDATA; h = AV_RB32(buf); @@ -41,8 +42,8 @@ static int mpeg_parse_packet(AVFormatContext *ctx, PayloadContext *data, buf += 4; len -= 4; } - if (av_new_packet(pkt, len) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, len)) < 0) + return ret; memcpy(pkt->data, buf, len); pkt->stream_index = st->index; return 0; diff --git a/libavformat/rtpdec_mpeg4.c b/libavformat/rtpdec_mpeg4.c index 08e5b982c1..34c7950bcc 100644 --- a/libavformat/rtpdec_mpeg4.c +++ b/libavformat/rtpdec_mpeg4.c @@ -112,10 +112,10 @@ static void close_context(PayloadContext *data) static int parse_fmtp_config(AVCodecParameters *par, const char *value) { /* decode the hexa encoded parameter */ - int len = ff_hex_to_data(NULL, value); - av_freep(&par->extradata); - if (ff_alloc_extradata(par, len)) - return AVERROR(ENOMEM); + int len = ff_hex_to_data(NULL, value), ret; + + if ((ret = ff_alloc_extradata(par, len)) < 0) + return ret; ff_hex_to_data(par->extradata, value); return 0; } diff --git a/libavformat/rtpdec_qdm2.c b/libavformat/rtpdec_qdm2.c index fa2b1b9302..1eec2da5b4 100644 --- a/libavformat/rtpdec_qdm2.c +++ b/libavformat/rtpdec_qdm2.c @@ -78,6 +78,7 @@ static int qdm2_parse_config(PayloadContext *qdm, AVStream *st, const uint8_t *buf, const uint8_t *end) { const uint8_t *p = buf; + int ret; while (end - p >= 2) { unsigned int item_len = p[0], config_item = p[1]; @@ -104,9 +105,10 @@ static int qdm2_parse_config(PayloadContext *qdm, AVStream *st, case 4: /* stream with extradata */ if (item_len < 30) return AVERROR_INVALIDDATA; - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, 26 + item_len)) { - return AVERROR(ENOMEM); + + ret = ff_alloc_extradata(st->codecpar, 26 + item_len); + if (ret < 0) { + return ret; } AV_WB32(st->codecpar->extradata, 12); memcpy(st->codecpar->extradata + 4, "frma", 4); diff --git a/libavformat/rtpdec_qt.c b/libavformat/rtpdec_qt.c index 77a3ce40be..740c382d07 100644 --- a/libavformat/rtpdec_qt.c +++ b/libavformat/rtpdec_qt.c @@ -48,13 +48,13 @@ static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt, GetBitContext gb; int packing_scheme, has_payload_desc, has_packet_info, alen, has_marker_bit = flags & RTP_FLAG_MARKER, - keyframe; + keyframe, ret; if (qt->remaining) { int num = qt->pkt.size / qt->bytes_per_frame; - if (av_new_packet(pkt, qt->bytes_per_frame)) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, qt->bytes_per_frame)) < 0) + return ret; pkt->stream_index = st->index; pkt->flags = qt->pkt.flags; memcpy(pkt->data, @@ -208,8 +208,8 @@ static int qt_rtp_parse_packet(AVFormatContext *s, PayloadContext *qt, alen % qt->bytes_per_frame != 0) return AVERROR_INVALIDDATA; /* wrongly padded */ qt->remaining = (alen / qt->bytes_per_frame) - 1; - if (av_new_packet(pkt, qt->bytes_per_frame)) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, 
qt->bytes_per_frame)) < 0) + return ret; memcpy(pkt->data, buf + avio_tell(&pb), qt->bytes_per_frame); pkt->flags = keyframe ? AV_PKT_FLAG_KEY : 0; pkt->stream_index = st->index; diff --git a/libavformat/rtpdec_svq3.c b/libavformat/rtpdec_svq3.c index 77164dd6f9..ffe21ac4c4 100644 --- a/libavformat/rtpdec_svq3.c +++ b/libavformat/rtpdec_svq3.c @@ -58,10 +58,6 @@ static int svq3_parse_packet (AVFormatContext *s, PayloadContext *sv, len -= 2; if (config_packet) { - - av_freep(&st->codecpar->extradata); - st->codecpar->extradata_size = 0; - if (len < 2 || ff_alloc_extradata(st->codecpar, len + 8)) return AVERROR_INVALIDDATA; diff --git a/libavformat/rtpdec_xiph.c b/libavformat/rtpdec_xiph.c index 574508affb..c2db10dab8 100644 --- a/libavformat/rtpdec_xiph.c +++ b/libavformat/rtpdec_xiph.c @@ -63,7 +63,7 @@ static int xiph_handle_packet(AVFormatContext *ctx, PayloadContext *data, int flags) { - int ident, fragmented, tdt, num_pkts, pkt_len; + int ident, fragmented, tdt, num_pkts, pkt_len, ret; if (!buf) { if (!data->split_buf || data->split_pos + 2 > data->split_buf_len || @@ -77,9 +77,9 @@ static int xiph_handle_packet(AVFormatContext *ctx, PayloadContext *data, av_log(ctx, AV_LOG_ERROR, "Not enough data to return\n"); return AVERROR_INVALIDDATA; } - if (av_new_packet(pkt, pkt_len)) { + if ((ret = av_new_packet(pkt, pkt_len)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); - return AVERROR(ENOMEM); + return ret; } pkt->stream_index = st->index; memcpy(pkt->data, data->split_buf + data->split_pos, pkt_len); @@ -123,9 +123,9 @@ static int xiph_handle_packet(AVFormatContext *ctx, PayloadContext *data, len -= 6; if (fragmented == 0) { - if (av_new_packet(pkt, pkt_len)) { + if ((ret = av_new_packet(pkt, pkt_len)) < 0) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); - return AVERROR(ENOMEM); + return ret; } pkt->stream_index = st->index; memcpy(pkt->data, buf, pkt_len); @@ -228,6 +228,7 @@ parse_packed_headers(AVFormatContext *s, { unsigned num_packed, num_headers, length, length1, length2, extradata_alloc; + int ret; uint8_t *ptr; if (packed_headers_end - packed_headers < 9) { @@ -264,9 +265,9 @@ parse_packed_headers(AVFormatContext *s, * -- AV_INPUT_BUFFER_PADDING_SIZE required */ extradata_alloc = length + length/255 + 3 + AV_INPUT_BUFFER_PADDING_SIZE; - if (ff_alloc_extradata(par, extradata_alloc)) { + if ((ret = ff_alloc_extradata(par, extradata_alloc)) < 0) { av_log(s, AV_LOG_ERROR, "Out of memory\n"); - return AVERROR(ENOMEM); + return ret; } ptr = par->extradata; *ptr++ = 2; diff --git a/libavformat/rtpenc.c b/libavformat/rtpenc.c index 63047beccc..b4f2504123 100644 --- a/libavformat/rtpenc.c +++ b/libavformat/rtpenc.c @@ -124,7 +124,7 @@ static int rtp_write_header(AVFormatContext *s1) if (!s->ssrc) s->ssrc = av_get_random_seed(); s->first_packet = 1; - s->first_rtcp_ntp_time = ff_ntp_time(); + s->first_rtcp_ntp_time = ff_ntp_time(av_gettime()); if (s1->start_time_realtime != 0 && s1->start_time_realtime != AV_NOPTS_VALUE) /* Round the NTP time to whole milliseconds. 
*/ s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 + @@ -526,9 +526,9 @@ static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt) rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) / RTCP_TX_RATIO_DEN; if ((s->first_packet || ((rtcp_bytes >= RTCP_SR_SIZE) && - (ff_ntp_time() - s->last_rtcp_ntp_time > 5000000))) && + (ff_ntp_time(av_gettime()) - s->last_rtcp_ntp_time > 5000000))) && !(s->flags & FF_RTP_FLAG_SKIP_RTCP)) { - rtcp_send_sr(s1, ff_ntp_time(), 0); + rtcp_send_sr(s1, ff_ntp_time(av_gettime()), 0); s->last_octet_count = s->octet_count; s->first_packet = 0; } @@ -642,7 +642,7 @@ static int rtp_write_trailer(AVFormatContext *s1) /* If the caller closes and recreates ->pb, this might actually * be NULL here even if it was successfully allocated at the start. */ if (s1->pb && (s->flags & FF_RTP_FLAG_SEND_BYE)) - rtcp_send_sr(s1, ff_ntp_time(), 1); + rtcp_send_sr(s1, ff_ntp_time(av_gettime()), 1); av_freep(&s->buf); return 0; diff --git a/libavformat/rtpenc.h b/libavformat/rtpenc.h index 62dc9ab10a..e67ce665f1 100644 --- a/libavformat/rtpenc.h +++ b/libavformat/rtpenc.h @@ -21,6 +21,7 @@ #ifndef AVFORMAT_RTPENC_H #define AVFORMAT_RTPENC_H +#include "libavutil/time.h" #include "avformat.h" #include "rtp.h" diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c index 859defa592..cd6fc32a29 100644 --- a/libavformat/rtsp.c +++ b/libavformat/rtsp.c @@ -1319,7 +1319,7 @@ static int rtsp_send_cmd_with_content_async(AVFormatContext *s, char base64buf[AV_BASE64_SIZE(sizeof(buf))]; if (!rt->rtsp_hd_out) - return ENOTCONN; + return AVERROR(ENOTCONN); /* Add in RTSP headers */ out_buf = buf; diff --git a/libavformat/sbgdec.c b/libavformat/sbgdec.c index 4155395da0..de1de271bb 100644 --- a/libavformat/sbgdec.c +++ b/libavformat/sbgdec.c @@ -1327,7 +1327,7 @@ static int generate_intervals(void *log, struct sbg_script *s, int sample_rate, static int encode_intervals(struct sbg_script *s, AVCodecParameters *par, struct ws_intervals *inter) { - int i, edata_size = 4; + int i, edata_size = 4, ret; uint8_t *edata; for (i = 0; i < inter->nb_inter; i++) { @@ -1336,8 +1336,8 @@ static int encode_intervals(struct sbg_script *s, AVCodecParameters *par, if (edata_size < 0) return AVERROR(ENOMEM); } - if (ff_alloc_extradata(par, edata_size)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(par, edata_size)) < 0) + return ret; edata = par->extradata; #define ADD_EDATA32(v) do { AV_WL32(edata, (v)); edata += 4; } while(0) @@ -1446,6 +1446,7 @@ fail: static int sbg_read_packet(AVFormatContext *avf, AVPacket *packet) { int64_t ts, end_ts; + int ret; ts = avf->streams[0]->cur_dts; end_ts = ts + avf->streams[0]->codecpar->frame_size; @@ -1454,8 +1455,8 @@ static int sbg_read_packet(AVFormatContext *avf, AVPacket *packet) end_ts); if (end_ts <= ts) return AVERROR_EOF; - if (av_new_packet(packet, 12) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(packet, 12)) < 0) + return ret; packet->dts = packet->pts = ts; packet->duration = end_ts - ts; AV_WL64(packet->data + 0, ts); diff --git a/libavformat/sccdec.c b/libavformat/sccdec.c index 399be76668..769adc7dd9 100644 --- a/libavformat/sccdec.c +++ b/libavformat/sccdec.c @@ -63,6 +63,7 @@ static int scc_read_header(AVFormatContext *s) SCCContext *scc = s->priv_data; AVStream *st = avformat_new_stream(s, NULL); char line[4096], line2[4096]; + int64_t ts_start, ts_end; int count = 0, ret = 0; ptrdiff_t len2, len; uint8_t out[4096]; @@ -77,14 +78,14 @@ static int scc_read_header(AVFormatContext *s) 
st->codecpar->codec_id = AV_CODEC_ID_EIA_608; while (!ff_text_eof(&tr)) { - const int64_t pos = ff_text_pos(&tr); + int64_t current_pos, next_pos; char *saveptr = NULL, *lline; int hh1, mm1, ss1, fs1, i; int hh2, mm2, ss2, fs2; - int64_t ts_start, ts_end; AVPacket *sub; if (count == 0) { + current_pos = ff_text_pos(&tr); while (!ff_text_eof(&tr)) { len = ff_subtitles_read_line(&tr, line, sizeof(line)); if (len > 13) @@ -99,6 +100,7 @@ static int scc_read_header(AVFormatContext *s) ts_start = (hh1 * 3600LL + mm1 * 60LL + ss1) * 1000LL + fs1 * 33; + next_pos = ff_text_pos(&tr); while (!ff_text_eof(&tr)) { len2 = ff_subtitles_read_line(&tr, line2, sizeof(line2)); if (len2 > 13) @@ -135,15 +137,19 @@ try_again: if (!sub) return AVERROR(ENOMEM); - sub->pos = pos; + sub->pos = current_pos; sub->pts = ts_start; - sub->duration = FFMAX(1200, ts_end - ts_start); + sub->duration = ts_end - ts_start; memmove(line, line2, sizeof(line)); + current_pos = next_pos; line2[0] = 0; } - if (line[0]) + if (line[0]) { + ts_start = ts_end; + ts_end += 1200; goto try_again; + } ff_subtitles_queue_finalize(s, &scc->q); diff --git a/libavformat/sdr2.c b/libavformat/sdr2.c index 50abdf9397..8893f260d2 100644 --- a/libavformat/sdr2.c +++ b/libavformat/sdr2.c @@ -90,8 +90,8 @@ static int sdr2_read_packet(AVFormatContext *s, AVPacket *pkt) avio_skip(s->pb, 30); if (pos == FIRST) { - if (av_new_packet(pkt, next - 52 + 24) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, next - 52 + 24)) < 0) + return ret; memcpy(pkt->data, header, 24); ret = avio_read(s->pb, pkt->data + 24, next - 52); if (ret < 0) { diff --git a/libavformat/segafilmenc.c b/libavformat/segafilmenc.c index f1048ff808..bd7c03faf5 100644 --- a/libavformat/segafilmenc.c +++ b/libavformat/segafilmenc.c @@ -70,7 +70,7 @@ static int film_write_packet_to_header(AVFormatContext *format_context, FILMPack info2 = pkt->duration; /* The top bit being set indicates a key frame */ if (!pkt->keyframe) - info1 |= (1 << 31); + info1 |= 1U << 31; } /* Write the 16-byte sample info packet to the STAB chunk in the header */ @@ -181,13 +181,24 @@ static int film_init(AVFormatContext *format_context) av_log(format_context, AV_LOG_ERROR, "Sega FILM allows a maximum of one video stream.\n"); return AVERROR(EINVAL); } + if (st->codecpar->codec_id != AV_CODEC_ID_CINEPAK && + st->codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) { + av_log(format_context, AV_LOG_ERROR, + "Incompatible video stream format.\n"); + return AVERROR(EINVAL); + } + if (st->codecpar->format != AV_PIX_FMT_RGB24) { + av_log(format_context, AV_LOG_ERROR, + "Pixel format must be rgb24.\n"); + return AVERROR(EINVAL); + } film->video_index = i; } + } - if (film->video_index == -1) { - av_log(format_context, AV_LOG_ERROR, "No video stream present.\n"); - return AVERROR(EINVAL); - } + if (film->video_index == -1) { + av_log(format_context, AV_LOG_ERROR, "No video stream present.\n"); + return AVERROR(EINVAL); } if (audio != NULL && get_audio_codec_id(audio->codecpar->codec_id) < 0) { @@ -293,11 +304,6 @@ static int film_write_header(AVFormatContext *format_context) } } - if (video->codecpar->format != AV_PIX_FMT_RGB24) { - av_log(format_context, AV_LOG_ERROR, "Pixel format must be rgb24.\n"); - return AVERROR(EINVAL); - } - /* First, write the FILM header; this is very simple */ ffio_wfourcc(pb, "FILM"); @@ -320,9 +326,6 @@ static int film_write_header(AVFormatContext *format_context) case AV_CODEC_ID_RAWVIDEO: ffio_wfourcc(pb, "raw "); break; - default: - av_log(format_context, AV_LOG_ERROR, 
"Incompatible video stream format.\n"); - return AVERROR(EINVAL); } avio_wb32(pb, video->codecpar->height); @@ -362,8 +365,6 @@ static int film_write_header(AVFormatContext *format_context) avio_wb32(pb, film->packet_count); - avio_flush(pb); - /* Finally, write out each packet's data to the header */ packet = film->start; while (packet != NULL) { diff --git a/libavformat/segment.c b/libavformat/segment.c index e3082063d8..bf9e706c1c 100644 --- a/libavformat/segment.c +++ b/libavformat/segment.c @@ -75,7 +75,6 @@ typedef struct SegmentContext { ff_const59 AVOutputFormat *oformat; AVFormatContext *avf; char *format; ///< format to use for output segment files - char *format_options_str; ///< format options to use for output segment files AVDictionary *format_options; char *list; ///< filename for the segment list file int list_flags; ///< flags affecting list generation @@ -720,15 +719,6 @@ static int seg_init(AVFormatContext *s) } } - if (seg->format_options_str) { - ret = av_dict_parse_string(&seg->format_options, seg->format_options_str, "=", ":", 0); - if (ret < 0) { - av_log(s, AV_LOG_ERROR, "Could not parse format options list '%s'\n", - seg->format_options_str); - return ret; - } - } - if (seg->list) { if (seg->list_type == LIST_TYPE_UNDEFINED) { if (av_match_ext(seg->list, "csv" )) seg->list_type = LIST_TYPE_CSV; @@ -791,7 +781,7 @@ static int seg_init(AVFormatContext *s) ret = avformat_init_output(oc, &options); if (av_dict_count(options)) { av_log(s, AV_LOG_ERROR, - "Some of the provided format options in '%s' are not recognized\n", seg->format_options_str); + "Some of the provided format options are not recognized\n"); av_dict_free(&options); return AVERROR(EINVAL); } @@ -1017,7 +1007,6 @@ fail: if (seg->list) ff_format_io_close(s, &seg->list_pb); - av_dict_free(&seg->format_options); av_opt_free(seg); av_freep(&seg->times); av_freep(&seg->frames); @@ -1060,7 +1049,7 @@ static int seg_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt) static const AVOption options[] = { { "reference_stream", "set reference stream", OFFSET(reference_stream_specifier), AV_OPT_TYPE_STRING, {.str = "auto"}, CHAR_MIN, CHAR_MAX, E }, { "segment_format", "set container format used for the segments", OFFSET(format), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E }, - { "segment_format_options", "set list of options for the container format used for the segments", OFFSET(format_options_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E }, + { "segment_format_options", "set list of options for the container format used for the segments", OFFSET(format_options), AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, E }, { "segment_list", "set the segment list filename", OFFSET(list), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E }, { "segment_header_filename", "write a single file containing the header", OFFSET(header_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E }, diff --git a/libavformat/sierravmd.c b/libavformat/sierravmd.c index d586fc6ac0..8c2322eda6 100644 --- a/libavformat/sierravmd.c +++ b/libavformat/sierravmd.c @@ -127,8 +127,8 @@ static int vmd_read_header(AVFormatContext *s) vst->codecpar->width >>= 1; vst->codecpar->height >>= 1; } - if (ff_alloc_extradata(vst->codecpar, VMD_HEADER_SIZE)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(vst->codecpar, VMD_HEADER_SIZE)) < 0) + return ret; memcpy(vst->codecpar->extradata, vmd->vmd_header, VMD_HEADER_SIZE); } @@ -283,8 +283,9 @@ static int vmd_read_packet(AVFormatContext *s, if(ffio_limit(pb, frame->frame_size) != frame->frame_size) return 
AVERROR(EIO); - if (av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD)) - return AVERROR(ENOMEM); + ret = av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD); + if (ret < 0) + return ret; pkt->pos= avio_tell(pb); memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD); if(vmd->is_indeo3 && frame->frame_record[0] == 0x02) diff --git a/libavformat/siff.c b/libavformat/siff.c index 24d5ebb42d..56c5b33c76 100644 --- a/libavformat/siff.c +++ b/libavformat/siff.c @@ -192,6 +192,7 @@ static int siff_read_header(AVFormatContext *s) static int siff_read_packet(AVFormatContext *s, AVPacket *pkt) { SIFFContext *c = s->priv_data; + int ret; if (c->has_video) { unsigned int size; @@ -213,8 +214,8 @@ static int siff_read_packet(AVFormatContext *s, AVPacket *pkt) size = c->pktsize - c->sndsize - c->gmcsize - 2; size = ffio_limit(s->pb, size); - if (av_new_packet(pkt, size + c->gmcsize + 2) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, size + c->gmcsize + 2)) < 0) + return ret; AV_WL16(pkt->data, c->flags); if (c->gmcsize) memcpy(pkt->data + 2, c->gmc, c->gmcsize); diff --git a/libavformat/smacker.c b/libavformat/smacker.c index 239894dad0..6de0e7a0f1 100644 --- a/libavformat/smacker.c +++ b/libavformat/smacker.c @@ -233,13 +233,13 @@ static int smacker_read_header(AVFormatContext *s) /* load trees to extradata, they will be unpacked by decoder */ - if(ff_alloc_extradata(st->codecpar, smk->treesize + 16)){ + if ((ret = ff_alloc_extradata(st->codecpar, smk->treesize + 16)) < 0) { av_log(s, AV_LOG_ERROR, "Cannot allocate %"PRIu32" bytes of extradata\n", smk->treesize + 16); av_freep(&smk->frm_size); av_freep(&smk->frm_flags); - return AVERROR(ENOMEM); + return ret; } ret = avio_read(pb, st->codecpar->extradata + 16, st->codecpar->extradata_size - 16); if(ret != st->codecpar->extradata_size - 16){ @@ -353,8 +353,8 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt) } if (frame_size < 0 || frame_size >= INT_MAX/2) return AVERROR_INVALIDDATA; - if (av_new_packet(pkt, frame_size + 769)) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, frame_size + 769)) < 0) + return ret; if(smk->frm_size[smk->cur_frame] & 1) palchange |= 2; pkt->data[0] = palchange; @@ -370,8 +370,8 @@ static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt) } else { if (smk->stream_id[smk->curstream] < 0 || !smk->bufs[smk->curstream]) return AVERROR_INVALIDDATA; - if (av_new_packet(pkt, smk->buf_sizes[smk->curstream])) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, smk->buf_sizes[smk->curstream])) < 0) + return ret; memcpy(pkt->data, smk->bufs[smk->curstream], smk->buf_sizes[smk->curstream]); pkt->size = smk->buf_sizes[smk->curstream]; pkt->stream_index = smk->stream_id[smk->curstream]; diff --git a/libavformat/smjpegenc.c b/libavformat/smjpegenc.c index 68a128647e..c3c1a6346c 100644 --- a/libavformat/smjpegenc.c +++ b/libavformat/smjpegenc.c @@ -88,7 +88,6 @@ static int smjpeg_write_header(AVFormatContext *s) } avio_wl32(pb, SMJPEG_HEND); - avio_flush(pb); return 0; } diff --git a/libavformat/smush.c b/libavformat/smush.c index 20352adf94..962eb57ab2 100644 --- a/libavformat/smush.c +++ b/libavformat/smush.c @@ -51,6 +51,7 @@ static int smush_read_header(AVFormatContext *ctx) uint32_t magic, nframes, size, subversion, i; uint32_t width = 0, height = 0, got_audio = 0, read = 0; uint32_t sample_rate, channels, palette[256]; + int ret; magic = avio_rb32(pb); avio_skip(pb, 4); // skip movie size @@ -157,8 +158,8 @@ static int 
smush_read_header(AVFormatContext *ctx) vst->codecpar->height = height; if (!smush->version) { - if (ff_alloc_extradata(vst->codecpar, 1024 + 2)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(vst->codecpar, 1024 + 2)) < 0) + return ret; AV_WL16(vst->codecpar->extradata, subversion); for (i = 0; i < 256; i++) diff --git a/libavformat/soxenc.c b/libavformat/soxenc.c index 7b37bd4848..ce276f88b5 100644 --- a/libavformat/soxenc.c +++ b/libavformat/soxenc.c @@ -80,8 +80,6 @@ static int sox_write_header(AVFormatContext *s) ffio_fill(pb, 0, comment_size - comment_len); - avio_flush(pb); - return 0; } @@ -101,8 +99,6 @@ static int sox_write_trailer(AVFormatContext *s) } else avio_wb64(pb, num_samples); avio_seek(pb, file_size, SEEK_SET); - - avio_flush(pb); } return 0; diff --git a/libavformat/spdifenc.c b/libavformat/spdifenc.c index 4307942a44..d5f7d91e93 100644 --- a/libavformat/spdifenc.c +++ b/libavformat/spdifenc.c @@ -482,12 +482,11 @@ static int spdif_write_header(AVFormatContext *s) return 0; } -static int spdif_write_trailer(AVFormatContext *s) +static void spdif_deinit(AVFormatContext *s) { IEC61937Context *ctx = s->priv_data; av_freep(&ctx->buffer); av_freep(&ctx->hd_buf); - return 0; } static av_always_inline void spdif_put_16(IEC61937Context *ctx, @@ -560,7 +559,7 @@ AVOutputFormat ff_spdif_muxer = { .video_codec = AV_CODEC_ID_NONE, .write_header = spdif_write_header, .write_packet = spdif_write_packet, - .write_trailer = spdif_write_trailer, + .deinit = spdif_deinit, .flags = AVFMT_NOTIMESTAMPS, .priv_class = &spdif_class, }; diff --git a/libavformat/subtitles.c b/libavformat/subtitles.c index a3240d88a1..172da5de2b 100644 --- a/libavformat/subtitles.c +++ b/libavformat/subtitles.c @@ -211,11 +211,12 @@ void ff_subtitles_queue_finalize(void *log_ctx, FFDemuxSubtitlesQueue *q) int ff_subtitles_queue_read_packet(FFDemuxSubtitlesQueue *q, AVPacket *pkt) { AVPacket *sub = q->subs + q->current_sub_idx; + int ret; if (q->current_sub_idx == q->nb_subs) return AVERROR_EOF; - if (av_packet_ref(pkt, sub) < 0) { - return AVERROR(ENOMEM); + if ((ret = av_packet_ref(pkt, sub)) < 0) { + return ret; } pkt->dts = pkt->pts; diff --git a/libavformat/swfenc.c b/libavformat/swfenc.c index f53db0fb2b..84f924eda5 100644 --- a/libavformat/swfenc.c +++ b/libavformat/swfenc.c @@ -337,7 +337,6 @@ static int swf_write_header(AVFormatContext *s) put_swf_end_tag(s); } - avio_flush(s->pb); return 0; } diff --git a/libavformat/tee.c b/libavformat/tee.c index d91993354b..56669d9d8e 100644 --- a/libavformat/tee.c +++ b/libavformat/tee.c @@ -56,7 +56,6 @@ typedef struct TeeContext { TeeSlave *slaves; int use_fifo; AVDictionary *fifo_options; - char *fifo_options_str; } TeeContext; static const char *const slave_delim = "|"; @@ -67,8 +66,8 @@ static const char *const slave_select_sep = ","; static const AVOption options[] = { {"use_fifo", "Use fifo pseudo-muxer to separate actual muxers from encoder", OFFSET(use_fifo), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM}, - {"fifo_options", "fifo pseudo-muxer options", OFFSET(fifo_options_str), - AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM}, + {"fifo_options", "fifo pseudo-muxer options", OFFSET(fifo_options), + AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM}, {NULL} }; @@ -475,12 +474,6 @@ static int tee_write_header(AVFormatContext *avf) filename++; } - if (tee->fifo_options_str) { - ret = av_dict_parse_string(&tee->fifo_options, tee->fifo_options_str, "=", ":", 0); - if (ret < 0) - goto fail; - } - 
if (!(tee->slaves = av_mallocz_array(nb_slaves, sizeof(*tee->slaves)))) { ret = AVERROR(ENOMEM); goto fail; diff --git a/libavformat/tiertexseq.c b/libavformat/tiertexseq.c index a89a0a9d61..d7719e5acb 100644 --- a/libavformat/tiertexseq.c +++ b/libavformat/tiertexseq.c @@ -273,8 +273,10 @@ static int seq_read_packet(AVFormatContext *s, AVPacket *pkt) /* video packet */ if (seq->current_pal_data_size + seq->current_video_data_size != 0) { - if (av_new_packet(pkt, 1 + seq->current_pal_data_size + seq->current_video_data_size)) - return AVERROR(ENOMEM); + rc = av_new_packet(pkt, 1 + seq->current_pal_data_size + + seq->current_video_data_size); + if (rc < 0) + return rc; pkt->data[0] = 0; if (seq->current_pal_data_size) { diff --git a/libavformat/tls_openssl.c b/libavformat/tls_openssl.c index 53f8363a12..e305b2465a 100644 --- a/libavformat/tls_openssl.c +++ b/libavformat/tls_openssl.c @@ -48,7 +48,7 @@ typedef struct TLSContext { #endif } TLSContext; -#if HAVE_THREADS +#if HAVE_THREADS && OPENSSL_VERSION_NUMBER < 0x10100000L #include <pthread.h> pthread_mutex_t *openssl_mutexes; static void openssl_lock(int mode, int type, const char *file, int line) @@ -79,7 +79,7 @@ int ff_openssl_init(void) SSL_library_init(); SSL_load_error_strings(); #endif -#if HAVE_THREADS +#if HAVE_THREADS && OPENSSL_VERSION_NUMBER < 0x10100000L if (!CRYPTO_get_locking_callback()) { int i; openssl_mutexes = av_malloc_array(sizeof(pthread_mutex_t), CRYPTO_num_locks()); @@ -108,7 +108,7 @@ void ff_openssl_deinit(void) ff_lock_avformat(); openssl_init--; if (!openssl_init) { -#if HAVE_THREADS +#if HAVE_THREADS && OPENSSL_VERSION_NUMBER < 0x10100000L if (CRYPTO_get_locking_callback() == openssl_lock) { int i; CRYPTO_set_locking_callback(NULL); diff --git a/libavformat/ttaenc.c b/libavformat/ttaenc.c index 73c29ae936..4860aab4c1 100644 --- a/libavformat/ttaenc.c +++ b/libavformat/ttaenc.c @@ -154,7 +154,6 @@ static int tta_write_trailer(AVFormatContext *s) tta_queue_flush(s); ff_ape_write_tag(s); - avio_flush(s->pb); return 0; } diff --git a/libavformat/ty.c b/libavformat/ty.c index bbb2e28a93..738a22e7de 100644 --- a/libavformat/ty.c +++ b/libavformat/ty.c @@ -454,7 +454,7 @@ static int demux_video(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) TYDemuxContext *ty = s->priv_data; const int subrec_type = rec_hdr->subrec_type; const int64_t rec_size = rec_hdr->rec_size; - int es_offset1; + int es_offset1, ret; int got_packet = 0; if (subrec_type != 0x02 && subrec_type != 0x0c && @@ -474,8 +474,8 @@ static int demux_video(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) int size = rec_hdr->rec_size - VIDEO_PES_LENGTH - es_offset1; ty->cur_chunk_pos += VIDEO_PES_LENGTH + es_offset1; - if (av_new_packet(pkt, size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, size)) < 0) + return ret; memcpy(pkt->data, ty->chunk + ty->cur_chunk_pos, size); ty->cur_chunk_pos += size; pkt->stream_index = 0; @@ -498,8 +498,8 @@ static int demux_video(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) } if (!got_packet) { - if (av_new_packet(pkt, rec_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, rec_size)) < 0) + return ret; memcpy(pkt->data, ty->chunk + ty->cur_chunk_pos, rec_size); ty->cur_chunk_pos += rec_size; pkt->stream_index = 0; @@ -578,7 +578,7 @@ static int demux_audio(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) TYDemuxContext *ty = s->priv_data; const int subrec_type = rec_hdr->subrec_type; const int64_t rec_size = rec_hdr->rec_size; - int es_offset1; + int es_offset1, ret; if
(subrec_type == 2) { int need = 0; @@ -621,8 +621,8 @@ static int demux_audio(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) ty->pes_buf_cnt = 0; } - if (av_new_packet(pkt, rec_size - need) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, rec_size - need)) < 0) + return ret; memcpy(pkt->data, ty->chunk + ty->cur_chunk_pos, rec_size - need); ty->cur_chunk_pos += rec_size - need; pkt->stream_index = 1; @@ -643,8 +643,8 @@ static int demux_audio(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) } } } else if (subrec_type == 0x03) { - if (av_new_packet(pkt, rec_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, rec_size)) < 0) + return ret; memcpy(pkt->data, ty->chunk + ty->cur_chunk_pos, rec_size); ty->cur_chunk_pos += rec_size; pkt->stream_index = 1; @@ -674,15 +674,15 @@ static int demux_audio(AVFormatContext *s, TyRecHdr *rec_hdr, AVPacket *pkt) } else if (subrec_type == 0x04) { /* SA Audio with no PES Header */ /* ================================================ */ - if (av_new_packet(pkt, rec_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, rec_size)) < 0) + return ret; memcpy(pkt->data, ty->chunk + ty->cur_chunk_pos, rec_size); ty->cur_chunk_pos += rec_size; pkt->stream_index = 1; pkt->pts = ty->last_audio_pts; } else if (subrec_type == 0x09) { - if (av_new_packet(pkt, rec_size) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, rec_size)) < 0) + return ret; memcpy(pkt->data, ty->chunk + ty->cur_chunk_pos, rec_size); ty->cur_chunk_pos += rec_size ; pkt->stream_index = 1; diff --git a/libavformat/utils.c b/libavformat/utils.c index 4d18880acb..674757a63f 100644 --- a/libavformat/utils.c +++ b/libavformat/utils.c @@ -77,7 +77,7 @@ const char *avformat_configuration(void) const char *avformat_license(void) { #define LICENSE_PREFIX "libavformat license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } int ff_lock_avformat(void) @@ -268,7 +268,6 @@ int ffio_limit(AVIOContext *s, int size) * Return the number of bytes read or an error. */ static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size) { - int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos int orig_size = pkt->size; int ret; @@ -301,7 +300,6 @@ static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size) if (size > 0) pkt->flags |= AV_PKT_FLAG_CORRUPT; - pkt->pos = orig_pos; if (!pkt->size) av_packet_unref(pkt); return pkt->size > orig_size ? 
pkt->size - orig_size : ret; @@ -1021,7 +1019,8 @@ static int is_intra_only(enum AVCodecID id) const AVCodecDescriptor *d = avcodec_descriptor_get(id); if (!d) return 0; - if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY)) + if ((d->type == AVMEDIA_TYPE_VIDEO || d->type == AVMEDIA_TYPE_AUDIO) && + !(d->props & AV_CODEC_PROP_INTRA_ONLY)) return 0; return 1; } @@ -4685,9 +4684,9 @@ void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx) } } -uint64_t ff_ntp_time(void) +uint64_t ff_ntp_time(int64_t timestamp) { - return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; + return (timestamp / 1000) * 1000 + NTP_OFFSET_US; } uint64_t ff_get_formatted_ntp_time(uint64_t ntp_time_us) @@ -5450,7 +5449,7 @@ int ff_generate_avci_extradata(AVStream *st) }; const uint8_t *data = NULL; - int size = 0; + int ret, size = 0; if (st->codecpar->width == 1920) { if (st->codecpar->field_order == AV_FIELD_PROGRESSIVE) { @@ -5479,9 +5478,8 @@ int ff_generate_avci_extradata(AVStream *st) if (!size) return 0; - av_freep(&st->codecpar->extradata); - if (ff_alloc_extradata(st->codecpar, size)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, size)) < 0) + return ret; memcpy(st->codecpar->extradata, data, size); return 0; diff --git a/libavformat/vc1test.c b/libavformat/vc1test.c index 3c677931fc..ff57f44b14 100644 --- a/libavformat/vc1test.c +++ b/libavformat/vc1test.c @@ -51,7 +51,7 @@ static int vc1t_read_header(AVFormatContext *s) { AVIOContext *pb = s->pb; AVStream *st; - int frames; + int frames, ret; uint32_t fps; uint32_t size; @@ -67,8 +67,8 @@ static int vc1t_read_header(AVFormatContext *s) st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_WMV3; - if (ff_get_extradata(s, st->codecpar, pb, VC1_EXTRADATA_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, pb, VC1_EXTRADATA_SIZE)) < 0) + return ret; avio_skip(pb, size - 4); st->codecpar->height = avio_rl32(pb); diff --git a/libavformat/vc1testenc.c b/libavformat/vc1testenc.c index cf95d1d80d..1365bdd660 100644 --- a/libavformat/vc1testenc.c +++ b/libavformat/vc1testenc.c @@ -76,7 +76,6 @@ static int vc1test_write_trailer(AVFormatContext *s) if (s->pb->seekable & AVIO_SEEKABLE_NORMAL) { avio_seek(pb, 0, SEEK_SET); avio_wl24(pb, ctx->frames); - avio_flush(pb); } return 0; } diff --git a/libavformat/version.h b/libavformat/version.h index 213b66b45f..0a79868663 100644 --- a/libavformat/version.h +++ b/libavformat/version.h @@ -33,7 +33,7 @@ // Also please add any ticket numbers that you believe might be affected here #define LIBAVFORMAT_VERSION_MAJOR 58 #define LIBAVFORMAT_VERSION_MINOR 35 -#define LIBAVFORMAT_VERSION_MICRO 101 +#define LIBAVFORMAT_VERSION_MICRO 103 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ LIBAVFORMAT_VERSION_MINOR, \ diff --git a/libavformat/vividas.c b/libavformat/vividas.c index 4ea29d85e3..4f54a4302e 100644 --- a/libavformat/vividas.c +++ b/libavformat/vividas.c @@ -278,7 +278,7 @@ static uint8_t *read_sb_block(AVIOContext *src, unsigned *size, static int track_header(VividasDemuxContext *viv, AVFormatContext *s, uint8_t *buf, int size) { - int i,j; + int i, j, ret; int64_t off; int val_1; int num_video; @@ -391,10 +391,9 @@ static int track_header(VividasDemuxContext *viv, AVFormatContext *s, uint8_t * xd_size += len; } - st->codecpar->extradata_size = 64 + xd_size + xd_size / 255; - if (ff_alloc_extradata(st->codecpar, st->codecpar->extradata_size)) { - return 
AVERROR(ENOMEM); - } + ret = ff_alloc_extradata(st->codecpar, 64 + xd_size + xd_size / 255); + if (ret < 0) + return ret; p = st->codecpar->extradata; p[0] = 2; diff --git a/libavformat/vqf.c b/libavformat/vqf.c index 755849bac7..2916ee64fa 100644 --- a/libavformat/vqf.c +++ b/libavformat/vqf.c @@ -97,7 +97,7 @@ static int vqf_read_header(AVFormatContext *s) int rate_flag = -1; int header_size; int read_bitrate = 0; - int size; + int size, ret; uint8_t comm_chunk[12]; if (!st) @@ -222,8 +222,8 @@ static int vqf_read_header(AVFormatContext *s) avpriv_set_pts_info(st, 64, size, st->codecpar->sample_rate); /* put first 12 bytes of COMM chunk in extradata */ - if (ff_alloc_extradata(st->codecpar, 12)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 12)) < 0) + return ret; memcpy(st->codecpar->extradata, comm_chunk, 12); ff_metadata_conv_ctx(s, NULL, vqf_metadata_conv); @@ -237,8 +237,8 @@ static int vqf_read_packet(AVFormatContext *s, AVPacket *pkt) int ret; int size = (c->frame_bit_len - c->remaining_bits + 7)>>3; - if (av_new_packet(pkt, size+2) < 0) - return AVERROR(EIO); + if ((ret = av_new_packet(pkt, size + 2)) < 0) + return ret; pkt->pos = avio_tell(s->pb); pkt->stream_index = 0; diff --git a/libavformat/wavdec.c b/libavformat/wavdec.c index 52194f54ef..575c667452 100644 --- a/libavformat/wavdec.c +++ b/libavformat/wavdec.c @@ -181,7 +181,7 @@ static int wav_parse_fmt_tag(AVFormatContext *s, int64_t size, AVStream **st) static int wav_parse_xma2_tag(AVFormatContext *s, int64_t size, AVStream **st) { AVIOContext *pb = s->pb; - int version, num_streams, i, channels = 0; + int version, num_streams, i, channels = 0, ret; if (size < 36) return AVERROR_INVALIDDATA; @@ -220,9 +220,8 @@ static int wav_parse_xma2_tag(AVFormatContext *s, int64_t size, AVStream **st) avpriv_set_pts_info(*st, 64, 1, (*st)->codecpar->sample_rate); avio_seek(pb, -size, SEEK_CUR); - av_freep(&(*st)->codecpar->extradata); - if (ff_get_extradata(s, (*st)->codecpar, pb, size) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, (*st)->codecpar, pb, size)) < 0) + return ret; return 0; } @@ -473,9 +472,9 @@ static int wav_read_header(AVFormatContext *s) vst->codecpar->codec_id = AV_CODEC_ID_SMVJPEG; vst->codecpar->width = avio_rl24(pb); vst->codecpar->height = avio_rl24(pb); - if (ff_alloc_extradata(vst->codecpar, 4)) { + if ((ret = ff_alloc_extradata(vst->codecpar, 4)) < 0) { av_log(s, AV_LOG_ERROR, "Could not allocate extradata.\n"); - return AVERROR(ENOMEM); + return ret; } size = avio_rl24(pb); wav->smv_data_ofs = avio_tell(pb) + (size - 5) * 3; diff --git a/libavformat/wavenc.c b/libavformat/wavenc.c index 159119d693..f6f5710802 100644 --- a/libavformat/wavenc.c +++ b/libavformat/wavenc.c @@ -141,7 +141,7 @@ static void bwf_write_bext_chunk(AVFormatContext *s) ff_end_tag(s->pb, bext); } -static av_cold void peak_free_buffers(AVFormatContext *s) +static av_cold void wav_deinit(AVFormatContext *s) { WAVMuxContext *wav = s->priv_data; @@ -185,7 +185,6 @@ static av_cold int peak_init_writer(AVFormatContext *s) nomem: av_log(s, AV_LOG_ERROR, "Out of memory\n"); - peak_free_buffers(s); return AVERROR(ENOMEM); } @@ -362,8 +361,6 @@ static int wav_write_header(AVFormatContext *s) wav->data = ff_start_tag(pb, "data"); } - avio_flush(pb); - return 0; } @@ -414,17 +411,13 @@ static int wav_write_trailer(AVFormatContext *s) int rf64 = 0; int ret = 0; - avio_flush(pb); - if (s->pb->seekable & AVIO_SEEKABLE_NORMAL) { if (wav->write_peak != PEAK_ONLY && avio_tell(pb) - wav->data < UINT32_MAX) { 
ff_end_tag(pb, wav->data); - avio_flush(pb); } if (wav->write_peak && wav->peak_output) { ret = peak_write_chunk(s); - avio_flush(pb); } /* update file size */ @@ -436,8 +429,6 @@ static int wav_write_trailer(AVFormatContext *s) avio_seek(pb, 4, SEEK_SET); avio_wl32(pb, (uint32_t)(file_size - 8)); avio_seek(pb, file_size, SEEK_SET); - - avio_flush(pb); } else { av_log(s, AV_LOG_ERROR, "Filesize %"PRId64" invalid for wav, output file will be broken\n", @@ -457,7 +448,6 @@ static int wav_write_trailer(AVFormatContext *s) } else { avio_wl32(pb, number_of_samples); avio_seek(pb, file_size, SEEK_SET); - avio_flush(pb); } } @@ -481,13 +471,9 @@ static int wav_write_trailer(AVFormatContext *s) avio_wl32(pb, -1); avio_seek(pb, file_size, SEEK_SET); - avio_flush(pb); } } - if (wav->write_peak) - peak_free_buffers(s); - return ret; } @@ -527,6 +513,7 @@ AVOutputFormat ff_wav_muxer = { .write_header = wav_write_header, .write_packet = wav_write_packet, .write_trailer = wav_write_trailer, + .deinit = wav_deinit, .flags = AVFMT_TS_NONSTRICT, .codec_tag = (const AVCodecTag* const []){ ff_codec_wav_tags, 0 }, .priv_class = &wav_muxer_class, @@ -610,7 +597,6 @@ static int w64_write_trailer(AVFormatContext *s) } avio_seek(pb, file_size, SEEK_SET); - avio_flush(pb); } return 0; diff --git a/libavformat/webvttenc.c b/libavformat/webvttenc.c index 61b7f54622..cbd989dcb6 100644 --- a/libavformat/webvttenc.c +++ b/libavformat/webvttenc.c @@ -57,7 +57,6 @@ static int webvtt_write_header(AVFormatContext *ctx) avpriv_set_pts_info(s, 64, 1, 1000); avio_printf(pb, "WEBVTT\n"); - avio_flush(pb); return 0; } diff --git a/libavformat/westwood_vqa.c b/libavformat/westwood_vqa.c index c21a3e31f6..a0db854b1c 100644 --- a/libavformat/westwood_vqa.c +++ b/libavformat/westwood_vqa.c @@ -85,7 +85,7 @@ static int wsvqa_read_header(AVFormatContext *s) uint8_t scratch[VQA_PREAMBLE_SIZE]; uint32_t chunk_tag; uint32_t chunk_size; - int fps; + int fps, ret; /* initialize the video decoder stream */ st = avformat_new_stream(s, NULL); @@ -101,8 +101,8 @@ static int wsvqa_read_header(AVFormatContext *s) avio_seek(pb, 20, SEEK_SET); /* the VQA header needs to go to the decoder */ - if (ff_get_extradata(s, st->codecpar, pb, VQA_HEADER_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = ff_get_extradata(s, st->codecpar, pb, VQA_HEADER_SIZE)) < 0) + return ret; header = st->codecpar->extradata; st->codecpar->width = AV_RL16(&header[6]); st->codecpar->height = AV_RL16(&header[8]); @@ -214,8 +214,8 @@ static int wsvqa_read_packet(AVFormatContext *s, break; case SND2_TAG: st->codecpar->codec_id = AV_CODEC_ID_ADPCM_IMA_WS; - if (ff_alloc_extradata(st->codecpar, 2)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 2)) < 0) + return ret; AV_WL16(st->codecpar->extradata, wsvqa->version); break; } diff --git a/libavformat/wtvdec.c b/libavformat/wtvdec.c index 706e8ca38d..67d934f074 100644 --- a/libavformat/wtvdec.c +++ b/libavformat/wtvdec.c @@ -290,7 +290,7 @@ static AVIOContext * wtvfile_open2(AVFormatContext *s, const uint8_t *buf, int b buf += dir_length; } - return 0; + return NULL; } #define wtvfile_open(s, buf, buf_size, filename) \ @@ -904,10 +904,10 @@ static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_p wtv->last_valid_pts = wtv->pts; if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch) wtv->epoch = wtv->pts; - if (mode == SEEK_TO_PTS && wtv->pts >= seekts) { - avio_skip(pb, WTV_PAD8(len) - consumed); - return 0; - } + if (mode == SEEK_TO_PTS && wtv->pts >= seekts) { + avio_skip(pb, 
WTV_PAD8(len) - consumed); + return 0; + } } } } else if (!ff_guidcmp(g, ff_data_guid)) { @@ -993,8 +993,10 @@ static int read_header(AVFormatContext *s) } ret = parse_chunks(s, SEEK_TO_DATA, 0, 0); - if (ret < 0) + if (ret < 0) { + wtvfile_close(wtv->pb); return ret; + } avio_seek(wtv->pb, -32, SEEK_CUR); timeline_pos = avio_tell(s->pb); // save before opening another file diff --git a/libavformat/wtvenc.c b/libavformat/wtvenc.c index 4a68b8133f..498bc64019 100644 --- a/libavformat/wtvenc.c +++ b/libavformat/wtvenc.c @@ -823,8 +823,6 @@ static int write_trailer(AVFormatContext *s) avio_seek(pb, 0x5c, SEEK_SET); avio_wl32(pb, file_end_pos >> WTV_SECTOR_BITS); - avio_flush(pb); - av_free(wctx->sp_pairs); av_free(wctx->st_pairs); av_packet_unref(&wctx->thumbnail); diff --git a/libavformat/wvdec.c b/libavformat/wvdec.c index 649791d151..f2bb4c60ba 100644 --- a/libavformat/wvdec.c +++ b/libavformat/wvdec.c @@ -282,8 +282,8 @@ static int wv_read_packet(AVFormatContext *s, AVPacket *pkt) } pos = wc->pos; - if (av_new_packet(pkt, wc->header.blocksize + WV_HEADER_SIZE) < 0) - return AVERROR(ENOMEM); + if ((ret = av_new_packet(pkt, wc->header.blocksize + WV_HEADER_SIZE)) < 0) + return ret; memcpy(pkt->data, wc->block_header, WV_HEADER_SIZE); ret = avio_read(s->pb, pkt->data + WV_HEADER_SIZE, wc->header.blocksize); if (ret != wc->header.blocksize) { diff --git a/libavformat/xmv.c b/libavformat/xmv.c index 7f12956458..0c69d267de 100644 --- a/libavformat/xmv.c +++ b/libavformat/xmv.c @@ -397,8 +397,6 @@ static int xmv_process_packet_header(AVFormatContext *s) av_assert0(xmv->video.stream_index < s->nb_streams); if (vst->codecpar->extradata_size < 4) { - av_freep(&vst->codecpar->extradata); - if ((ret = ff_alloc_extradata(vst->codecpar, 4)) < 0) return ret; } diff --git a/libavformat/xwma.c b/libavformat/xwma.c index b084449296..b5dcb850c0 100644 --- a/libavformat/xwma.c +++ b/libavformat/xwma.c @@ -130,15 +130,15 @@ static int xwma_read_header(AVFormatContext *s) avpriv_request_sample(s, "Unexpected extradata (%d bytes)", st->codecpar->extradata_size); } else if (st->codecpar->codec_id == AV_CODEC_ID_WMAPRO) { - if (ff_alloc_extradata(st->codecpar, 18)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 18)) < 0) + return ret; memset(st->codecpar->extradata, 0, st->codecpar->extradata_size); st->codecpar->extradata[ 0] = st->codecpar->bits_per_coded_sample; st->codecpar->extradata[14] = 224; } else { - if (ff_alloc_extradata(st->codecpar, 6)) - return AVERROR(ENOMEM); + if ((ret = ff_alloc_extradata(st->codecpar, 6)) < 0) + return ret; memset(st->codecpar->extradata, 0, st->codecpar->extradata_size); /* setup extradata with our experimentally obtained value */ diff --git a/libavutil/avstring.c b/libavutil/avstring.c index 76a13ba3b5..f4b8ed2b45 100644 --- a/libavutil/avstring.c +++ b/libavutil/avstring.c @@ -258,15 +258,17 @@ char *av_strireplace(const char *str, const char *from, const char *to) const char *av_basename(const char *path) { char *p; +#if HAVE_DOS_PATHS + char *q, *d; +#endif if (!path || *path == '\0') return "."; p = strrchr(path, '/'); #if HAVE_DOS_PATHS - char *q = strrchr(path, '\\'); - char *d = strchr(path, ':'); - + q = strrchr(path, '\\'); + d = strchr(path, ':'); p = FFMAX3(p, q, d); #endif diff --git a/libavutil/buffer.c b/libavutil/buffer.c index f0034b026a..6d9cb7428e 100644 --- a/libavutil/buffer.c +++ b/libavutil/buffer.c @@ -20,6 +20,7 @@ #include #include +#include "avassert.h" #include "buffer_internal.h" #include "common.h" #include "mem.h" @@ 
-355,3 +356,10 @@ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool) return ret; } + +void *av_buffer_pool_buffer_get_opaque(AVBufferRef *ref) +{ + BufferPoolEntry *buf = ref->buffer->opaque; + av_assert0(buf); + return buf->opaque; +} diff --git a/libavutil/buffer.h b/libavutil/buffer.h index 73b6bd0b14..e0f94314f4 100644 --- a/libavutil/buffer.h +++ b/libavutil/buffer.h @@ -284,6 +284,19 @@ void av_buffer_pool_uninit(AVBufferPool **pool); */ AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); +/** + * Query the original opaque parameter of an allocated buffer in the pool. + * + * @param ref a buffer reference to a buffer returned by av_buffer_pool_get. + * @return the opaque parameter set by the buffer allocator function of the + * buffer pool. + * + * @note the opaque parameter of ref is used by the buffer pool implementation, + * therefore you have to use this function to access the original opaque + * parameter of an allocated buffer. + */ +void *av_buffer_pool_buffer_get_opaque(AVBufferRef *ref); + /** * @} */ diff --git a/libavutil/eval.c b/libavutil/eval.c index 62d2ae938b..d527f6a9d0 100644 --- a/libavutil/eval.c +++ b/libavutil/eval.c @@ -166,8 +166,8 @@ struct AVExpr { e_sgn, } type; double value; // is sign in other types + int const_index; union { - int const_index; double (*func0)(double); double (*func1)(void *, double); double (*func2)(void *, double, double); @@ -185,7 +185,7 @@ static double eval_expr(Parser *p, AVExpr *e) { switch (e->type) { case e_value: return e->value; - case e_const: return e->value * p->const_values[e->a.const_index]; + case e_const: return e->value * p->const_values[e->const_index]; case e_func0: return e->value * e->a.func0(eval_expr(p, e->param[0])); case e_func1: return e->value * e->a.func1(p->opaque, eval_expr(p, e->param[0])); case e_func2: return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1])); @@ -367,7 +367,7 @@ static int parse_primary(AVExpr **e, Parser *p) if (strmatch(p->s, p->const_names[i])) { p->s+= strlen(p->const_names[i]); d->type = e_const; - d->a.const_index = i; + d->const_index = i; *e = d; return 0; } @@ -478,6 +478,7 @@ static int parse_primary(AVExpr **e, Parser *p) if (strmatch(next, p->func1_names[i])) { d->a.func1 = p->funcs1[i]; d->type = e_func1; + d->const_index = i; *e = d; return 0; } @@ -487,6 +488,7 @@ static int parse_primary(AVExpr **e, Parser *p) if (strmatch(next, p->func2_names[i])) { d->a.func2 = p->funcs2[i]; d->type = e_func2; + d->const_index = i; *e = d; return 0; } @@ -735,22 +737,32 @@ end: return ret; } -int av_expr_count_vars(AVExpr *e, unsigned *counter, int size) +static int expr_count(AVExpr *e, unsigned *counter, int size, int type) { int i; if (!e || !counter || !size) return AVERROR(EINVAL); - for (i = 0; e->type != e_const && i < 3 && e->param[i]; i++) - av_expr_count_vars(e->param[i], counter, size); + for (i = 0; e->type != type && i < 3 && e->param[i]; i++) + expr_count(e->param[i], counter, size, type); - if (e->type == e_const && e->a.const_index < size) - counter[e->a.const_index]++; + if (e->type == type && e->const_index < size) + counter[e->const_index]++; return 0; } +int av_expr_count_vars(AVExpr *e, unsigned *counter, int size) +{ + return expr_count(e, counter, size, e_const); +} + +int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg) +{ + return expr_count(e, counter, size, ((int[]){e_const, e_func1, e_func2})[arg]); +} + double av_expr_eval(AVExpr *e, const double *const_values, void *opaque) { Parser p = { 0 }; diff 
--git a/libavutil/eval.h b/libavutil/eval.h index 9bdb10cca2..068c62cdab 100644 --- a/libavutil/eval.h +++ b/libavutil/eval.h @@ -96,6 +96,20 @@ double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); */ int av_expr_count_vars(AVExpr *e, unsigned *counter, int size); +/** + * Track the presence of user provided functions and their number of occurrences + * in a parsed expression. + * + * @param counter a zero-initialized array where the count of each function will be stored + * if you passed 5 functions with 2 arguments to av_expr_parse() + * then for arg=2 this will use upto 5 entries. + * @param size size of array + * @param arg number of arguments the counted functions have + * @return 0 on success, a negative value indicates that no expression or array was passed + * or size was zero + */ +int av_expr_count_func(AVExpr *e, unsigned *counter, int size, int arg); + /** * Free a parsed expression previously created with av_expr_parse(). */ diff --git a/libavutil/log.c b/libavutil/log.c index 93a156b8e4..e8a0db7716 100644 --- a/libavutil/log.c +++ b/libavutil/log.c @@ -226,6 +226,8 @@ static const char *get_level_str(int level) return "quiet"; case AV_LOG_DEBUG: return "debug"; + case AV_LOG_TRACE: + return "trace"; case AV_LOG_VERBOSE: return "verbose"; case AV_LOG_INFO: diff --git a/libavutil/opt.c b/libavutil/opt.c index 9081a593a1..a482febf5f 100644 --- a/libavutil/opt.c +++ b/libavutil/opt.c @@ -446,6 +446,24 @@ static int set_string_sample_fmt(void *obj, const AVOption *o, const char *val, AV_SAMPLE_FMT_NB, av_get_sample_fmt, "sample format"); } +static int set_string_dict(void *obj, const AVOption *o, const char *val, uint8_t **dst) +{ + AVDictionary *options = NULL; + + if (val) { + int ret = av_dict_parse_string(&options, val, "=", ":", 0); + if (ret < 0) { + av_dict_free(&options); + return ret; + } + } + + av_dict_free((AVDictionary **)dst); + *dst = (uint8_t *)options; + + return 0; +} + int av_opt_set(void *obj, const char *name, const char *val, int search_flags) { int ret = 0; @@ -527,6 +545,8 @@ int av_opt_set(void *obj, const char *name, const char *val, int search_flags) return ret; } break; + case AV_OPT_TYPE_DICT: + return set_string_dict(obj, o, val, dst); } av_log(obj, AV_LOG_ERROR, "Invalid option type.\n"); @@ -855,6 +875,12 @@ int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val) i64 = *(int64_t *)dst; ret = snprintf(buf, sizeof(buf), "0x%"PRIx64, i64); break; + case AV_OPT_TYPE_DICT: + if (!*(AVDictionary **)dst && (search_flags & AV_OPT_ALLOW_NULL)) { + *out_val = NULL; + return 0; + } + return av_dict_get_string(*(AVDictionary **)dst, (char **)out_val, '=', ':'); default: return AVERROR(EINVAL); } @@ -1174,6 +1200,9 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit, case AV_OPT_TYPE_BINARY: av_log(av_log_obj, AV_LOG_INFO, "%-12s ", ""); break; + case AV_OPT_TYPE_DICT: + av_log(av_log_obj, AV_LOG_INFO, "%-12s ", ""); + break; case AV_OPT_TYPE_IMAGE_SIZE: av_log(av_log_obj, AV_LOG_INFO, "%-12s ", ""); break; @@ -1247,6 +1276,7 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit, !((opt->type == AV_OPT_TYPE_COLOR || opt->type == AV_OPT_TYPE_IMAGE_SIZE || opt->type == AV_OPT_TYPE_STRING || + opt->type == AV_OPT_TYPE_DICT || opt->type == AV_OPT_TYPE_VIDEO_RATE) && !opt->default_val.str)) { av_log(av_log_obj, AV_LOG_INFO, " (default "); @@ -1297,6 +1327,7 @@ static void opt_list(void *obj, void *av_log_obj, const char *unit, case AV_OPT_TYPE_COLOR: case AV_OPT_TYPE_IMAGE_SIZE: case 
AV_OPT_TYPE_STRING: + case AV_OPT_TYPE_DICT: case AV_OPT_TYPE_VIDEO_RATE: av_log(av_log_obj, AV_LOG_INFO, "\"%s\"", opt->default_val.str); break; @@ -1386,8 +1417,8 @@ void av_opt_set_defaults2(void *s, int mask, int flags) set_string_binary(s, opt, opt->default_val.str, dst); break; case AV_OPT_TYPE_DICT: - /* Cannot set defaults for these types */ - break; + set_string_dict(s, opt, opt->default_val.str, dst); + break; default: av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n", opt->type, opt->name); @@ -1971,9 +2002,23 @@ int av_opt_is_set_to_default(void *obj, const AVOption *o) av_free(tmp.data); return ret; } - case AV_OPT_TYPE_DICT: - /* Binary and dict have not default support yet. Any pointer is not default. */ - return !!(*(void **)dst); + case AV_OPT_TYPE_DICT: { + AVDictionary *dict1 = NULL; + AVDictionary *dict2 = *(AVDictionary **)dst; + AVDictionaryEntry *en1 = NULL; + AVDictionaryEntry *en2 = NULL; + ret = av_dict_parse_string(&dict1, o->default_val.str, "=", ":", 0); + if (ret < 0) { + av_dict_free(&dict1); + return ret; + } + do { + en1 = av_dict_get(dict1, "", en1, AV_DICT_IGNORE_SUFFIX); + en2 = av_dict_get(dict2, "", en2, AV_DICT_IGNORE_SUFFIX); + } while (en1 && en2 && !strcmp(en1->key, en2->key) && !strcmp(en1->value, en2->value)); + av_dict_free(&dict1); + return (!en1 && !en2); + } case AV_OPT_TYPE_IMAGE_SIZE: if (!o->default_val.str || !strcmp(o->default_val.str, "none")) w = h = 0; diff --git a/libavutil/opt.h b/libavutil/opt.h index bc98ab104d..1969c984dd 100644 --- a/libavutil/opt.h +++ b/libavutil/opt.h @@ -670,6 +670,9 @@ const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *pre * scalars or named flags separated by '+' or '-'. Prefixing a flag * with '+' causes it to be set without affecting the other flags; * similarly, '-' unsets a flag. + * If the field is of a dictionary type, it has to be a ':' separated list of + * key=value parameters. Values containing ':' special characters must be + * escaped. * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN * is passed here, then the option may be set on a child of obj. * @@ -730,9 +733,10 @@ int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, in /** * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller * - * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the option has - * AV_OPT_TYPE_STRING or AV_OPT_TYPE_BINARY and is set to NULL, *out_val will be set - * to NULL instead of an allocated empty string. + * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the + * option is of type AV_OPT_TYPE_STRING, AV_OPT_TYPE_BINARY or AV_OPT_TYPE_DICT + * and is set to NULL, *out_val will be set to NULL instead of an allocated + * empty string. 
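
The av_opt_set()/av_opt_get()/av_opt_set_defaults2() hunks above extend the generic option API to AV_OPT_TYPE_DICT: string values are parsed as ':'-separated key=value pairs and serialized back via av_dict_get_string(). A minimal sketch of the resulting behaviour, assuming a hypothetical DictExample context (the struct, the option name "meta" and its default are illustrative and not part of this patch):

    #include <stddef.h>
    #include <stdio.h>
    #include "libavutil/dict.h"
    #include "libavutil/opt.h"

    typedef struct DictExample {
        const AVClass *class;
        AVDictionary  *meta;   /* backed by an AV_OPT_TYPE_DICT option */
    } DictExample;

    static const AVOption dict_example_options[] = {
        { "meta", "key=value metadata", offsetof(DictExample, meta),
          AV_OPT_TYPE_DICT, { .str = "lang=eng" }, 0, 0, 1 },
        { NULL },
    };

    static const AVClass dict_example_class = {
        .class_name = "DictExample",
        .item_name  = av_default_item_name,
        .option     = dict_example_options,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    int main(void)
    {
        DictExample ex = { .class = &dict_example_class };
        uint8_t *serialized = NULL;

        /* With this patch, the "lang=eng" default is parsed into ex.meta;
         * previously dictionary defaults were silently skipped. */
        av_opt_set_defaults(&ex);

        /* ':'-separated key=value pairs are now accepted by av_opt_set(). */
        av_opt_set(&ex, "meta", "lang=ger:title=demo", 0);

        /* ...and av_opt_get() serializes the dictionary back to a string. */
        if (av_opt_get(&ex, "meta", 0, &serialized) >= 0)
            printf("meta = %s\n", serialized);

        av_free(serialized);
        av_opt_free(&ex);
        return 0;
    }
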
*/ int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h index d78e863d4b..37ecebd501 100644 --- a/libavutil/pixfmt.h +++ b/libavutil/pixfmt.h @@ -257,18 +257,18 @@ enum AVPixelFormat { AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range - AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ - AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ - AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ - AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ - AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ - AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ - AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ - AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ - AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ - AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ - AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ - AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing diff --git a/libavutil/tests/opt.c b/libavutil/tests/opt.c index f4cfa590aa..3134ffd354 100644 --- a/libavutil/tests/opt.c +++ b/libavutil/tests/opt.c @@ -55,6 +55,8 @@ typedef struct TestContext { int bool1; int bool2; int bool3; + AVDictionary *dict1; + AVDictionary *dict2; } TestContext; #define OFFSET(x) offsetof(TestContext, x) @@ -89,6 +91,8 @@ static const AVOption test_options[]= { {"bool1", "set boolean value", 
OFFSET(bool1), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, 1 }, {"bool2", "set boolean value", OFFSET(bool2), AV_OPT_TYPE_BOOL, { .i64 = 1 }, -1, 1, 1 }, {"bool3", "set boolean value", OFFSET(bool3), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, 1 }, + {"dict1", "set dictionary value", OFFSET(dict1), AV_OPT_TYPE_DICT, { .str = NULL}, 0, 0, 1 }, + {"dict2", "set dictionary value", OFFSET(dict2), AV_OPT_TYPE_DICT, { .str = "happy=':-)'"}, 0, 0, 1 }, { NULL }, }; @@ -167,6 +171,47 @@ int main(void) av_opt_free(&test_ctx); } + printf("\nTesting av_opt_get/av_opt_set()\n"); + { + TestContext test_ctx = { 0 }; + TestContext test2_ctx = { 0 }; + const AVOption *o = NULL; + test_ctx.class = &test_class; + test2_ctx.class = &test_class; + + av_log_set_level(AV_LOG_QUIET); + + av_opt_set_defaults(&test_ctx); + + while (o = av_opt_next(&test_ctx, o)) { + char *value1 = NULL; + char *value2 = NULL; + int ret1 = AVERROR_BUG; + int ret2 = AVERROR_BUG; + int ret3 = AVERROR_BUG; + + if (o->type == AV_OPT_TYPE_CONST) + continue; + + ret1 = av_opt_get(&test_ctx, o->name, 0, (uint8_t **)&value1); + if (ret1 >= 0) { + ret2 = av_opt_set(&test2_ctx, o->name, value1, 0); + if (ret2 >= 0) + ret3 = av_opt_get(&test2_ctx, o->name, 0, (uint8_t **)&value2); + } + + printf("name: %-11s get: %-16s set: %-16s get: %-16s %s\n", o->name, + ret1 >= 0 ? value1 : av_err2str(ret1), + ret2 >= 0 ? "OK" : av_err2str(ret2), + ret3 >= 0 ? value2 : av_err2str(ret3), + ret1 >= 0 && ret2 >= 0 && ret3 >= 0 && !strcmp(value1, value2) ? "OK" : "Mismatch"); + av_free(value1); + av_free(value2); + } + av_opt_free(&test_ctx); + av_opt_free(&test2_ctx); + } + printf("\nTest av_opt_serialize()\n"); { TestContext test_ctx = { 0 }; @@ -256,6 +301,7 @@ int main(void) "dbl=101", "bool1=true", "bool2=auto", + "dict1='happy=\\:-):sad=\\:-('", }; test_ctx.class = &test_class; diff --git a/libavutil/utils.c b/libavutil/utils.c index 230081ea47..c1cd452eee 100644 --- a/libavutil/utils.c +++ b/libavutil/utils.c @@ -70,7 +70,7 @@ const char *avutil_configuration(void) const char *avutil_license(void) { #define LICENSE_PREFIX "libavutil license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } const char *av_get_media_type_string(enum AVMediaType media_type) diff --git a/libavutil/version.h b/libavutil/version.h index e18163388d..af8f614aff 100644 --- a/libavutil/version.h +++ b/libavutil/version.h @@ -79,8 +79,8 @@ */ #define LIBAVUTIL_VERSION_MAJOR 56 -#define LIBAVUTIL_VERSION_MINOR 36 -#define LIBAVUTIL_VERSION_MICRO 101 +#define LIBAVUTIL_VERSION_MINOR 38 +#define LIBAVUTIL_VERSION_MICRO 100 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ LIBAVUTIL_VERSION_MINOR, \ diff --git a/libpostproc/postprocess.c b/libpostproc/postprocess.c index 1fef8747c0..e16ef259ce 100644 --- a/libpostproc/postprocess.c +++ b/libpostproc/postprocess.c @@ -108,7 +108,7 @@ const char *postproc_configuration(void) const char *postproc_license(void) { #define LICENSE_PREFIX "libpostproc license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } #define GET_MODE_BUFFER_SIZE 500 diff --git a/libswresample/swresample.c b/libswresample/swresample.c index 1ac5ef9a30..a7bb69dd4f 100644 --- a/libswresample/swresample.c +++ b/libswresample/swresample.c @@ -46,7 +46,7 @@ const char *swresample_configuration(void) const char *swresample_license(void) { #define LICENSE_PREFIX "libswresample 
license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map){ diff --git a/libswscale/aarch64/hscale.S b/libswscale/aarch64/hscale.S index cc78c1901d..8743183b51 100644 --- a/libswscale/aarch64/hscale.S +++ b/libswscale/aarch64/hscale.S @@ -21,39 +21,60 @@ #include "libavutil/aarch64/asm.S" function ff_hscale_8_to_15_neon, export=1 - add x10, x4, w6, UXTW #1 // filter2 = filter + filterSize*2 (x2 because int16) -1: ldr w8, [x5], #4 // filterPos[0] - ldr w9, [x5], #4 // filterPos[1] - movi v4.4S, #0 // val sum part 1 (for dst[0]) - movi v5.4S, #0 // val sum part 2 (for dst[1]) - mov w7, w6 // filterSize counter - mov x13, x3 // srcp = src -2: add x11, x13, w8, UXTW // srcp + filterPos[0] - add x12, x13, w9, UXTW // srcp + filterPos[1] - ld1 {v0.8B}, [x11] // srcp[filterPos[0] + {0..7}] - ld1 {v1.8B}, [x12] // srcp[filterPos[1] + {0..7}] - ld1 {v2.8H}, [x4], #16 // load 8x16-bit filter values, part 1 - ld1 {v3.8H}, [x10], #16 // ditto at filter+filterSize for part 2 - uxtl v0.8H, v0.8B // unpack part 1 to 16-bit - uxtl v1.8H, v1.8B // unpack part 2 to 16-bit - smull v16.4S, v0.4H, v2.4H // v16.i32{0..3} = part 1 of: srcp[filterPos[0] + {0..7}] * filter[{0..7}] - smull v18.4S, v1.4H, v3.4H // v18.i32{0..3} = part 1 of: srcp[filterPos[1] + {0..7}] * filter[{0..7}] - smull2 v17.4S, v0.8H, v2.8H // v17.i32{0..3} = part 2 of: srcp[filterPos[0] + {0..7}] * filter[{0..7}] - smull2 v19.4S, v1.8H, v3.8H // v19.i32{0..3} = part 2 of: srcp[filterPos[1] + {0..7}] * filter[{0..7}] - addp v16.4S, v16.4S, v17.4S // horizontal pair adding of the 8x32-bit multiplied values for part 1 into 4x32-bit - addp v18.4S, v18.4S, v19.4S // horizontal pair adding of the 8x32-bit multiplied values for part 2 into 4x32-bit - add v4.4S, v4.4S, v16.4S // update val accumulator for part 1 - add v5.4S, v5.4S, v18.4S // update val accumulator for part 2 - add x13, x13, #8 // srcp += 8 - subs w7, w7, #8 // processed 8/filterSize + sbfiz x7, x6, #1, #32 // filterSize*2 (*2 because int16) +1: ldr w18, [x5], #4 // filterPos[idx] + ldr w0, [x5], #4 // filterPos[idx + 1] + ldr w11, [x5], #4 // filterPos[idx + 2] + ldr w9, [x5], #4 // filterPos[idx + 3] + mov x16, x4 // filter0 = filter + add x12, x16, x7 // filter1 = filter0 + filterSize*2 + add x13, x12, x7 // filter2 = filter1 + filterSize*2 + add x4, x13, x7 // filter3 = filter2 + filterSize*2 + movi v0.2D, #0 // val sum part 1 (for dst[0]) + movi v1.2D, #0 // val sum part 2 (for dst[1]) + movi v2.2D, #0 // val sum part 3 (for dst[2]) + movi v3.2D, #0 // val sum part 4 (for dst[3]) + add x17, x3, w18, UXTW // srcp + filterPos[0] + add x18, x3, w0, UXTW // srcp + filterPos[1] + add x0, x3, w11, UXTW // srcp + filterPos[2] + add x11, x3, w9, UXTW // srcp + filterPos[3] + mov w15, w6 // filterSize counter +2: ld1 {v4.8B}, [x17], #8 // srcp[filterPos[0] + {0..7}] + ld1 {v5.8H}, [x16], #16 // load 8x16-bit filter values, part 1 + ld1 {v6.8B}, [x18], #8 // srcp[filterPos[1] + {0..7}] + ld1 {v7.8H}, [x12], #16 // load 8x16-bit at filter+filterSize + uxtl v4.8H, v4.8B // unpack part 1 to 16-bit + smlal v0.4S, v4.4H, v5.4H // v0 accumulates srcp[filterPos[0] + {0..3}] * filter[{0..3}] + smlal2 v0.4S, v4.8H, v5.8H // v0 accumulates srcp[filterPos[0] + {4..7}] * filter[{4..7}] + ld1 {v8.8B}, [x0], #8 // srcp[filterPos[2] + {0..7}] + ld1 {v9.8H}, [x13], #16 // load 8x16-bit at filter+2*filterSize + uxtl v6.8H, v6.8B // unpack 
part 2 to 16-bit + smlal v1.4S, v6.4H, v7.4H // v1 accumulates srcp[filterPos[1] + {0..3}] * filter[{0..3}] + uxtl v8.8H, v8.8B // unpack part 3 to 16-bit + smlal v2.4S, v8.4H, v9.4H // v2 accumulates srcp[filterPos[2] + {0..3}] * filter[{0..3}] + smlal2 v2.4S, V8.8H, v9.8H // v2 accumulates srcp[filterPos[2] + {4..7}] * filter[{4..7}] + ld1 {v10.8B}, [x11], #8 // srcp[filterPos[3] + {0..7}] + smlal2 v1.4S, v6.8H, v7.8H // v1 accumulates srcp[filterPos[1] + {4..7}] * filter[{4..7}] + ld1 {v11.8H}, [x4], #16 // load 8x16-bit at filter+3*filterSize + subs w15, w15, #8 // j -= 8: processed 8/filterSize + uxtl v10.8H, v10.8B // unpack part 4 to 16-bit + smlal v3.4S, v10.4H, v11.4H // v3 accumulates srcp[filterPos[3] + {0..3}] * filter[{0..3}] + smlal2 v3.4S, v10.8H, v11.8H // v3 accumulates srcp[filterPos[3] + {4..7}] * filter[{4..7}] b.gt 2b // inner loop if filterSize not consumed completely - mov x4, x10 // filter = filter2 - add x10, x10, w6, UXTW #1 // filter2 += filterSize*2 - addp v4.4S, v4.4S, v5.4S // horizontal pair adding of the 8x32-bit sums into 4x32-bit - addp v4.4S, v4.4S, v4.4S // horizontal pair adding of the 4x32-bit sums into 2x32-bit - sqshrn v4.4H, v4.4S, #7 // shift and clip the 2x16-bit final values - st1 {v4.S}[0], [x1], #4 // write to destination - subs w2, w2, #2 // dstW -= 2 + addp v0.4S, v0.4S, v0.4S // part0 horizontal pair adding + addp v1.4S, v1.4S, v1.4S // part1 horizontal pair adding + addp v2.4S, v2.4S, v2.4S // part2 horizontal pair adding + addp v3.4S, v3.4S, v3.4S // part3 horizontal pair adding + addp v0.4S, v0.4S, v0.4S // part0 horizontal pair adding + addp v1.4S, v1.4S, v1.4S // part1 horizontal pair adding + addp v2.4S, v2.4S, v2.4S // part2 horizontal pair adding + addp v3.4S, v3.4S, v3.4S // part3 horizontal pair adding + zip1 v0.4S, v0.4S, v1.4S // part01 = zip values from part0 and part1 + zip1 v2.4S, v2.4S, v3.4S // part23 = zip values from part2 and part3 + mov v0.d[1], v2.d[0] // part0123 = zip values from part01 and part23 + subs w2, w2, #4 // dstW -= 4 + sqshrn v0.4H, v0.4S, #7 // shift and clip the 2x16-bit final values + st1 {v0.4H}, [x1], #8 // write to destination part0123 b.gt 1b // loop until end of line ret endfunc diff --git a/libswscale/aarch64/output.S b/libswscale/aarch64/output.S index 90d3b57b10..25bf28b6e4 100644 --- a/libswscale/aarch64/output.S +++ b/libswscale/aarch64/output.S @@ -38,29 +38,21 @@ function ff_yuv2planeX_8_neon, export=1 add x12, x12, x7, lsl #1 // &src[j+1][i] ld1 {v5.8H}, [x11] // read 8x16-bit @ src[j ][i + {0..7}]: A,B,C,D,E,F,G,H ld1 {v6.8H}, [x12] // read 8x16-bit @ src[j+1][i + {0..7}]: I,J,K,L,M,N,O,P - ldr w11, [x10], #4 // read 2x16-bit coeffs (X, Y) at (filter[j], filter[j+1]) - zip1 v16.8H, v5.8H, v6.8H // A,I,B,J,C,K,D,L - zip2 v17.8H, v5.8H, v6.8H // E,M,F,N,F,O,H,P - dup v7.4S, w11 // X,Y,X,Y,X,Y,X,Y - smull v18.4S, v16.4H, v7.4H // A.X I.Y B.X J.Y - smull v20.4S, v17.4H, v7.4H // E.X M.Y F.X N.Y - smull2 v19.4S, v16.8H, v7.8H // C.X K.Y D.X L.Y - smull2 v21.4S, v17.8H, v7.8H // G.X O.Y H.X P.Y - addp v16.4S, v18.4S, v19.4S // A.X+I.Y B.X+J.Y C.X+K.Y D.X+L.Y - addp v17.4S, v20.4S, v21.4S // E.X+M.Y F.X+N.Y F.X+O.Y H.X+P.Y - add v3.4S, v3.4S, v16.4S // update val accumulator for part 1 - add v4.4S, v4.4S, v17.4S // update val accumulator for part 2 + ld1r {v7.8H}, [x10], #2 // read 1x16-bit coeff X at filter[j ] and duplicate across lanes + ld1r {v8.8H}, [x10], #2 // read 1x16-bit coeff Y at filter[j+1] and duplicate across lanes + smlal v3.4S, v5.4H, v7.4H // val0 += {A,B,C,D} * X + smlal2 v4.4S, 
v5.8H, v7.8H // val1 += {E,F,G,H} * X + smlal v3.4S, v6.4H, v8.4H // val0 += {I,J,K,L} * Y + smlal2 v4.4S, v6.8H, v8.8H // val1 += {M,N,O,P} * Y subs w8, w8, #2 // tmpfilterSize -= 2 b.gt 3b // loop until filterSize consumed - sshr v3.4S, v3.4S, #19 // val>>19 (part 1) - sshr v4.4S, v4.4S, #19 // val>>19 (part 2) - sqxtun v3.4H, v3.4S // clip16(val>>19) (part 1) - sqxtun v4.4H, v4.4S // clip16(val>>19) (part 2) - mov v3.D[1], v4.D[0] // merge part 1 and part 2 - uqxtn v3.8B, v3.8H // clip8(val>>19) - st1 {v3.1D}, [x3], #8 // write to destination - add x7, x7, #8 // i += 8 + + sqshrun v3.4h, v3.4s, #16 // clip16(val0>>16) + sqshrun2 v3.8h, v4.4s, #16 // clip16(val1>>16) + uqshrn v3.8b, v3.8h, #3 // clip8(val>>19) + st1 {v3.8b}, [x3], #8 // write to destination subs w4, w4, #8 // dstW -= 8 + add x7, x7, #8 // i += 8 b.gt 2b // loop until width consumed ret endfunc diff --git a/libswscale/utils.c b/libswscale/utils.c index 57c4fd2b0f..b2c08a5983 100644 --- a/libswscale/utils.c +++ b/libswscale/utils.c @@ -86,7 +86,7 @@ const char *swscale_configuration(void) const char *swscale_license(void) { #define LICENSE_PREFIX "libswscale license: " - return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; + return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1]; } typedef struct FormatEntry { @@ -95,7 +95,7 @@ typedef struct FormatEntry { uint8_t is_supported_endianness :1; } FormatEntry; -static const FormatEntry format_entries[AV_PIX_FMT_NB] = { +static const FormatEntry format_entries[] = { [AV_PIX_FMT_YUV420P] = { 1, 1 }, [AV_PIX_FMT_YUYV422] = { 1, 1 }, [AV_PIX_FMT_RGB24] = { 1, 1 }, @@ -270,19 +270,19 @@ static const FormatEntry format_entries[AV_PIX_FMT_NB] = { int sws_isSupportedInput(enum AVPixelFormat pix_fmt) { - return (unsigned)pix_fmt < AV_PIX_FMT_NB ? + return (unsigned)pix_fmt < FF_ARRAY_ELEMS(format_entries) ? format_entries[pix_fmt].is_supported_in : 0; } int sws_isSupportedOutput(enum AVPixelFormat pix_fmt) { - return (unsigned)pix_fmt < AV_PIX_FMT_NB ? + return (unsigned)pix_fmt < FF_ARRAY_ELEMS(format_entries) ? format_entries[pix_fmt].is_supported_out : 0; } int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt) { - return (unsigned)pix_fmt < AV_PIX_FMT_NB ? + return (unsigned)pix_fmt < FF_ARRAY_ELEMS(format_entries) ? format_entries[pix_fmt].is_supported_endianness : 0; } diff --git a/tests/Makefile b/tests/Makefile index 0ef571b067..e5f41008d4 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -85,6 +85,16 @@ FILTERDEMDECENCMUX = $(call ALLYES, $(1:%=%_FILTER) $(2)_DEMUXER $(3)_DECODER $( PARSERDEMDEC = $(call ALLYES, $(1)_PARSER $(2)_DEMUXER $(3)_DECODER) +# Allow overriding CONFIG_LARGE_TESTS via LARGE_TESTS, if set on the +# make command line. 
+ifeq ($(LARGE_TESTS), yes) +CONFIG_LARGE_TESTS:=yes +!CONFIG_LARGE_TESTS:= +else ifeq ($(LARGE_TESTS), no) +CONFIG_LARGE_TESTS:= +!CONFIG_LARGE_TESTS:=yes +endif + include $(SRC_PATH)/$(APITESTSDIR)/Makefile include $(SRC_PATH)/$(DNNTESTSDIR)/Makefile diff --git a/tests/checkasm/aacpsdsp.c b/tests/checkasm/aacpsdsp.c index ea68b39fa9..2ceef4341f 100644 --- a/tests/checkasm/aacpsdsp.c +++ b/tests/checkasm/aacpsdsp.c @@ -17,6 +17,7 @@ */ #include "libavcodec/aacpsdsp.h" +#include "libavutil/intfloat.h" #include "checkasm.h" @@ -34,6 +35,16 @@ #define EPS 0.005 +static void clear_less_significant_bits(INTFLOAT *buf, int len, int bits) +{ + int i; + for (i = 0; i < len; i++) { + union av_intfloat32 u = { .f = buf[i] }; + u.i &= (0xffffffff << bits); + buf[i] = u.f; + } +} + static void test_add_squares(void) { LOCAL_ALIGNED_16(INTFLOAT, dst0, [BUF_SIZE]); @@ -198,6 +209,13 @@ static void test_stereo_interpolate(PSDSPContext *psdsp) randomize((INTFLOAT *)h, 2 * 4); randomize((INTFLOAT *)h_step, 2 * 4); + // Clear the least significant 14 bits of h_step, to avoid + // divergence when accumulating h_step BUF_SIZE times into + // a float variable which may or may not have extra intermediate + // precision. Therefore clear roughly log2(BUF_SIZE) less + // significant bits, to get the same result regardless of any + // extra precision in the accumulator. + clear_less_significant_bits((INTFLOAT *)h_step, 2 * 4, 14); call_ref(l0, r0, h, h_step, BUF_SIZE); call_new(l1, r1, h, h_step, BUF_SIZE); diff --git a/tests/checkasm/af_afir.c b/tests/checkasm/af_afir.c index e3fb76e8e0..8d1f815469 100644 --- a/tests/checkasm/af_afir.c +++ b/tests/checkasm/af_afir.c @@ -53,7 +53,19 @@ static void test_fcmul_add(const float *src0, const float *src1, const float *sr call_ref(cdst, src1, src2, LEN); call_new(odst, src1, src2, LEN); for (i = 0; i <= LEN*2; i++) { - if (!float_near_abs_eps(cdst[i], odst[i], 6.2e-05)) { + int idx = i & ~1; + float cre = src2[idx]; + float cim = src2[idx + 1]; + float tre = src1[idx]; + float tim = src1[idx + 1]; + double t = fabs(src0[i]) + + fabs(tre) + fabs(tim) + fabs(cre) + fabs(cim) + + fabs(tre * cre) + fabs(tim * cim) + + fabs(tre * cim) + fabs(tim * cre) + + fabs(tre * cre - tim * cim) + + fabs(tre * cim + tim * cre) + + fabs(cdst[i]) + 1.0; + if (!float_near_abs_eps(cdst[i], odst[i], t * 2 * FLT_EPSILON)) { fprintf(stderr, "%d: %- .12f - %- .12f = % .12g\n", i, cdst[i], odst[i], cdst[i] - odst[i]); fail(); diff --git a/tests/checkasm/float_dsp.c b/tests/checkasm/float_dsp.c index 2abe4eccbd..a1616a61a8 100644 --- a/tests/checkasm/float_dsp.c +++ b/tests/checkasm/float_dsp.c @@ -51,7 +51,8 @@ static void test_vector_fmul(const float *src0, const float *src1) call_ref(cdst, src0, src1, LEN); call_new(odst, src0, src1, LEN); for (i = 0; i < LEN; i++) { - if (!float_near_abs_eps(cdst[i], odst[i], FLT_EPSILON)) { + double t = fabs(src0[i]) + fabs(src1[i]) + fabs(src0[i] * src1[i]) + 1.0; + if (!float_near_abs_eps(cdst[i], odst[i], t * 2 * FLT_EPSILON)) { fprintf(stderr, "%d: %- .12f - %- .12f = % .12g\n", i, cdst[i], odst[i], cdst[i] - odst[i]); fail(); @@ -73,7 +74,8 @@ static void test_vector_dmul(const double *src0, const double *src1) call_ref(cdst, src0, src1, LEN); call_new(odst, src0, src1, LEN); for (i = 0; i < LEN; i++) { - if (!double_near_abs_eps(cdst[i], odst[i], DBL_EPSILON)) { + double t = fabs(src0[i]) + fabs(src1[i]) + fabs(src0[i] * src1[i]) + 1.0; + if (!double_near_abs_eps(cdst[i], odst[i], t * 2 * DBL_EPSILON)) { fprintf(stderr, "%d: %- .12f - %- .12f = % 
.12g\n", i, cdst[i], odst[i], cdst[i] - odst[i]); fail(); @@ -117,7 +119,8 @@ static void test_vector_fmul_scalar(const float *src0, const float *src1) call_ref(cdst, src0, src1[0], LEN); call_new(odst, src0, src1[0], LEN); for (i = 0; i < LEN; i++) { - if (!float_near_abs_eps(cdst[i], odst[i], FLT_EPSILON)) { + double t = fabs(src0[i]) + fabs(src1[0]) + fabs(src0[i] * src1[0]) + 1.0; + if (!float_near_abs_eps(cdst[i], odst[i], t * 2 * FLT_EPSILON)) { fprintf(stderr, "%d: %- .12f - %- .12f = % .12g\n", i, cdst[i], odst[i], cdst[i] - odst[i]); fail(); diff --git a/tests/fate-run.sh b/tests/fate-run.sh index aec12c16a3..552b3dd9df 100755 --- a/tests/fate-run.sh +++ b/tests/fate-run.sh @@ -154,7 +154,7 @@ md5pipe(){ md5(){ encfile="${outdir}/${test}.out" cleanfiles="$cleanfiles $encfile" - ffmpeg "$@" $encfile + ffmpeg "$@" $(target_path $encfile) do_md5sum $encfile | awk '{print $1}' } @@ -223,7 +223,7 @@ transcode(){ -f $enc_fmt -y $tencfile || return do_md5sum $encfile echo $(wc -c $encfile) - ffmpeg $DEC_OPTS -i $encfile $ENC_OPTS $FLAGS $final_decode \ + ffmpeg $DEC_OPTS -i $tencfile $ENC_OPTS $FLAGS $final_decode \ -f framecrc - || return } @@ -239,7 +239,7 @@ stream_remux(){ tencfile=$(target_path $encfile) ffmpeg -f $src_fmt -i $tsrcfile $stream_maps -codec copy $FLAGS \ -f $enc_fmt -y $tencfile || return - ffmpeg $DEC_OPTS -i $encfile $ENC_OPTS $FLAGS $final_decode \ + ffmpeg $DEC_OPTS -i $tencfile $ENC_OPTS $FLAGS $final_decode \ -f framecrc - || return } @@ -420,16 +420,16 @@ gapless(){ cleanfiles="$cleanfiles $decfile1 $decfile2 $decfile3" # test packet data - ffmpeg $extra_args -i "$sample" -bitexact -c:a copy -f framecrc -y $decfile1 + ffmpeg $extra_args -i "$sample" -bitexact -c:a copy -f framecrc -y $(target_path $decfile1) do_md5sum $decfile1 # test decoded (and cut) data ffmpeg $extra_args -i "$sample" -bitexact -f wav md5: # the same as above again, with seeking to the start - ffmpeg $extra_args -ss 0 -seek_timestamp 1 -i "$sample" -bitexact -c:a copy -f framecrc -y $decfile2 + ffmpeg $extra_args -ss 0 -seek_timestamp 1 -i "$sample" -bitexact -c:a copy -f framecrc -y $(target_path $decfile2) do_md5sum $decfile2 ffmpeg $extra_args -ss 0 -seek_timestamp 1 -i "$sample" -bitexact -f wav md5: # test packet data, with seeking to a specific position - ffmpeg $extra_args -ss 5 -seek_timestamp 1 -i "$sample" -bitexact -c:a copy -f framecrc -y $decfile3 + ffmpeg $extra_args -ss 5 -seek_timestamp 1 -i "$sample" -bitexact -c:a copy -f framecrc -y $(target_path $decfile3) do_md5sum $decfile3 } @@ -442,19 +442,19 @@ gaplessenc(){ cleanfiles="$cleanfiles $file1" # test data after reencoding - ffmpeg -i "$sample" -bitexact -map 0:a -c:a $codec -f $format -y "$file1" - probegaplessinfo "$file1" + ffmpeg -i "$sample" -bitexact -map 0:a -c:a $codec -f $format -y "$(target_path "$file1")" + probegaplessinfo "$(target_path "$file1")" } audio_match(){ sample=$(target_path $1) - trefile=$(target_path $2) + trefile=$2 extra_args=$3 decfile="${outdir}/${test}.wav" cleanfiles="$cleanfiles $decfile" - ffmpeg -i "$sample" -bitexact $extra_args -y $decfile + ffmpeg -i "$sample" -bitexact $extra_args -y $(target_path $decfile) tests/audiomatch${HOSTEXECSUF} $decfile $trefile } @@ -471,10 +471,10 @@ concat(){ awk "{gsub(/%SRCFILE%/, \"$sample\"); print}" $template > $concatfile if [ "$mode" = "md5" ]; then - run ffprobe${PROGSUF}${EXECSUF} -bitexact -show_streams -show_packets -v 0 -fflags keepside -safe 0 $extra_args $concatfile | tr -d '\r' > $packetfile + run ffprobe${PROGSUF}${EXECSUF} -bitexact 
-show_streams -show_packets -v 0 -safe 0 $extra_args $(target_path $concatfile) | tr -d '\r' > $packetfile do_md5sum $packetfile else - run ffprobe${PROGSUF}${EXECSUF} -bitexact -show_streams -show_packets -v 0 -of compact=p=0:nk=1 -fflags keepside -safe 0 $extra_args $concatfile + run ffprobe${PROGSUF}${EXECSUF} -bitexact -show_streams -show_packets -v 0 -of compact=p=0:nk=1 -safe 0 $extra_args $(target_path $concatfile) fi } diff --git a/tests/fate/cbs.mak b/tests/fate/cbs.mak index 824e6c86ab..ad9c85863b 100644 --- a/tests/fate/cbs.mak +++ b/tests/fate/cbs.mak @@ -34,10 +34,10 @@ FATE_CBS_AV1_SAMPLES = \ seq_hdr_op_param_info.ivf \ switch_frame.ivf -$(foreach N,$(FATE_CBS_AV1_CONFORMANCE_SAMPLES),$(eval $(call FATE_CBS_TEST,av1,$(basename $(N)),av1-test-vectors/$(N),ivf))) -$(foreach N,$(FATE_CBS_AV1_SAMPLES),$(eval $(call FATE_CBS_TEST,av1,$(basename $(N)),av1/$(N),ivf))) +$(foreach N,$(FATE_CBS_AV1_CONFORMANCE_SAMPLES),$(eval $(call FATE_CBS_TEST,av1,$(basename $(N)),av1-test-vectors/$(N),rawvideo))) +$(foreach N,$(FATE_CBS_AV1_SAMPLES),$(eval $(call FATE_CBS_TEST,av1,$(basename $(N)),av1/$(N),rawvideo))) -FATE_CBS_AV1-$(call ALLYES, IVF_DEMUXER AV1_PARSER AV1_METADATA_BSF IVF_MUXER) = $(FATE_CBS_av1) +FATE_CBS_AV1-$(call ALLYES, IVF_DEMUXER AV1_PARSER AV1_METADATA_BSF RAWVIDEO_MUXER) = $(FATE_CBS_av1) FATE_SAMPLES_AVCONV += $(FATE_CBS_AV1-yes) fate-cbs-av1: $(FATE_CBS_AV1-yes) diff --git a/tests/fate/ffmpeg.mak b/tests/fate/ffmpeg.mak index f37ca00d46..967150983a 100644 --- a/tests/fate/ffmpeg.mak +++ b/tests/fate/ffmpeg.mak @@ -69,27 +69,27 @@ fate-unknown_layout-ac3: CMD = md5 \ FATE_STREAMCOPY-$(call ALLYES, EAC3_DEMUXER MOV_MUXER) += fate-copy-trac3074 -fate-copy-trac3074: $(TARGET_SAMPLES)/eac3/csi_miami_stereo_128_spx.eac3 +fate-copy-trac3074: $(SAMPLES)/eac3/csi_miami_stereo_128_spx.eac3 fate-copy-trac3074: CMD = transcode eac3 $(TARGET_SAMPLES)/eac3/csi_miami_stereo_128_spx.eac3\ mp4 "-codec copy -map 0" "-codec copy" FATE_STREAMCOPY-$(call ALLYES, MOV_DEMUXER MOV_MUXER) += fate-copy-trac236 -fate-copy-trac236: $(TARGET_SAMPLES)/mov/fcp_export8-236.mov +fate-copy-trac236: $(SAMPLES)/mov/fcp_export8-236.mov fate-copy-trac236: CMD = transcode mov $(TARGET_SAMPLES)/mov/fcp_export8-236.mov\ mov "-codec copy -map 0" FATE_STREAMCOPY-$(call ALLYES, MPEGTS_DEMUXER MXF_MUXER PCM_S16LE_ENCODER) += fate-copy-trac4914 -fate-copy-trac4914: $(TARGET_SAMPLES)/mpeg2/xdcam8mp2-1s_small.ts +fate-copy-trac4914: $(SAMPLES)/mpeg2/xdcam8mp2-1s_small.ts fate-copy-trac4914: CMD = transcode mpegts $(TARGET_SAMPLES)/mpeg2/xdcam8mp2-1s_small.ts\ mxf "-c:a pcm_s16le -c:v copy" FATE_STREAMCOPY-$(call ALLYES, MPEGTS_DEMUXER AVI_MUXER) += fate-copy-trac4914-avi -fate-copy-trac4914-avi: $(TARGET_SAMPLES)/mpeg2/xdcam8mp2-1s_small.ts +fate-copy-trac4914-avi: $(SAMPLES)/mpeg2/xdcam8mp2-1s_small.ts fate-copy-trac4914-avi: CMD = transcode mpegts $(TARGET_SAMPLES)/mpeg2/xdcam8mp2-1s_small.ts\ avi "-c:a copy -c:v copy" FATE_STREAMCOPY-$(call ALLYES, H264_DEMUXER AVI_MUXER) += fate-copy-trac2211-avi -fate-copy-trac2211-avi: $(TARGET_SAMPLES)/h264/bbc2.sample.h264 +fate-copy-trac2211-avi: $(SAMPLES)/h264/bbc2.sample.h264 fate-copy-trac2211-avi: CMD = transcode "h264 -r 14" $(TARGET_SAMPLES)/h264/bbc2.sample.h264\ avi "-c:a copy -c:v copy" @@ -98,34 +98,34 @@ fate-copy-apng: fate-lavf-apng fate-copy-apng: CMD = transcode apng tests/data/lavf/lavf.apng apng "-c:v copy" FATE_STREAMCOPY-$(call DEMMUX, OGG, OGG) += fate-limited_input_seek fate-limited_input_seek-copyts -fate-limited_input_seek: 
$(TARGET_SAMPLES)/vorbis/moog_small.ogg +fate-limited_input_seek: $(SAMPLES)/vorbis/moog_small.ogg fate-limited_input_seek: CMD = md5 -ss 1.5 -t 1.3 -i $(TARGET_SAMPLES)/vorbis/moog_small.ogg -c:a copy -fflags +bitexact -f ogg -fate-limited_input_seek-copyts: $(TARGET_SAMPLES)/vorbis/moog_small.ogg +fate-limited_input_seek-copyts: $(SAMPLES)/vorbis/moog_small.ogg fate-limited_input_seek-copyts: CMD = md5 -ss 1.5 -t 1.3 -i $(TARGET_SAMPLES)/vorbis/moog_small.ogg -c:a copy -copyts -fflags +bitexact -f ogg FATE_STREAMCOPY-$(call ALLYES, MOV_DEMUXER MOV_MUXER) += fate-copy-psp -fate-copy-psp: $(TARGET_SAMPLES)/h264/wwwq_cut.mp4 +fate-copy-psp: $(SAMPLES)/h264/wwwq_cut.mp4 fate-copy-psp: CMD = transcode "mov" $(TARGET_SAMPLES)/h264/wwwq_cut.mp4\ psp "-c copy" "-codec copy" FATE_STREAMCOPY-$(CONFIG_FLV_DEMUXER) += fate-ffmpeg-streamloop -fate-ffmpeg-streamloop: $(TARGET_SAMPLES)/flv/streamloop.flv +fate-ffmpeg-streamloop: $(SAMPLES)/flv/streamloop.flv fate-ffmpeg-streamloop: CMD = framemd5 -stream_loop 2 -i $(TARGET_SAMPLES)/flv/streamloop.flv -c copy fate-streamcopy: $(FATE_STREAMCOPY-yes) FATE_SAMPLES_FFMPEG-$(call ALLYES, MOV_DEMUXER MATROSKA_MUXER) += fate-rgb24-mkv -fate-rgb24-mkv: $(TARGET_SAMPLES)/qtrle/aletrek-rle.mov +fate-rgb24-mkv: $(SAMPLES)/qtrle/aletrek-rle.mov fate-rgb24-mkv: CMD = transcode "mov" $(TARGET_SAMPLES)/qtrle/aletrek-rle.mov\ matroska "-c:v rawvideo -pix_fmt rgb24 -allow_raw_vfw 1 -frames:v 1" FATE_SAMPLES_FFMPEG-$(call ALLYES, AAC_DEMUXER MOV_MUXER) += fate-adtstoasc_ticket3715 -fate-adtstoasc_ticket3715: $(TARGET_SAMPLES)/aac/foo.aac +fate-adtstoasc_ticket3715: $(SAMPLES)/aac/foo.aac fate-adtstoasc_ticket3715: CMD = transcode "aac" $(TARGET_SAMPLES)/aac/foo.aac\ mov "-c copy -bsf:a aac_adtstoasc" "-codec copy" FATE_SAMPLES_FFMPEG-$(call ALLYES, MOV_DEMUXER H264_MUXER H264_MP4TOANNEXB_BSF) += fate-h264_mp4toannexb_ticket2991 -fate-h264_mp4toannexb_ticket2991: $(TARGET_SAMPLES)/h264/wwwq_cut.mp4 +fate-h264_mp4toannexb_ticket2991: $(SAMPLES)/h264/wwwq_cut.mp4 fate-h264_mp4toannexb_ticket2991: CMD = transcode "mp4" $(TARGET_SAMPLES)/h264/wwwq_cut.mp4\ h264 "-c:v copy -bsf:v h264_mp4toannexb" "-codec copy" @@ -136,13 +136,13 @@ fate-h264_mp4toannexb_ticket5927_2: CMD = transcode "mp4" $(TARGET_SAMPLES)/h264 h264 "-c:v copy -an" "-c:v copy" FATE_SAMPLES_FFMPEG-$(call ALLYES, MPEGPS_DEMUXER AVI_MUXER REMOVE_EXTRADATA_BSF) += fate-ffmpeg-bsf-remove-k fate-ffmpeg-bsf-remove-r fate-ffmpeg-bsf-remove-e -fate-ffmpeg-bsf-remove-k: $(TARGET_SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg +fate-ffmpeg-bsf-remove-k: $(SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg fate-ffmpeg-bsf-remove-k: CMD = transcode "mpeg" $(TARGET_SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg\ avi "-vbsf remove_extra=k" "-codec copy" -fate-ffmpeg-bsf-remove-r: $(TARGET_SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg +fate-ffmpeg-bsf-remove-r: $(SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg fate-ffmpeg-bsf-remove-r: CMD = transcode "mpeg" $(TARGET_SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg\ avi "-vbsf remove_extra=keyframe" "-codec copy" -fate-ffmpeg-bsf-remove-e: $(TARGET_SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg +fate-ffmpeg-bsf-remove-e: $(SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg fate-ffmpeg-bsf-remove-e: CMD = transcode "mpeg" $(TARGET_SAMPLES)/mpeg2/matrixbench_mpeg2.lq1.mpg\ avi "-vbsf remove_extra=e" "-codec copy" @@ -150,7 +150,7 @@ fate-ffmpeg-bsf-remove-e: CMD = transcode "mpeg" $(TARGET_SAMPLES)/mpeg2/matrixb FATE_SAMPLES_FFMPEG-yes += $(FATE_STREAMCOPY-yes) FATE_TIME_BASE-$(call ALLYES, MPEGPS_DEMUXER MXF_MUXER) += fate-time_base 
-fate-time_base: $(TARGET_SAMPLES)/mpeg2/dvd_single_frame.vob +fate-time_base: $(SAMPLES)/mpeg2/dvd_single_frame.vob fate-time_base: CMD = md5 -i $(TARGET_SAMPLES)/mpeg2/dvd_single_frame.vob -an -sn -c:v copy -r 25 -time_base 1001:30000 -fflags +bitexact -f mxf FATE_SAMPLES_FFMPEG-yes += $(FATE_TIME_BASE-yes) diff --git a/tests/fate/ffprobe.mak b/tests/fate/ffprobe.mak index d5fb05cd68..c867bebf41 100644 --- a/tests/fate/ffprobe.mak +++ b/tests/fate/ffprobe.mak @@ -1,5 +1,5 @@ FFPROBE_TEST_FILE=tests/data/ffprobe-test.nut -FFPROBE_COMMAND=ffprobe$(PROGSSUF)$(EXESUF) -show_streams -show_packets -show_format -show_frames -bitexact $(FFPROBE_TEST_FILE) +FFPROBE_COMMAND=ffprobe$(PROGSSUF)$(EXESUF) -show_streams -show_packets -show_format -show_frames -bitexact $(TARGET_PATH)/$(FFPROBE_TEST_FILE) -print_filename $(FFPROBE_TEST_FILE) FATE_FFPROBE-$(CONFIG_AVDEVICE) += fate-ffprobe_compact fate-ffprobe_compact: $(FFPROBE_TEST_FILE) diff --git a/tests/fate/filter-audio.mak b/tests/fate/filter-audio.mak index fed2644ccf..79b1536df0 100644 --- a/tests/fate/filter-audio.mak +++ b/tests/fate/filter-audio.mak @@ -186,10 +186,13 @@ FATE_AFILTER_SAMPLES-$(call FILTERDEMDECENCMUX, STEREOTOOLS, WAV, PCM_S16LE, PCM fate-filter-stereotools: SRC = $(TARGET_SAMPLES)/audio-reference/luckynight_2ch_44kHz_s16.wav fate-filter-stereotools: CMD = framecrc -i $(SRC) -frames:a 20 -af stereotools=mlev=0.015625 -FATE_AFILTER-$(call FILTERDEMDECENCMUX, TREMOLO, WAV, PCM_S16LE, PCM_S16LE, WAV) += fate-filter-tremolo +FATE_AFILTER_SAMPLES-$(call FILTERDEMDECENCMUX, TREMOLO, WAV, PCM_S16LE, PCM_S16LE, WAV) += fate-filter-tremolo fate-filter-tremolo: tests/data/asynth-44100-2.wav fate-filter-tremolo: SRC = $(TARGET_PATH)/tests/data/asynth-44100-2.wav -fate-filter-tremolo: CMD = framecrc -i $(SRC) -frames:a 20 -af tremolo +fate-filter-tremolo: CMD = ffmpeg -i $(SRC) -frames:a 20 -af tremolo -f wav -f s16le - +fate-filter-tremolo: REF = $(SAMPLES)/filter/tremolo.pcm +fate-filter-tremolo: CMP = oneoff +fate-filter-tremolo: CMP_UNIT = s16 FATE_AFILTER-$(call FILTERDEMDECENCMUX, COMPAND, WAV, PCM_S16LE, PCM_S16LE, WAV) += fate-filter-compand fate-filter-compand: tests/data/asynth-44100-2.wav diff --git a/tests/fate/filter-video.mak b/tests/fate/filter-video.mak index b5206909aa..02986b569c 100644 --- a/tests/fate/filter-video.mak +++ b/tests/fate/filter-video.mak @@ -1,6 +1,6 @@ FATE_FILTER_SAMPLES-$(call ALLYES, SMJPEG_DEMUXER MJPEG_DECODER PERMS_FILTER OWDENOISE_FILTER) += fate-filter-owdenoise-sample fate-filter-owdenoise-sample: CMD = ffmpeg -idct simple -i $(TARGET_SAMPLES)/smjpeg/scenwin.mjpg -vf "trim=duration=0.5,perms=random,owdenoise=10:20:20:enable=not(between(t\,0.2\,1.2))" -an -f rawvideo - -fate-filter-owdenoise-sample: REF = $(TARGET_SAMPLES)/filter-reference/owdenoise-scenwin.raw +fate-filter-owdenoise-sample: REF = $(SAMPLES)/filter-reference/owdenoise-scenwin.raw fate-filter-owdenoise-sample: CMP_TARGET = 1 fate-filter-owdenoise-sample: FUZZ = 3539 fate-filter-owdenoise-sample: CMP = oneoff @@ -259,6 +259,10 @@ FATE_FILTER_SAMPLES-$(call ALLYES, PNG_DECODER APNG_DEMUXER FORMAT_FILTER COLOR_ FATE_FILTER_VSYNTH-$(CONFIG_PHASE_FILTER) += fate-filter-phase fate-filter-phase: CMD = framecrc -c:v pgmyuv -i $(SRC) -vf phase +FATE_FILTER_VSYNTH-$(CONFIG_DNN_PROCESSING_FILTER) += fate-filter-dnn_processing-halve_first_channel_float fate-filter-dnn_processing-halve_gray_float +fate-filter-dnn_processing-halve_first_channel_float: CMD = framecrc -c:v pgmyuv -i $(SRC) -vf 
format=rgb24,dnn_processing=model=$(TARGET_SAMPLES)/dnn_processing/halve_first_channel_float.model:input=dnn_in:output=dnn_out:dnn_backend=native +fate-filter-dnn_processing-halve_gray_float: CMD = framecrc -c:v pgmyuv -i $(SRC) -vf format=grayf32,dnn_processing=model=$(TARGET_SAMPLES)/dnn_processing/halve_gray_float.model:input=dnn_in:output=dnn_out:dnn_backend=native + FATE_REMOVEGRAIN += fate-filter-removegrain-mode-00 fate-filter-removegrain-mode-00: CMD = framecrc -c:v pgmyuv -i $(SRC) -frames:v 1 -vf removegrain=0:0:0 @@ -484,7 +488,7 @@ fate-filter-scale2ref_keep_aspect: CMD = framemd5 -frames:v 5 -filter_complex_sc FATE_FILTER_VSYNTH-$(CONFIG_SCALE_FILTER) += fate-filter-scalechroma fate-filter-scalechroma: tests/data/vsynth1.yuv -fate-filter-scalechroma: CMD = framecrc -flags bitexact -s 352x288 -pix_fmt yuv444p -i tests/data/vsynth1.yuv -pix_fmt yuv420p -sws_flags +bitexact -vf scale=out_v_chr_pos=33:out_h_chr_pos=151 +fate-filter-scalechroma: CMD = framecrc -flags bitexact -s 352x288 -pix_fmt yuv444p -i $(TARGET_PATH)/tests/data/vsynth1.yuv -pix_fmt yuv420p -sws_flags +bitexact -vf scale=out_v_chr_pos=33:out_h_chr_pos=151 FATE_FILTER_VSYNTH-$(CONFIG_VFLIP_FILTER) += fate-filter-vflip fate-filter-vflip: CMD = video_filter "vflip" diff --git a/tests/fate/gapless.mak b/tests/fate/gapless.mak index 91fddb4130..3a82c3ef68 100644 --- a/tests/fate/gapless.mak +++ b/tests/fate/gapless.mak @@ -2,7 +2,7 @@ FATE_GAPLESS-$(CONFIG_MP3_DEMUXER) += fate-gapless-mp3 fate-gapless-mp3: CMD = gapless $(TARGET_SAMPLES)/gapless/gapless.mp3 "-c:a mp3" FATE_GAPLESS-$(CONFIG_MP3_DEMUXER) += fate-audiomatch-square-mp3 -fate-audiomatch-square-mp3: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/square3.mp3 $(TARGET_SAMPLES)/audiomatch/square3.wav +fate-audiomatch-square-mp3: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/square3.mp3 $(SAMPLES)/audiomatch/square3.wav FATE_GAPLESS-$(CONFIG_MOV_DEMUXER) += fate-audiomatch-square-aac FATE_GAPLESS-$(CONFIG_MOV_DEMUXER) += fate-audiomatch-afconvert-16000-mono-lc-adts fate-audiomatch-afconvert-16000-mono-lc-m4a @@ -40,57 +40,57 @@ FATE_GAPLESS-$(CONFIG_MOV_DEMUXER) += fate-audiomatch-nero-44100-stereo-lc-m4a FATE_GAPLESS-$(CONFIG_MOV_DEMUXER) += fate-audiomatch-quicktime7-44100-stereo-lc-mp4 fate-audiomatch-quicktimeX-44100-stereo-lc-m4a -fate-audiomatch-square-aac: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/square3.m4a $(TARGET_SAMPLES)/audiomatch/square3.wav +fate-audiomatch-square-aac: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/square3.m4a $(SAMPLES)/audiomatch/square3.wav -fate-audiomatch-afconvert-16000-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav -fate-audiomatch-afconvert-16000-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav -fate-audiomatch-afconvert-16000-mono-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_he.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav "-ac 1 -ar 16000" -fate-audiomatch-afconvert-16000-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav "-ac 1 -ar 16000" -fate-audiomatch-afconvert-16000-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav 
-fate-audiomatch-afconvert-16000-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav -fate-audiomatch-afconvert-16000-stereo-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" -fate-audiomatch-afconvert-16000-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" -fate-audiomatch-afconvert-16000-stereo-he2-adts:CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he2.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" -fate-audiomatch-afconvert-16000-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he2.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" -fate-audiomatch-afconvert-44100-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-afconvert-44100-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-afconvert-44100-mono-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_he.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav "-ac 1" -fate-audiomatch-afconvert-44100-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav "-ac 1" -fate-audiomatch-afconvert-44100-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-afconvert-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-afconvert-44100-stereo-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-afconvert-44100-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-afconvert-44100-stereo-he2-adts:CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he2.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-afconvert-44100-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he2.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-afconvert-16000-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_lc.adts $(SAMPLES)/audiomatch/tones_16000_mono.wav +fate-audiomatch-afconvert-16000-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_lc.m4a $(SAMPLES)/audiomatch/tones_16000_mono.wav +fate-audiomatch-afconvert-16000-mono-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_he.adts $(SAMPLES)/audiomatch/tones_16000_mono.wav "-ac 1 -ar 16000" +fate-audiomatch-afconvert-16000-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_mono_aac_he.m4a 
$(SAMPLES)/audiomatch/tones_16000_mono.wav "-ac 1 -ar 16000" +fate-audiomatch-afconvert-16000-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_lc.adts $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-afconvert-16000-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-afconvert-16000-stereo-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he.adts $(SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" +fate-audiomatch-afconvert-16000-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" +fate-audiomatch-afconvert-16000-stereo-he2-adts:CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he2.adts $(SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" +fate-audiomatch-afconvert-16000-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_16000_stereo_aac_he2.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav "-ar 16000" +fate-audiomatch-afconvert-44100-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_lc.adts $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-afconvert-44100-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-afconvert-44100-mono-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_he.adts $(SAMPLES)/audiomatch/tones_44100_mono.wav "-ac 1" +fate-audiomatch-afconvert-44100-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_mono_aac_he.m4a $(SAMPLES)/audiomatch/tones_44100_mono.wav "-ac 1" +fate-audiomatch-afconvert-44100-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_lc.adts $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-afconvert-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-afconvert-44100-stereo-he-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he.adts $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-afconvert-44100-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-afconvert-44100-stereo-he2-adts:CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he2.adts $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-afconvert-44100-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_afconvert_44100_stereo_aac_he2.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-dolby-44100-mono-lc-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_mono_aac_lc.mp4 $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-dolby-44100-mono-he-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_mono_aac_he.mp4 $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav "-ac 1" -fate-audiomatch-dolby-44100-stereo-lc-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_stereo_aac_lc.mp4 $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav 
-fate-audiomatch-dolby-44100-stereo-he-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_stereo_aac_he.mp4 $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-dolby-44100-stereo-he2-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_stereo_aac_he2.mp4 $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-dolby-44100-mono-lc-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_mono_aac_lc.mp4 $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-dolby-44100-mono-he-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_mono_aac_he.mp4 $(SAMPLES)/audiomatch/tones_44100_mono.wav "-ac 1" +fate-audiomatch-dolby-44100-stereo-lc-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_stereo_aac_lc.mp4 $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-dolby-44100-stereo-he-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_stereo_aac_he.mp4 $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-dolby-44100-stereo-he2-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_dolby_44100_stereo_aac_he2.mp4 $(SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-faac-16000-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_mono_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav -fate-audiomatch-faac-16000-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_mono_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav -fate-audiomatch-faac-16000-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_stereo_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav -fate-audiomatch-faac-16000-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav -fate-audiomatch-faac-44100-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_mono_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-faac-44100-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_mono_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-faac-44100-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_stereo_aac_lc.adts $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-faac-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-faac-16000-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_mono_aac_lc.adts $(SAMPLES)/audiomatch/tones_16000_mono.wav +fate-audiomatch-faac-16000-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_mono_aac_lc.m4a $(SAMPLES)/audiomatch/tones_16000_mono.wav +fate-audiomatch-faac-16000-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_stereo_aac_lc.adts $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-faac-16000-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_16000_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-faac-44100-mono-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_mono_aac_lc.adts $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-faac-44100-mono-lc-m4a: CMD = audio_match 
$(TARGET_SAMPLES)/audiomatch/tones_faac_44100_mono_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-faac-44100-stereo-lc-adts: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_stereo_aac_lc.adts $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-faac-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_faac_44100_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-nero-16000-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_mono_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav -fate-audiomatch-nero-16000-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_mono_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_mono.wav -fate-audiomatch-nero-16000-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav -fate-audiomatch-nero-16000-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_stereo_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav -fate-audiomatch-nero-16000-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_stereo_aac_he2.m4a $(TARGET_SAMPLES)/audiomatch/tones_16000_stereo.wav -fate-audiomatch-nero-44100-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_mono_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-nero-44100-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_mono_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_mono.wav -fate-audiomatch-nero-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-nero-44100-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_stereo_aac_he.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-nero-44100-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_stereo_aac_he2.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-nero-16000-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_mono_aac_lc.m4a $(SAMPLES)/audiomatch/tones_16000_mono.wav +fate-audiomatch-nero-16000-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_mono_aac_he.m4a $(SAMPLES)/audiomatch/tones_16000_mono.wav +fate-audiomatch-nero-16000-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-nero-16000-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_stereo_aac_he.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-nero-16000-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_16000_stereo_aac_he2.m4a $(SAMPLES)/audiomatch/tones_16000_stereo.wav +fate-audiomatch-nero-44100-mono-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_mono_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-nero-44100-mono-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_mono_aac_he.m4a $(SAMPLES)/audiomatch/tones_44100_mono.wav +fate-audiomatch-nero-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav 
+fate-audiomatch-nero-44100-stereo-he-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_stereo_aac_he.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-nero-44100-stereo-he2-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_nero_44100_stereo_aac_he2.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-quicktime7-44100-stereo-lc-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_quicktime7_44100_stereo_aac_lc.mp4 $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav -fate-audiomatch-quicktimeX-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_quicktimeX_44100_stereo_aac_lc.m4a $(TARGET_SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-quicktime7-44100-stereo-lc-mp4: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_quicktime7_44100_stereo_aac_lc.mp4 $(SAMPLES)/audiomatch/tones_44100_stereo.wav +fate-audiomatch-quicktimeX-44100-stereo-lc-m4a: CMD = audio_match $(TARGET_SAMPLES)/audiomatch/tones_quicktimeX_44100_stereo_aac_lc.m4a $(SAMPLES)/audiomatch/tones_44100_stereo.wav FATE_GAPLESS = $(FATE_GAPLESS-yes) diff --git a/tests/fate/hevc.mak b/tests/fate/hevc.mak index 559c3898bc..35af3e43ac 100644 --- a/tests/fate/hevc.mak +++ b/tests/fate/hevc.mak @@ -226,9 +226,6 @@ $(foreach N,$(HEVC_SAMPLES_444_12BIT),$(eval $(call FATE_HEVC_TEST_444_12BIT,$(N fate-hevc-paramchange-yuv420p-yuv420p10: CMD = framecrc -vsync 0 -i $(TARGET_SAMPLES)/hevc/paramchange_yuv420p_yuv420p10.hevc -sws_flags area+accurate_rnd+bitexact FATE_HEVC += fate-hevc-paramchange-yuv420p-yuv420p10 -fate-hevc-paired-fields: CMD = probeframes -show_entries frame=interlaced_frame,top_field_first $(TARGET_SAMPLES)/hevc/paired_fields.hevc -FATE_HEVC_FFPROBE-$(call DEMDEC, HEVC, HEVC) += fate-hevc-paired-fields - tests/data/hevc-mp4.mov: TAG = GEN tests/data/hevc-mp4.mov: ffmpeg$(PROGSSUF)$(EXESUF) | tests/data $(M)$(TARGET_EXEC) $(TARGET_PATH)/$< \ @@ -250,12 +247,18 @@ FATE_HEVC-$(call DEMDEC, MOV, HEVC) += fate-hevc-extradata-reload fate-hevc-extradata-reload: CMD = framemd5 -i $(TARGET_SAMPLES)/hevc/extradata-reload-multi-stsd.mov -sws_flags bitexact +fate-hevc-paired-fields: CMD = probeframes -show_entries frame=interlaced_frame,top_field_first $(TARGET_SAMPLES)/hevc/paired_fields.hevc +FATE_HEVC_FFPROBE-$(call DEMDEC, HEVC, HEVC) += fate-hevc-paired-fields + fate-hevc-monochrome-crop: CMD = probeframes -show_entries frame=width,height:stream=width,height $(TARGET_SAMPLES)/hevc/hevc-monochrome.hevc FATE_HEVC_FFPROBE-$(call DEMDEC, HEVC, HEVC) += fate-hevc-monochrome-crop fate-hevc-two-first-slice: CMD = threads=2 framemd5 -i $(TARGET_SAMPLES)/hevc/two_first_slice.mp4 -sws_flags bitexact -t 00:02.00 -an FATE_HEVC-$(call DEMDEC, MOV, HEVC) += fate-hevc-two-first-slice +fate-hevc-cabac-tudepth: CMD = framecrc -flags unaligned -i $(TARGET_SAMPLES)/hevc/cbf_cr_cb_TUDepth_4_circle.h265 -pix_fmt yuv444p +FATE_HEVC-$(call DEMDEC, HEVC, HEVC) += fate-hevc-cabac-tudepth + FATE_SAMPLES_AVCONV += $(FATE_HEVC-yes) FATE_SAMPLES_FFPROBE += $(FATE_HEVC_FFPROBE-yes) diff --git a/tests/fate/matroska.mak b/tests/fate/matroska.mak index 99145338ae..4aca4dc8eb 100644 --- a/tests/fate/matroska.mak +++ b/tests/fate/matroska.mak @@ -1,6 +1,11 @@ FATE_MATROSKA-$(call ALLYES, MATROSKA_DEMUXER ZLIB) += fate-matroska-prores-zlib fate-matroska-prores-zlib: CMD = framecrc -i $(TARGET_SAMPLES)/mkv/prores_zlib.mkv -c:v copy +# This tests that the matroska demuxer correctly adds the icpf header atom +# upon demuxing; it also tests bz2 decompression and unknown-length 
cluster. +FATE_MATROSKA-$(call ALLYES, MATROSKA_DEMUXER BZLIB) += fate-matroska-prores-header-insertion-bz2 +fate-matroska-prores-header-insertion-bz2: CMD = framecrc -i $(TARGET_SAMPLES)/mkv/prores_bz2.mkv -map 0 -c copy + # This tests that the matroska demuxer supports modifying the colorspace # properties in remuxing (-c:v copy) # It also tests automatic insertion of the vp9_superframe bitstream filter @@ -17,6 +22,10 @@ fate-matroska-xiph-lacing: CMD = framecrc -i $(TARGET_SAMPLES)/mkv/xiph_lacing.m FATE_MATROSKA-$(call ALLYES, MATROSKA_DEMUXER ZLIB) += fate-matroska-zlib-decompression fate-matroska-zlib-decompression: CMD = framecrc -i $(TARGET_SAMPLES)/mkv/subtitle_zlib.mks -c:s copy +# This tests that the matroska demuxer can decompress lzo compressed tracks. +FATE_MATROSKA-$(call ALLYES, MATROSKA_DEMUXER LZO) += fate-matroska-lzo-decompression +fate-matroska-lzo-decompression: CMD = framecrc -i $(TARGET_SAMPLES)/mkv/lzo.mka -c copy + # This tests that the matroska demuxer correctly propagates # the channel layout contained in vorbis comments in the CodecPrivate # of flac tracks. It also tests header removal compression. diff --git a/tests/fate/mov.mak b/tests/fate/mov.mak index 8ed66cf135..7a721d7c95 100644 --- a/tests/fate/mov.mak +++ b/tests/fate/mov.mak @@ -119,7 +119,7 @@ fate-mov-guess-delay-1: CMD = run ffprobe$(PROGSSUF)$(EXESUF) -show_entries stre fate-mov-guess-delay-2: CMD = run ffprobe$(PROGSSUF)$(EXESUF) -show_entries stream=has_b_frames -select_streams v $(TARGET_SAMPLES)/h264/h264_3bf_pyramid_nobsrestriction.mp4 fate-mov-guess-delay-3: CMD = run ffprobe$(PROGSSUF)$(EXESUF) -show_entries stream=has_b_frames -select_streams v $(TARGET_SAMPLES)/h264/h264_4bf_pyramid_nobsrestriction.mp4 -fate-mov-faststart-4gb-overflow: CMD = run tools/qt-faststart$(EXESUF) $(TARGET_SAMPLES)/mov/faststart-4gb-overflow.mov faststart-4gb-overflow-output.mov > /dev/null ; do_md5sum faststart-4gb-overflow-output.mov | cut -d " " -f1 ; rm faststart-4gb-overflow-output.mov +fate-mov-faststart-4gb-overflow: CMD = run tools/qt-faststart$(EXESUF) $(TARGET_SAMPLES)/mov/faststart-4gb-overflow.mov $(TARGET_PATH)/faststart-4gb-overflow-output.mov > /dev/null ; do_md5sum faststart-4gb-overflow-output.mov | cut -d " " -f1 ; rm faststart-4gb-overflow-output.mov fate-mov-faststart-4gb-overflow: CMP = oneline fate-mov-faststart-4gb-overflow: REF = bc875921f151871e787c4b4023269b29 diff --git a/tests/fate/mpegps.mak b/tests/fate/mpegps.mak index cec1ea77fa..87a8e1480e 100644 --- a/tests/fate/mpegps.mak +++ b/tests/fate/mpegps.mak @@ -1,6 +1,6 @@ # This tests that a 16-bit pcm_dvd stream is correctly remuxed in mpegps FATE_MPEGPS-$(call DEMMUX, MPEGPS, MPEG1SYSTEM) += fate-mpegps-remuxed-pcm-demux -fate-mpegps-remuxed-pcm-demux: $(TARGET_SAMPLES)/mpegps/pcm_aud.mpg +fate-mpegps-remuxed-pcm-demux: $(SAMPLES)/mpegps/pcm_aud.mpg fate-mpegps-remuxed-pcm-demux: CMD = stream_remux "mpeg" "$(TARGET_SAMPLES)/mpegps/pcm_aud.mpg" "mpeg" "-map 0:a:0" "-codec copy" FATE_SAMPLES_FFMPEG += $(FATE_MPEGPS-yes) diff --git a/tests/fate/mxf.mak b/tests/fate/mxf.mak index 62e4ec01cb..4aafc1f578 100644 --- a/tests/fate/mxf.mak +++ b/tests/fate/mxf.mak @@ -34,19 +34,19 @@ fate-mxf-probe-dv25: SRC = $(TARGET_SAMPLES)/mxf/Avid-00005.mxf fate-mxf-probe-dv25: CMD = run $(PROBE_FORMAT_STREAMS_COMMAND) -i "$(SRC)" FATE_MXF_REEL_NAME-$(call ENCDEC2, MPEG2VIDEO, PCM_S16LE, MXF) += fate-mxf-reel_name -fate-mxf-reel_name: $(TARGET_SAMPLES)/mxf/Sony-00001.mxf +fate-mxf-reel_name: $(SAMPLES)/mxf/Sony-00001.mxf fate-mxf-reel_name: CMD = md5 -y 
-i $(TARGET_SAMPLES)/mxf/Sony-00001.mxf -c copy -timecode 00:00:00:00 -metadata "reel_name=test_reel" -fflags +bitexact -f mxf FATE_MXF_USER_COMMENTS-$(call ENCDEC2, MPEG2VIDEO, PCM_S16LE, MXF) += fate-mxf-user-comments -fate-mxf-user-comments: $(TARGET_SAMPLES)/mxf/Sony-00001.mxf +fate-mxf-user-comments: $(SAMPLES)/mxf/Sony-00001.mxf fate-mxf-user-comments: CMD = md5 -y -i $(TARGET_SAMPLES)/mxf/Sony-00001.mxf -c copy -metadata "comment_test=value" -fflags +bitexact -f mxf FATE_MXF_D10_USER_COMMENTS-$(call ENCDEC2, MPEG2VIDEO, PCM_S16LE, MXF) += fate-mxf-d10-user-comments -fate-mxf-d10-user-comments: $(TARGET_SAMPLES)/mxf/Sony-00001.mxf +fate-mxf-d10-user-comments: $(SAMPLES)/mxf/Sony-00001.mxf fate-mxf-d10-user-comments: CMD = md5 -y -i $(TARGET_SAMPLES)/mxf/Sony-00001.mxf -c copy -metadata "comment_test=value" -store_user_comments 1 -fflags +bitexact -f mxf_d10 FATE_MXF_OPATOM_USER_COMMENTS-$(call ENCDEC2, MPEG2VIDEO, PCM_S16LE, MXF) += fate-mxf-opatom-user-comments -fate-mxf-opatom-user-comments: $(TARGET_SAMPLES)/mxf/Sony-00001.mxf +fate-mxf-opatom-user-comments: $(SAMPLES)/mxf/Sony-00001.mxf fate-mxf-opatom-user-comments: CMD = md5 -y -i $(TARGET_SAMPLES)/mxf/Sony-00001.mxf -an -vcodec copy -metadata "comment_test=value" -fflags +bitexact -f mxf_opatom FATE_MXF-$(CONFIG_MXF_DEMUXER) += $(FATE_MXF) diff --git a/tests/fate/seek.mak b/tests/fate/seek.mak index a15c9e919a..98d2b54674 100644 --- a/tests/fate/seek.mak +++ b/tests/fate/seek.mak @@ -64,7 +64,6 @@ FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, ASV1, AVI) += asv1 FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, ASV2, AVI) += asv2 FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, DNXHD, DNXHD) += dnxhd-720p FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, DNXHD, DNXHD) += dnxhd-720p-rd -FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, DNXHD, DNXHD) += dnxhd-4k-hr-lb FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, DNXHD, MOV) += dnxhd-1080i FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, DVVIDEO, DV) += dv FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, DVVIDEO, DV) += dv-411 @@ -80,6 +79,8 @@ FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, JPEGLS, AVI) += jpegls FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, LJPEG MJPEG, AVI) += ljpeg FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, MJPEG, AVI) += mjpeg +FATE_SEEK_VSYNTH_LENA-$(call ALLYES, DNXHD_ENCODER DNXHD_DECODER LARGE_TESTS) += dnxhd-4k-hr-lb + FATE_SEEK_VSYNTH_LENA-$(call ENCDEC, MPEG1VIDEO, MPEG1VIDEO MPEGVIDEO) += \ mpeg1 \ mpeg1b @@ -260,7 +261,7 @@ fate-seek-extra-mp4: CMD = run libavformat/tests/seek$(EXESUF) $(TARGET_SAMPLES fate-seek-empty-edit-mp4: CMD = run libavformat/tests/seek$(EXESUF) $(TARGET_SAMPLES)/mov/empty_edit_5s.mp4 -duration 15 -frames 4 fate-seek-test-iibbibb-mp4: CMD = run libavformat/tests/seek$(EXESUF) $(TARGET_SAMPLES)/mov/test_iibbibb.mp4 -duration 13 -frames 4 fate-seek-test-iibbibb-neg-ctts-mp4: CMD = run libavformat/tests/seek$(EXESUF) $(TARGET_SAMPLES)/mov/test_iibbibb_neg_ctts.mp4 -duration 13 -frames 4 -fate-seek-cache-pipe: CMD = cat $(TARGET_SAMPLES)/gapless/gapless.mp3 | run libavformat/tests/seek$(EXESUF) cache:pipe:0 -read_ahead_limit -1 +fate-seek-cache-pipe: CMD = cat $(SAMPLES)/gapless/gapless.mp3 | run libavformat/tests/seek$(EXESUF) cache:pipe:0 -read_ahead_limit -1 fate-seek-mkv-codec-delay: CMD = run libavformat/tests/seek$(EXESUF) $(TARGET_SAMPLES)/mkv/codec_delay_opus.mkv FATE_SEEK_EXTRA += $(FATE_SEEK_EXTRA-yes) diff --git a/tests/fate/vcodec.mak b/tests/fate/vcodec.mak index 452246689e..fc27da5456 100644 --- a/tests/fate/vcodec.mak +++ b/tests/fate/vcodec.mak @@ -29,13 +29,14 @@ FATE_VCODEC-$(call ENCDEC, DNXHD, DNXHD) += dnxhd-720p \ 
dnxhd-720p-rd \ dnxhd-720p-10bit \ dnxhd-720p-hr-lb \ - dnxhd-4k-hr-lb \ dnxhd-uhd-hr-sq \ - dnxhd-2k-hr-hq \ dnxhd-edge1-hr \ dnxhd-edge2-hr \ dnxhd-edge3-hr +FATE_VCODEC-$(call ALLYES, DNXHD_ENCODER DNXHD_DECODER LARGE_TESTS) += dnxhd-4k-hr-lb \ + dnxhd-2k-hr-hq + FATE_VCODEC-$(call ENCDEC, VC2 DIRAC, MOV) += vc2-420p vc2-420p10 vc2-420p12 \ vc2-422p vc2-422p10 vc2-422p12 \ vc2-444p vc2-444p10 vc2-444p12 \ diff --git a/tests/ref/fate/binsub-movtextenc b/tests/ref/fate/binsub-movtextenc index dacee0931e..78c05f4376 100644 --- a/tests/ref/fate/binsub-movtextenc +++ b/tests/ref/fate/binsub-movtextenc @@ -1 +1 @@ -66b25412f7ca699ee525ba162246edb6 +35adf776cd73e808186ae7124445f4b8 diff --git a/tests/ref/fate/cbs-av1-av1-1-b10-23-film_grain-50 b/tests/ref/fate/cbs-av1-av1-1-b10-23-film_grain-50 index 832c682108..500e655803 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b10-23-film_grain-50 +++ b/tests/ref/fate/cbs-av1-av1-1-b10-23-film_grain-50 @@ -1 +1 @@ -5fe6622ac6cb22dd10dfbe9e70e82d4e +0ab934a437181d0275dc6c26bb9f6281 diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-02-allintra b/tests/ref/fate/cbs-av1-av1-1-b8-02-allintra index 9a127ff90c..11abf2f336 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-02-allintra +++ b/tests/ref/fate/cbs-av1-av1-1-b8-02-allintra @@ -1 +1 @@ -b905c6f2bf68fddbf58a50f940c5f5a2 +134b447b04086088de4da127a97731f3 diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-03-sizedown b/tests/ref/fate/cbs-av1-av1-1-b8-03-sizedown index 6d771a2331..7188d16dad 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-03-sizedown +++ b/tests/ref/fate/cbs-av1-av1-1-b8-03-sizedown @@ -1 +1 @@ -4db2f13b7028a2bb6a5a067badf1b0ad +e5924930773efdbbd82da02c96747f27 diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-03-sizeup b/tests/ref/fate/cbs-av1-av1-1-b8-03-sizeup index 689934c2fb..9d767bbede 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-03-sizeup +++ b/tests/ref/fate/cbs-av1-av1-1-b8-03-sizeup @@ -1 +1 @@ -964ea060d898a4358708b3f83ecda4b5 +0348fba6ebf6caadfe80b19a6ad93caa diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-04-cdfupdate b/tests/ref/fate/cbs-av1-av1-1-b8-04-cdfupdate index f0a2481f8a..9325deac36 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-04-cdfupdate +++ b/tests/ref/fate/cbs-av1-av1-1-b8-04-cdfupdate @@ -1 +1 @@ -e24ce6a099ce9389f06606b0a7653007 +aec87cd950fb985b1e345d0366709aea diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-05-mv b/tests/ref/fate/cbs-av1-av1-1-b8-05-mv index b3350a283b..aa4ab5138b 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-05-mv +++ b/tests/ref/fate/cbs-av1-av1-1-b8-05-mv @@ -1 +1 @@ -b39c0edaf5695367ec07eeffce57303d +33f548eeef87e12b93b9bf4a3b79c70e diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-06-mfmv b/tests/ref/fate/cbs-av1-av1-1-b8-06-mfmv index 929a2a4498..ede34aede0 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-06-mfmv +++ b/tests/ref/fate/cbs-av1-av1-1-b8-06-mfmv @@ -1 +1 @@ -95c1fcf60b4fc4530d38f0896c783e28 +2e20870d44ba5ec5a8e1450b287e20b4 diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L1T2 b/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L1T2 index 1c3cc5cebd..d17f202fdc 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L1T2 +++ b/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L1T2 @@ -1 +1 @@ -16f91c11586a53336ee8b2e57a6503c9 +f7138eaa1e572260a8a34f73f91e058a diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T1 b/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T1 index 09368ac3fd..a23f3cbd4e 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T1 +++ b/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T1 @@ -1 +1 @@ -e7a413248005a06a98443c9ef645f92b +4f51af7abcf75eba35ab1c4796793681 diff --git 
a/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T2 b/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T2 index c2faf1b5ce..7b6a77d980 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T2 +++ b/tests/ref/fate/cbs-av1-av1-1-b8-22-svc-L2T2 @@ -1 +1 @@ -75e1296365303b39e642ff1a933cac4f +d52adb1719a0422782e40352e44c6cb0 diff --git a/tests/ref/fate/cbs-av1-av1-1-b8-23-film_grain-50 b/tests/ref/fate/cbs-av1-av1-1-b8-23-film_grain-50 index 2a8fcfdfdf..c0daa8d1cb 100644 --- a/tests/ref/fate/cbs-av1-av1-1-b8-23-film_grain-50 +++ b/tests/ref/fate/cbs-av1-av1-1-b8-23-film_grain-50 @@ -1 +1 @@ -93e578da4b2bdb70ce0a92262da930ec +99a635753d7e4e7deb99fd2ba866818e diff --git a/tests/ref/fate/cbs-av1-decode_model b/tests/ref/fate/cbs-av1-decode_model index cca6426090..3d7695804f 100644 --- a/tests/ref/fate/cbs-av1-decode_model +++ b/tests/ref/fate/cbs-av1-decode_model @@ -1 +1 @@ -228a6ee5204a4d6fb00c22080e8153bc +171ebf527c4cd57179d6a4e5c4f23ce4 diff --git a/tests/ref/fate/cbs-av1-frames_refs_short_signaling b/tests/ref/fate/cbs-av1-frames_refs_short_signaling index eee02e5122..15a76aef88 100644 --- a/tests/ref/fate/cbs-av1-frames_refs_short_signaling +++ b/tests/ref/fate/cbs-av1-frames_refs_short_signaling @@ -1 +1 @@ -ebe808041a023ba46249cc676a89bb51 +d1b05cf934aeda64b25a93423904c14d diff --git a/tests/ref/fate/cbs-av1-non_uniform_tiling b/tests/ref/fate/cbs-av1-non_uniform_tiling index 0d0ebc2cbd..257ae07d61 100644 --- a/tests/ref/fate/cbs-av1-non_uniform_tiling +++ b/tests/ref/fate/cbs-av1-non_uniform_tiling @@ -1 +1 @@ -e82617e3d4e694a5598447606c0fcda7 +3e204ee8a71273cf0247f48e977e64b7 diff --git a/tests/ref/fate/cbs-av1-seq_hdr_op_param_info b/tests/ref/fate/cbs-av1-seq_hdr_op_param_info index 75da9c5f9a..85d4f9b071 100644 --- a/tests/ref/fate/cbs-av1-seq_hdr_op_param_info +++ b/tests/ref/fate/cbs-av1-seq_hdr_op_param_info @@ -1 +1 @@ -d3aa6616d22c983ae0ad04100cd00698 +10e7bdd7cab67f203520e44b28a6477c diff --git a/tests/ref/fate/cbs-av1-switch_frame b/tests/ref/fate/cbs-av1-switch_frame index ff9ccc72b6..07b0fd6b36 100644 --- a/tests/ref/fate/cbs-av1-switch_frame +++ b/tests/ref/fate/cbs-av1-switch_frame @@ -1 +1 @@ -72889950e593695af54575e414470314 +156b5297ca32c18183ca41a102a09a02 diff --git a/tests/ref/fate/copy-psp b/tests/ref/fate/copy-psp index 44ec461265..8b2cef87fa 100644 --- a/tests/ref/fate/copy-psp +++ b/tests/ref/fate/copy-psp @@ -1,4 +1,4 @@ -65a177552e03123c9a62ddb942970d05 *tests/data/fate/copy-psp.psp +8578401522773d0832f538ac915ad0b0 *tests/data/fate/copy-psp.psp 2041445 tests/data/fate/copy-psp.psp #extradata 0: 51, 0xaf6d1012 #extradata 1: 2, 0x00b200a1 diff --git a/tests/ref/fate/filter-dnn_processing-halve_first_channel_float b/tests/ref/fate/filter-dnn_processing-halve_first_channel_float new file mode 100644 index 0000000000..ad7dedaf64 --- /dev/null +++ b/tests/ref/fate/filter-dnn_processing-halve_first_channel_float @@ -0,0 +1,55 @@ +#tb 0: 1/25 +#media_type 0: video +#codec_id 0: rawvideo +#dimensions 0: 352x288 +#sar 0: 0/1 +0, 0, 0, 1, 304128, 0xdecb04c8 +0, 1, 1, 1, 304128, 0x55d9a543 +0, 2, 2, 1, 304128, 0x11ae175c +0, 3, 3, 1, 304128, 0x2cbc8734 +0, 4, 4, 1, 304128, 0xa8e5cb1f +0, 5, 5, 1, 304128, 0xb6d9ce81 +0, 6, 6, 1, 304128, 0x51613881 +0, 7, 7, 1, 304128, 0x09b9614f +0, 8, 8, 1, 304128, 0x172a6901 +0, 9, 9, 1, 304128, 0x30237bf4 +0, 10, 10, 1, 304128, 0xb8646354 +0, 11, 11, 1, 304128, 0xe2dd8145 +0, 12, 12, 1, 304128, 0x6f0b3cea +0, 13, 13, 1, 304128, 0xb1a04427 +0, 14, 14, 1, 304128, 0xe0ab42cf +0, 15, 15, 1, 304128, 0xd4dc3224 +0, 16, 16, 1, 304128, 0xdb3462a1 +0, 17, 17, 
1, 304128, 0x1d9931a1 +0, 18, 18, 1, 304128, 0x17c80e7a +0, 19, 19, 1, 304128, 0x800d587b +0, 20, 20, 1, 304128, 0x97d67832 +0, 21, 21, 1, 304128, 0xffc116db +0, 22, 22, 1, 304128, 0x80510bc1 +0, 23, 23, 1, 304128, 0xbf838895 +0, 24, 24, 1, 304128, 0x3c8ce931 +0, 25, 25, 1, 304128, 0x8640e1cd +0, 26, 26, 1, 304128, 0xa944fcac +0, 27, 27, 1, 304128, 0x7cef3f83 +0, 28, 28, 1, 304128, 0x3c8d60d2 +0, 29, 29, 1, 304128, 0x83fad1ef +0, 30, 30, 1, 304128, 0xbd6031ac +0, 31, 31, 1, 304128, 0x9e63188a +0, 32, 32, 1, 304128, 0x0e45cb70 +0, 33, 33, 1, 304128, 0x02a9ec32 +0, 34, 34, 1, 304128, 0x6ff674cc +0, 35, 35, 1, 304128, 0x7d1143e6 +0, 36, 36, 1, 304128, 0x52c6b9b7 +0, 37, 37, 1, 304128, 0x16696d9c +0, 38, 38, 1, 304128, 0x0612973f +0, 39, 39, 1, 304128, 0xed130f6a +0, 40, 40, 1, 304128, 0xe0051904 +0, 41, 41, 1, 304128, 0x6930d331 +0, 42, 42, 1, 304128, 0x35f722f7 +0, 43, 43, 1, 304128, 0x0adb7e81 +0, 44, 44, 1, 304128, 0x1eb10598 +0, 45, 45, 1, 304128, 0x73ec2115 +0, 46, 46, 1, 304128, 0xf9d24a8c +0, 47, 47, 1, 304128, 0x94a3748d +0, 48, 48, 1, 304128, 0xbaeac1d5 +0, 49, 49, 1, 304128, 0x5493efd3 diff --git a/tests/ref/fate/filter-dnn_processing-halve_gray_float b/tests/ref/fate/filter-dnn_processing-halve_gray_float new file mode 100644 index 0000000000..b33a951c62 --- /dev/null +++ b/tests/ref/fate/filter-dnn_processing-halve_gray_float @@ -0,0 +1,55 @@ +#tb 0: 1/25 +#media_type 0: video +#codec_id 0: rawvideo +#dimensions 0: 352x288 +#sar 0: 0/1 +0, 0, 0, 1, 405504, 0xb3a2caab +0, 1, 1, 1, 405504, 0x878b2fb8 +0, 2, 2, 1, 405504, 0xf39dac33 +0, 3, 3, 1, 405504, 0x94ef53b1 +0, 4, 4, 1, 405504, 0x6ed80f30 +0, 5, 5, 1, 405504, 0x82def1f8 +0, 6, 6, 1, 405504, 0xdafab027 +0, 7, 7, 1, 405504, 0xddef2774 +0, 8, 8, 1, 405504, 0x877771bd +0, 9, 9, 1, 405504, 0xfaf7da12 +0, 10, 10, 1, 405504, 0x484be589 +0, 11, 11, 1, 405504, 0x15d660e2 +0, 12, 12, 1, 405504, 0xa01849c8 +0, 13, 13, 1, 405504, 0x823c33da +0, 14, 14, 1, 405504, 0x3aef6445 +0, 15, 15, 1, 405504, 0x75f2961b +0, 16, 16, 1, 405504, 0x7615ddac +0, 17, 17, 1, 405504, 0xbcf92755 +0, 18, 18, 1, 405504, 0xc84ee75f +0, 19, 19, 1, 405504, 0xf9d11220 +0, 20, 20, 1, 405504, 0x6e1afa4a +0, 21, 21, 1, 405504, 0x47fcc8b2 +0, 22, 22, 1, 405504, 0xa5f618bc +0, 23, 23, 1, 405504, 0x2528509b +0, 24, 24, 1, 405504, 0x0b77ec0b +0, 25, 25, 1, 405504, 0x8d5ea91d +0, 26, 26, 1, 405504, 0xd8a04b22 +0, 27, 27, 1, 405504, 0xbde327bd +0, 28, 28, 1, 405504, 0x9713aeb4 +0, 29, 29, 1, 405504, 0xc168c52e +0, 30, 30, 1, 405504, 0xa3da9f70 +0, 31, 31, 1, 405504, 0xe58350d0 +0, 32, 32, 1, 405504, 0x6c656178 +0, 33, 33, 1, 405504, 0xe9563056 +0, 34, 34, 1, 405504, 0xf1f2c14d +0, 35, 35, 1, 405504, 0x5d59fe20 +0, 36, 36, 1, 405504, 0x5ddb514e +0, 37, 37, 1, 405504, 0x6251dbf8 +0, 38, 38, 1, 405504, 0x94c7d2d6 +0, 39, 39, 1, 405504, 0x1e44022a +0, 40, 40, 1, 405504, 0x51c157a1 +0, 41, 41, 1, 405504, 0xc8991bd1 +0, 42, 42, 1, 405504, 0x046be642 +0, 43, 43, 1, 405504, 0x330da15f +0, 44, 44, 1, 405504, 0xf6428e42 +0, 45, 45, 1, 405504, 0x8d303561 +0, 46, 46, 1, 405504, 0x135ed7d0 +0, 47, 47, 1, 405504, 0x0382f361 +0, 48, 48, 1, 405504, 0xddea2009 +0, 49, 49, 1, 405504, 0xd9b0262b diff --git a/tests/ref/fate/filter-tremolo b/tests/ref/fate/filter-tremolo deleted file mode 100644 index c6cff52c0e..0000000000 --- a/tests/ref/fate/filter-tremolo +++ /dev/null @@ -1,26 +0,0 @@ -#tb 0: 1/44100 -#media_type 0: audio -#codec_id 0: pcm_s16le -#sample_rate 0: 44100 -#channel_layout 0: 3 -#channel_layout_name 0: stereo -0, 0, 0, 1024, 4096, 0x5d3be907 -0, 1024, 1024, 1024, 4096, 
0xea151fbe -0, 2048, 2048, 1024, 4096, 0xa5bc19f4 -0, 3072, 3072, 1024, 4096, 0x8706ec6d -0, 4096, 4096, 1024, 4096, 0x334ff275 -0, 5120, 5120, 1024, 4096, 0xcd0ff7ad -0, 6144, 6144, 1024, 4096, 0x29a1e9c9 -0, 7168, 7168, 1024, 4096, 0x1d41e77f -0, 8192, 8192, 1024, 4096, 0x99e7fe07 -0, 9216, 9216, 1024, 4096, 0x4bbf09ce -0, 10240, 10240, 1024, 4096, 0x94600236 -0, 11264, 11264, 1024, 4096, 0xc8af0c9e -0, 12288, 12288, 1024, 4096, 0x70eef88f -0, 13312, 13312, 1024, 4096, 0xb222ec47 -0, 14336, 14336, 1024, 4096, 0x1071ee27 -0, 15360, 15360, 1024, 4096, 0x7c390bd2 -0, 16384, 16384, 1024, 4096, 0x68bdf655 -0, 17408, 17408, 1024, 4096, 0x810cfacb -0, 18432, 18432, 1024, 4096, 0x9639e41f -0, 19456, 19456, 1024, 4096, 0xa30be70f diff --git a/tests/ref/fate/filter-vectorscope_color b/tests/ref/fate/filter-vectorscope_color index 57875ab418..81d97d4fae 100644 --- a/tests/ref/fate/filter-vectorscope_color +++ b/tests/ref/fate/filter-vectorscope_color @@ -3,6 +3,6 @@ #codec_id 0: rawvideo #dimensions 0: 256x256 #sar 0: 1/1 -0, 0, 0, 1, 196608, 0xf6e3aa30 -0, 1, 1, 1, 196608, 0x5584acf9 -0, 2, 2, 1, 196608, 0xa862775d +0, 0, 0, 1, 196608, 0x7c431d1f +0, 1, 1, 1, 196608, 0xb7e82028 +0, 2, 2, 1, 196608, 0x2feeeb61 diff --git a/tests/ref/fate/filter-vectorscope_color2 b/tests/ref/fate/filter-vectorscope_color2 index 3b2ad90b9c..adbe9e65bc 100644 --- a/tests/ref/fate/filter-vectorscope_color2 +++ b/tests/ref/fate/filter-vectorscope_color2 @@ -3,6 +3,6 @@ #codec_id 0: rawvideo #dimensions 0: 256x256 #sar 0: 1/1 -0, 0, 0, 1, 196608, 0x5e62fae5 -0, 1, 1, 1, 196608, 0x4c27fcbf -0, 2, 2, 1, 196608, 0xb7531088 +0, 0, 0, 1, 196608, 0xdad38823 +0, 1, 1, 1, 196608, 0xeb8589bd +0, 2, 2, 1, 196608, 0x31a79c93 diff --git a/tests/ref/fate/filter-vectorscope_color3 b/tests/ref/fate/filter-vectorscope_color3 index 4baecca921..2b6a6b08fb 100644 --- a/tests/ref/fate/filter-vectorscope_color3 +++ b/tests/ref/fate/filter-vectorscope_color3 @@ -3,6 +3,6 @@ #codec_id 0: rawvideo #dimensions 0: 256x256 #sar 0: 1/1 -0, 0, 0, 1, 196608, 0x83df8770 -0, 1, 1, 1, 196608, 0xa6a674a7 -0, 2, 2, 1, 196608, 0x11757143 +0, 0, 0, 1, 196608, 0x005f14ae +0, 1, 1, 1, 196608, 0x461301a5 +0, 2, 2, 1, 196608, 0x8bbafd4e diff --git a/tests/ref/fate/filter-vectorscope_color4 b/tests/ref/fate/filter-vectorscope_color4 index 21d6762ff2..fcc21b7857 100644 --- a/tests/ref/fate/filter-vectorscope_color4 +++ b/tests/ref/fate/filter-vectorscope_color4 @@ -3,6 +3,6 @@ #codec_id 0: rawvideo #dimensions 0: 256x256 #sar 0: 1/1 -0, 0, 0, 1, 196608, 0x326953c4 -0, 1, 1, 1, 196608, 0x870e1dcc -0, 2, 2, 1, 196608, 0x87cb8800 +0, 0, 0, 1, 196608, 0xaedae0f3 +0, 1, 1, 1, 196608, 0x267baabb +0, 2, 2, 1, 196608, 0x021f141a diff --git a/tests/ref/fate/filter-vectorscope_gray b/tests/ref/fate/filter-vectorscope_gray index a81fbf8f87..78fb1d70c3 100644 --- a/tests/ref/fate/filter-vectorscope_gray +++ b/tests/ref/fate/filter-vectorscope_gray @@ -3,6 +3,6 @@ #codec_id 0: rawvideo #dimensions 0: 256x256 #sar 0: 1/1 -0, 0, 0, 1, 196608, 0x79ba71e2 -0, 1, 1, 1, 196608, 0x909271e2 -0, 2, 2, 1, 196608, 0x143971e2 +0, 0, 0, 1, 196608, 0xf62bff11 +0, 1, 1, 1, 196608, 0x2ffffed1 +0, 2, 2, 1, 196608, 0x8e7efded diff --git a/tests/ref/fate/filter-vectorscope_xy b/tests/ref/fate/filter-vectorscope_xy index 83b719468c..6ab64e1f19 100644 --- a/tests/ref/fate/filter-vectorscope_xy +++ b/tests/ref/fate/filter-vectorscope_xy @@ -3,6 +3,6 @@ #codec_id 0: rawvideo #dimensions 0: 256x256 #sar 0: 1/1 -0, 0, 0, 1, 196608, 0xa2899af1 -0, 1, 1, 1, 196608, 0x26409af1 -0, 2, 2, 1, 196608, 
0xf5209af1 +0, 0, 0, 1, 196608, 0xd2bfcc40 +0, 1, 1, 1, 196608, 0x2851cb74 +0, 2, 2, 1, 196608, 0x48efcc64 diff --git a/tests/ref/fate/filter-waveform_uv b/tests/ref/fate/filter-waveform_uv index 8cb3bc81f8..3636820481 100644 --- a/tests/ref/fate/filter-waveform_uv +++ b/tests/ref/fate/filter-waveform_uv @@ -3,53 +3,53 @@ #codec_id 0: rawvideo #dimensions 0: 352x512 #sar 0: 1/1 -0, 0, 0, 1, 540672, 0x8a2521d6 -0, 1, 1, 1, 540672, 0xb9a321d6 -0, 2, 2, 1, 540672, 0x325421d6 -0, 3, 3, 1, 540672, 0xafee21d2 -0, 4, 4, 1, 540672, 0x172121d6 -0, 5, 5, 1, 540672, 0x24d121d6 -0, 6, 6, 1, 540672, 0x7fec21d6 -0, 7, 7, 1, 540672, 0xa8a021d6 -0, 8, 8, 1, 540672, 0x29fd21d6 -0, 9, 9, 1, 540672, 0x6dfe21d6 -0, 10, 10, 1, 540672, 0xe39821d6 -0, 11, 11, 1, 540672, 0x83f521d6 -0, 12, 12, 1, 540672, 0x57aa21d6 -0, 13, 13, 1, 540672, 0x67b221d6 -0, 14, 14, 1, 540672, 0x535821d6 -0, 15, 15, 1, 540672, 0xb8ac21d6 -0, 16, 16, 1, 540672, 0x27f621d6 -0, 17, 17, 1, 540672, 0x775221d6 -0, 18, 18, 1, 540672, 0x8e6621d6 -0, 19, 19, 1, 540672, 0x74c921d6 -0, 20, 20, 1, 540672, 0x04cd21d6 -0, 21, 21, 1, 540672, 0xccd421d6 -0, 22, 22, 1, 540672, 0x317221d6 -0, 23, 23, 1, 540672, 0xd79321d6 -0, 24, 24, 1, 540672, 0xa2ac21d6 -0, 25, 25, 1, 540672, 0x7f0a21d6 -0, 26, 26, 1, 540672, 0x483521d6 -0, 27, 27, 1, 540672, 0xb65721d6 -0, 28, 28, 1, 540672, 0xb77021d6 -0, 29, 29, 1, 540672, 0x9fd521d6 -0, 30, 30, 1, 540672, 0xb72121d6 -0, 31, 31, 1, 540672, 0x540221d6 -0, 32, 32, 1, 540672, 0xa34121d6 -0, 33, 33, 1, 540672, 0xe01421d6 -0, 34, 34, 1, 540672, 0x6fc721d6 -0, 35, 35, 1, 540672, 0x7fa621d6 -0, 36, 36, 1, 540672, 0xc48c21d6 -0, 37, 37, 1, 540672, 0x40f021d6 -0, 38, 38, 1, 540672, 0xdf3f21d6 -0, 39, 39, 1, 540672, 0xb04321d6 -0, 40, 40, 1, 540672, 0x222821d6 -0, 41, 41, 1, 540672, 0x2a5521d6 -0, 42, 42, 1, 540672, 0x6a4621be -0, 43, 43, 1, 540672, 0xed7f21d6 -0, 44, 44, 1, 540672, 0xb16521d6 -0, 45, 45, 1, 540672, 0x9f5621d6 -0, 46, 46, 1, 540672, 0x204321d6 -0, 47, 47, 1, 540672, 0xc26e21d6 -0, 48, 48, 1, 540672, 0x3e8321d6 -0, 49, 49, 1, 540672, 0xaaee21d6 +0, 0, 0, 1, 540672, 0xe33821d6 +0, 1, 1, 1, 540672, 0x12c521d6 +0, 2, 2, 1, 540672, 0x8b6721d6 +0, 3, 3, 1, 540672, 0x6fd321d6 +0, 4, 4, 1, 540672, 0x703421d6 +0, 5, 5, 1, 540672, 0x7de421d6 +0, 6, 6, 1, 540672, 0xd8ff21d6 +0, 7, 7, 1, 540672, 0x01c221d6 +0, 8, 8, 1, 540672, 0x831021d6 +0, 9, 9, 1, 540672, 0xc71121d6 +0, 10, 10, 1, 540672, 0x3cba21d6 +0, 11, 11, 1, 540672, 0xdd0821d6 +0, 12, 12, 1, 540672, 0xb0bd21d6 +0, 13, 13, 1, 540672, 0xc0c521d6 +0, 14, 14, 1, 540672, 0xac6b21d6 +0, 15, 15, 1, 540672, 0x11ce21d6 +0, 16, 16, 1, 540672, 0x810921d6 +0, 17, 17, 1, 540672, 0xd06521d6 +0, 18, 18, 1, 540672, 0xe77921d6 +0, 19, 19, 1, 540672, 0xcddc21d6 +0, 20, 20, 1, 540672, 0x5de021d6 +0, 21, 21, 1, 540672, 0x25f621d6 +0, 22, 22, 1, 540672, 0x8a8521d6 +0, 23, 23, 1, 540672, 0x30b521d6 +0, 24, 24, 1, 540672, 0xfbbf21d6 +0, 25, 25, 1, 540672, 0xd81d21d6 +0, 26, 26, 1, 540672, 0xa14821d6 +0, 27, 27, 1, 540672, 0x0f7921d6 +0, 28, 28, 1, 540672, 0x109221d6 +0, 29, 29, 1, 540672, 0xf8e821d6 +0, 30, 30, 1, 540672, 0x104321d6 +0, 31, 31, 1, 540672, 0xad1521d6 +0, 32, 32, 1, 540672, 0xfc5421d6 +0, 33, 33, 1, 540672, 0x393621d6 +0, 34, 34, 1, 540672, 0xc8da21d6 +0, 35, 35, 1, 540672, 0xd8b921d6 +0, 36, 36, 1, 540672, 0x1dae21d6 +0, 37, 37, 1, 540672, 0x9a0321d6 +0, 38, 38, 1, 540672, 0x386121d6 +0, 39, 39, 1, 540672, 0x096521d6 +0, 40, 40, 1, 540672, 0x7b3b21d6 +0, 41, 41, 1, 540672, 0x836821d6 +0, 42, 42, 1, 540672, 0x97bd21d6 +0, 43, 43, 1, 540672, 0x46a121d6 +0, 44, 44, 1, 
540672, 0x0a8721d6 +0, 45, 45, 1, 540672, 0xf86921d6 +0, 46, 46, 1, 540672, 0x795621d6 +0, 47, 47, 1, 540672, 0x1b9021d6 +0, 48, 48, 1, 540672, 0x979621d6 +0, 49, 49, 1, 540672, 0x041021d6 diff --git a/tests/ref/fate/hevc-cabac-tudepth b/tests/ref/fate/hevc-cabac-tudepth new file mode 100644 index 0000000000..ad874c3dde --- /dev/null +++ b/tests/ref/fate/hevc-cabac-tudepth @@ -0,0 +1,6 @@ +#tb 0: 1/25 +#media_type 0: video +#codec_id 0: rawvideo +#dimensions 0: 64x64 +#sar 0: 0/1 +0, 0, 0, 1, 12288, 0x0127a0d9 diff --git a/tests/ref/fate/matroska-lzo-decompression b/tests/ref/fate/matroska-lzo-decompression new file mode 100644 index 0000000000..241d5adf44 --- /dev/null +++ b/tests/ref/fate/matroska-lzo-decompression @@ -0,0 +1,10 @@ +#tb 0: 11337/500000000 +#media_type 0: audio +#codec_id 0: pcm_s16le +#sample_rate 0: 44100 +#channel_layout 0: 3 +#channel_layout_name 0: stereo +0, 0, 0, 4096, 16384, 0x00000000 +0, 4096, 4096, 4096, 16384, 0xad7eebf4 +0, 8192, 8192, 4096, 16384, 0x1d1ff9d9 +0, 12288, 12288, 4097, 16384, 0xf1d9e2e2 diff --git a/tests/ref/fate/matroska-prores-header-insertion-bz2 b/tests/ref/fate/matroska-prores-header-insertion-bz2 new file mode 100644 index 0000000000..63a59f9b8d --- /dev/null +++ b/tests/ref/fate/matroska-prores-header-insertion-bz2 @@ -0,0 +1,16 @@ +#extradata 0: 4, 0x0402019c +#extradata 1: 4, 0x0402019c +#tb 0: 1/1000 +#media_type 0: video +#codec_id 0: prores +#dimensions 0: 720x480 +#sar 0: 186/157 +#tb 1: 1/1000 +#media_type 1: video +#codec_id 1: prores +#dimensions 1: 720x480 +#sar 1: 186/157 +0, 0, 0, 0, 4304, 0x3625b1fc +1, 0, 0, 0, 4304, 0x3625b1fc +0, 42, 42, 0, 4304, 0x3625b1fc +1, 42, 42, 0, 4304, 0x3625b1fc diff --git a/tests/ref/fate/movenc b/tests/ref/fate/movenc index 5e8f324ea3..637a347e05 100644 --- a/tests/ref/fate/movenc +++ b/tests/ref/fate/movenc @@ -2,127 +2,127 @@ write_data len 36, time nopts, type header atom ftyp write_data len 2389, time nopts, type header atom - write_data len 788, time 1000000, type sync atom moof write_data len 110, time nopts, type trailer atom - -17a37691eba8b858cf15e60aa9a7dbf7 3323 non-empty-moov +66cf48604f039aa9a51711786f5c8778 3323 non-empty-moov write_data len 36, time nopts, type header atom ftyp write_data len 2721, time nopts, type header atom - write_data len 908, time 966667, type sync atom moof write_data len 110, time nopts, type trailer atom - -0026ffe059c06c592021f972bf2c5e79 3775 non-empty-moov-elst +04b2e86f455af94f9258b8d66dbf71f5 3775 non-empty-moov-elst write_data len 36, time nopts, type header atom ftyp write_data len 2629, time nopts, type header atom - write_data len 908, time 1000000, type sync atom moof write_data len 110, time nopts, type trailer atom - -c184e168ac1e5bb3d9c70e580ab6179c 3683 non-empty-moov-no-elst -write_data len 20, time nopts, type header atom ftyp +e9f6fa032d6d8265d67aef5de81a48bf 3683 non-empty-moov-no-elst +write_data len 24, time nopts, type header atom ftyp write_data len 1171, time nopts, type header atom - write_data len 728, time 0, type sync atom moof write_data len 828, time nopts, type unknown atom - -write_data len 728, time 1046439, type sync atom moof +write_data len 728, time 999999, type sync atom moof write_data len 812, time nopts, type unknown atom - write_data len 148, time nopts, type trailer atom - -49bf122c4c732a344ef68b58acd19be5 4435 ismv +da105e0b2c19079519c6eed7d5a1151c 4439 ismv write_data len 36, time nopts, type header atom ftyp write_data len 1123, time nopts, type header atom - write_data len 796, time 0, type sync atom moof 
write_data len 788, time 1000000, type sync atom moof write_data len 148, time nopts, type trailer atom - -ed8506ebfce4c41732205ae26a4759fd 2891 empty-moov +e6a4b15443d006efd727a80f6624b7db 2891 empty-moov write_data len 36, time nopts, type header atom ftyp write_data len 1123, time nopts, type header atom - write_data len 1068, time 0, type sync atom moof write_data len 908, time 1000000, type sync atom moof write_data len 148, time nopts, type trailer atom - -1844ee6d19fd1e6daf2655632cf26310 3283 empty-moov-no-elst +800f854aff2ac76dfaddebd0562c75b9 3283 empty-moov-no-elst write_data len 36, time nopts, type header atom ftyp write_data len 1123, time nopts, type header atom - write_data len 900, time -33333, type sync atom moof write_data len 908, time 966667, type sync atom moof write_data len 148, time nopts, type trailer atom - -139b27dbe2a80c2dc088d0c755f26033 3115 empty-moov-no-elst-no-adjust +eca1a945c9063dab0858af6b85925533 3115 empty-moov-no-elst-no-adjust write_data len 1159, time nopts, type header atom ftyp write_data len 796, time 0, type sync atom moof write_data len 788, time 1000000, type sync atom moof write_data len 148, time nopts, type trailer atom - -ed8506ebfce4c41732205ae26a4759fd 2891 delay-moov +e6a4b15443d006efd727a80f6624b7db 2891 delay-moov write_data len 1231, time nopts, type header atom ftyp write_data len 916, time -33333, type sync atom moof write_data len 908, time 966667, type sync atom moof write_data len 148, time nopts, type trailer atom - -3ece148745cd64b4428530a4d1080a2d 3203 delay-moov-elst +c2ecdbc80668fcee73f5a039e2dba579 3203 delay-moov-elst write_data len 1195, time nopts, type header atom ftyp write_data len 836, time 0, type sync atom moof write_data len 67, time nopts, type trailer atom - -9562946a369e6fb570fb2fd7aa2fe728 2098 delay-moov-empty-track +95d6f59a7354b0cfe7ce49927baada4e 2098 delay-moov-empty-track write_data len 1195, time nopts, type header atom ftyp write_data len 360, time 0, type sync atom moof write_data len 360, time 1000000, type sync atom moof write_data len 86, time nopts, type trailer atom - -4c7832b81836331c6c37155dc31d95be 2001 delay-moov-empty-track-flush +8805d72a27b340ea229c16edde78f974 2001 delay-moov-empty-track-flush write_data len 36, time nopts, type header atom ftyp write_data len 1123, time nopts, type header atom - -b7e3c768b9094ebe2fda44979a7f8985 1159 empty-moov-header +351ae2c8b6d35d98b4848c309cce6704 1159 empty-moov-header write_data len 796, time 0, type sync atom moof write_data len 788, time 1000000, type sync atom moof a0165f4a26a409212b0946e981bdefb9 1584 empty-moov-content write_data len 148, time nopts, type trailer atom - write_data len 1159, time nopts, type header atom ftyp -b7e3c768b9094ebe2fda44979a7f8985 1159 delay-moov-header +351ae2c8b6d35d98b4848c309cce6704 1159 delay-moov-header write_data len 796, time 0, type sync atom moof write_data len 788, time 1000000, type sync atom moof a0165f4a26a409212b0946e981bdefb9 1584 delay-moov-content write_data len 148, time nopts, type trailer atom - -write_data len 24, time nopts, type header atom - +write_data len 28, time nopts, type header atom - write_data len 1123, time nopts, type header atom - write_data len 884, time 0, type sync atom sidx write_data len 876, time 1000000, type sync atom sidx 272a474cfd2a68cc5f05b426b14a2b7d 876 empty-moov-second-frag write_data len 148, time nopts, type trailer atom - -write_data len 24, time nopts, type header atom - +write_data len 28, time nopts, type header atom - write_data len 1123, time nopts, type 
header atom - write_data len 876, time 1000000, type sync atom sidx 272a474cfd2a68cc5f05b426b14a2b7d 876 empty-moov-second-frag-discont write_data len 110, time nopts, type trailer atom - -write_data len 1219, time nopts, type header atom - +write_data len 1223, time nopts, type header atom - write_data len 876, time 1000000, type sync atom sidx 272a474cfd2a68cc5f05b426b14a2b7d 876 delay-moov-second-frag-discont write_data len 110, time nopts, type trailer atom - -write_data len 1219, time nopts, type header atom ftyp -6ec3698bcc86013e0016e3d47d230363 1219 delay-moov-elst-init +write_data len 1223, time nopts, type header atom ftyp +b3811928793ed0749927eb2f7958421c 1223 delay-moov-elst-init write_data len 988, time -33333, type sync atom sidx write_data len 996, time 966667, type sync atom sidx fcae8f40e015b59aabc8d4a99a759ca1 996 delay-moov-elst-second-frag write_data len 148, time nopts, type trailer atom - -write_data len 1219, time nopts, type header atom ftyp -6ec3698bcc86013e0016e3d47d230363 1219 delay-moov-elst-init-discont +write_data len 1223, time nopts, type header atom ftyp +b3811928793ed0749927eb2f7958421c 1223 delay-moov-elst-init-discont write_data len 996, time 966667, type sync atom sidx fcae8f40e015b59aabc8d4a99a759ca1 996 delay-moov-elst-second-frag-discont write_data len 110, time nopts, type trailer atom - -write_data len 1219, time nopts, type header atom ftyp -c3681590a292cb9ca19a5a982e530166 1219 delay-moov-elst-signal-init +write_data len 1223, time nopts, type header atom ftyp +041ac8efc35a0d023c26d05eedb20403 1223 delay-moov-elst-signal-init write_data len 1004, time -33333, type sync atom sidx write_data len 996, time 966667, type sync atom sidx aa5462cc0d2144f72154d9c309edb57d 996 delay-moov-elst-signal-second-frag write_data len 148, time nopts, type trailer atom - -write_data len 1219, time nopts, type header atom ftyp -c3681590a292cb9ca19a5a982e530166 1219 delay-moov-elst-signal-init-discont +write_data len 1223, time nopts, type header atom ftyp +041ac8efc35a0d023c26d05eedb20403 1223 delay-moov-elst-signal-init-discont write_data len 996, time 966667, type sync atom sidx aa5462cc0d2144f72154d9c309edb57d 996 delay-moov-elst-signal-second-frag-discont write_data len 110, time nopts, type trailer atom - -write_data len 1243, time nopts, type header atom ftyp -dac14c8795d5cbd91ae770c6e2880c62 1243 delay-moov-elst-signal-init-discont-largets +write_data len 1247, time nopts, type header atom ftyp +80511a51d1ac9cde62337eed7176ae03 1247 delay-moov-elst-signal-init-discont-largets write_data len 996, time 279621233333, type sync atom sidx 41cac4c3df656a87bb38363fdcd745e6 996 delay-moov-elst-signal-second-frag-discont-largets write_data len 110, time nopts, type trailer atom - -write_data len 1219, time nopts, type header atom ftyp +write_data len 1223, time nopts, type header atom ftyp write_data len 2572, time -333333, type sync atom sidx write_data len 996, time 5166667, type sync atom sidx write_data len 148, time nopts, type trailer atom - -f12d4a0e054abcc508cc0d28cb320e57 4935 vfr -write_data len 1219, time nopts, type header atom ftyp +c3eb39921c90724784d1ab84fac58b34 4939 vfr +write_data len 1223, time nopts, type header atom ftyp write_data len 2572, time -333333, type sync atom sidx write_data len 996, time 5166667, type sync atom sidx write_data len 148, time nopts, type trailer atom - -f12d4a0e054abcc508cc0d28cb320e57 4935 vfr-noduration +c3eb39921c90724784d1ab84fac58b34 4939 vfr-noduration write_data len 1231, time nopts, type header atom ftyp 
write_data len 1500, time -333333, type sync atom moof write_data len 1500, time nopts, type unknown atom - @@ -131,7 +131,7 @@ write_data len 1500, time 9666667, type sync atom moof write_data len 1500, time nopts, type unknown atom - write_data len 1004, time nopts, type unknown atom - write_data len 148, time nopts, type trailer atom - -3c2c3f98c8a047f0ecefff07570fd457 9299 large_frag +5bde1358e246e715b2096daa321c9f1b 9299 large_frag write_data len 1231, time nopts, type header atom ftyp write_data len 684, time -33333, type sync atom moof write_data len 504, time 800000, type boundary atom moof @@ -139,15 +139,15 @@ write_data len 420, time 1266667, type boundary atom moof write_data len 668, time 1566667, type sync atom moof write_data len 440, time 2233333, type boundary atom moof write_data len 262, time nopts, type trailer atom - -edd19deae2b70afcf2cd744b89b7013d 4209 vfr-noduration-interleave +47cc2460c4b18390c67991cf3251409b 4209 vfr-noduration-interleave write_data len 1231, time nopts, type header atom ftyp write_data len 916, time 0, type sync atom moof write_data len 908, time 1000000, type sync atom moof write_data len 148, time nopts, type trailer atom - -781dbfd228f36903178e29faa727d78b 3203 delay-moov-elst-neg-cts +c200a345c365dd35a31e7e62a9ae6c10 3203 delay-moov-elst-neg-cts write_data len 36, time nopts, type header atom ftyp write_data len 1123, time nopts, type header atom - -write_data len 1188, time 0, type sync atom moof -write_data len 908, time 1033333, type sync atom moof +write_data len 900, time 0, type sync atom moof +write_data len 908, time 1000000, type sync atom moof write_data len 148, time nopts, type trailer atom - -7630fdf358e02c79e88f312f82a260b7 3403 empty-moov-neg-cts +868bb53d861d81b1c15ef4d59afc83b5 3115 empty-moov-neg-cts diff --git a/tests/ref/fate/opt b/tests/ref/fate/opt index 71f56ce51e..46ea0652f8 100644 --- a/tests/ref/fate/opt +++ b/tests/ref/fate/opt @@ -43,6 +43,8 @@ TestContext AVOptions: -bool1 E......... set boolean value (default auto) -bool2 E......... set boolean value (default true) -bool3 E......... set boolean value (default false) + -dict1 E......... set dictionary value + -dict2 E......... 
set dictionary value (default "happy=':-)'") Testing av_opt_is_set_to_default() name: num default:1 error: @@ -70,6 +72,8 @@ name: dbl default:0 error: name: bool1 default:0 error: name: bool2 default:0 error: name: bool3 default:1 error: +name: dict1 default:1 error: +name: dict2 default:0 error: name: num default:1 error: name: toggle default:1 error: name: rational default:1 error: @@ -95,9 +99,37 @@ name: dbl default:1 error: name: bool1 default:1 error: name: bool2 default:1 error: name: bool3 default:1 error: +name: dict1 default:1 error: +name: dict2 default:1 error: + +Testing av_opt_get/av_opt_set() +name: num get: 0 set: OK get: 0 OK +name: toggle get: 1 set: OK get: 1 OK +name: rational get: 1/1 set: OK get: 1/1 OK +name: string get: default set: OK get: default OK +name: escape get: \=, set: OK get: \=, OK +name: flags get: 0x00000001 set: OK get: 0x00000001 OK +name: size get: 200x300 set: OK get: 200x300 OK +name: pix_fmt get: 0bgr set: OK get: 0bgr OK +name: sample_fmt get: s16 set: OK get: s16 OK +name: video_rate get: 25/1 set: OK get: 25/1 OK +name: duration get: 0.001 set: OK get: 0.001 OK +name: color get: 0xffc0cbff set: OK get: 0xffc0cbff OK +name: cl get: 0x137 set: OK get: 0x137 OK +name: bin get: 62696E00 set: OK get: 62696E00 OK +name: bin1 get: set: OK get: OK +name: bin2 get: set: OK get: OK +name: num64 get: 1 set: OK get: 1 OK +name: flt get: 0.333333 set: OK get: 0.333333 OK +name: dbl get: 0.333333 set: OK get: 0.333333 OK +name: bool1 get: auto set: OK get: auto OK +name: bool2 get: true set: OK get: true OK +name: bool3 get: false set: OK get: false OK +name: dict1 get: set: OK get: OK +name: dict2 get: happy=\:-) set: OK get: happy=\:-) OK Test av_opt_serialize() -num=0,toggle=1,rational=1/1,string=default,escape=\\\=\,,flags=0x00000001,size=200x300,pix_fmt=0bgr,sample_fmt=s16,video_rate=25/1,duration=0.001,color=0xffc0cbff,cl=0x137,bin=62696E00,bin1=,bin2=,num64=1,flt=0.333333,dbl=0.333333,bool1=auto,bool2=true,bool3=false +num=0,toggle=1,rational=1/1,string=default,escape=\\\=\,,flags=0x00000001,size=200x300,pix_fmt=0bgr,sample_fmt=s16,video_rate=25/1,duration=0.001,color=0xffc0cbff,cl=0x137,bin=62696E00,bin1=,bin2=,num64=1,flt=0.333333,dbl=0.333333,bool1=auto,bool2=true,bool3=false,dict1=,dict2=happy\=\\:-) Setting entry with key 'num' to value '0' Setting entry with key 'toggle' to value '1' Setting entry with key 'rational' to value '1/1' @@ -120,7 +152,9 @@ Setting entry with key 'dbl' to value '0.333333' Setting entry with key 'bool1' to value 'auto' Setting entry with key 'bool2' to value 'true' Setting entry with key 'bool3' to value 'false' -num=0,toggle=1,rational=1/1,string=default,escape=\\\=\,,flags=0x00000001,size=200x300,pix_fmt=0bgr,sample_fmt=s16,video_rate=25/1,duration=0.001,color=0xffc0cbff,cl=0x137,bin=62696E00,bin1=,bin2=,num64=1,flt=0.333333,dbl=0.333333,bool1=auto,bool2=true,bool3=false +Setting entry with key 'dict1' to value '' +Setting entry with key 'dict2' to value 'happy=\:-)' +num=0,toggle=1,rational=1/1,string=default,escape=\\\=\,,flags=0x00000001,size=200x300,pix_fmt=0bgr,sample_fmt=s16,video_rate=25/1,duration=0.001,color=0xffc0cbff,cl=0x137,bin=62696E00,bin1=,bin2=,num64=1,flt=0.333333,dbl=0.333333,bool1=auto,bool2=true,bool3=false,dict1=,dict2=happy\=\\:-) Testing av_set_options_string() Setting options string '' @@ -341,6 +375,9 @@ OK 'bool1=true' Setting options string 'bool2=auto' Setting entry with key 'bool2' to value 'auto' OK 'bool2=auto' +Setting options string 'dict1='happy=\:-):sad=\:-('' +Setting entry with 
key 'dict1' to value 'happy=\:-):sad=\:-(' +OK 'dict1='happy=\:-):sad=\:-('' Testing av_opt_set_from_string() Setting options string '' diff --git a/tests/ref/fate/sub-microdvd-remux b/tests/ref/fate/sub-microdvd-remux index a71da99031..92ff233f56 100644 Binary files a/tests/ref/fate/sub-microdvd-remux and b/tests/ref/fate/sub-microdvd-remux differ diff --git a/tests/ref/lavf/ismv b/tests/ref/lavf/ismv index 865a6913cf..e7361705fa 100644 --- a/tests/ref/lavf/ismv +++ b/tests/ref/lavf/ismv @@ -1,9 +1,9 @@ -bd88b50defa57766619c092ea89f25a6 *tests/data/lavf/lavf.ismv -313165 tests/data/lavf/lavf.ismv +4c6bc5ac805a76bbbd886a69d2e61554 *tests/data/lavf/lavf.ismv +313169 tests/data/lavf/lavf.ismv tests/data/lavf/lavf.ismv CRC=0x9d9a638a -805a2557bf952c84835f3c10b6893e15 *tests/data/lavf/lavf.ismv -322071 tests/data/lavf/lavf.ismv +18678627921460328ea3fed238d0d57d *tests/data/lavf/lavf.ismv +322075 tests/data/lavf/lavf.ismv tests/data/lavf/lavf.ismv CRC=0xe8130120 -96053075a3f60d271131fe2d0765c267 *tests/data/lavf/lavf.ismv -312542 tests/data/lavf/lavf.ismv +b9a858caf55b1eff2273e746e9f72dc4 *tests/data/lavf/lavf.ismv +312546 tests/data/lavf/lavf.ismv tests/data/lavf/lavf.ismv CRC=0x9d9a638a diff --git a/tools/python/convert_from_tensorflow.py b/tools/python/convert_from_tensorflow.py index 605158a32e..5e87e227ea 100644 --- a/tools/python/convert_from_tensorflow.py +++ b/tools/python/convert_from_tensorflow.py @@ -193,7 +193,10 @@ class TFConverter: filter_width = ktensor.tensor_shape.dim[1].size in_channels = ktensor.tensor_shape.dim[2].size out_channels = ktensor.tensor_shape.dim[3].size - kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32) + if filter_height * filter_width * in_channels * out_channels == 1: + kernel = np.float32(ktensor.float_val[0]) + else: + kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32) kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels) kernel = np.transpose(kernel, [3, 0, 1, 2]) diff --git a/tools/target_dec_fuzzer.c b/tools/target_dec_fuzzer.c index cf66e2c449..be00d1f2ed 100644 --- a/tools/target_dec_fuzzer.c +++ b/tools/target_dec_fuzzer.c @@ -140,10 +140,12 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { case AV_CODEC_ID_BINKVIDEO: maxpixels /= 32; break; case AV_CODEC_ID_CFHD: maxpixels /= 128; break; case AV_CODEC_ID_DIRAC: maxpixels /= 8192; break; + case AV_CODEC_ID_DST: maxsamples /= 8192; break; case AV_CODEC_ID_DXV: maxpixels /= 32; break; case AV_CODEC_ID_FFWAVESYNTH: maxsamples /= 16384; break; case AV_CODEC_ID_MSRLE: maxpixels /= 16; break; case AV_CODEC_ID_QTRLE: maxpixels /= 16; break; + case AV_CODEC_ID_RASC: maxpixels /= 16; break; case AV_CODEC_ID_SANM: maxpixels /= 16; break; case AV_CODEC_ID_G2M: maxpixels /= 64; break; case AV_CODEC_ID_GIF: maxpixels /= 16; break; @@ -205,9 +207,9 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { extradata_size = bytestream2_get_le32(&gbc); - ctx->sample_rate = bytestream2_get_le32(&gbc); + ctx->sample_rate = bytestream2_get_le32(&gbc) & 0x7FFFFFFF; ctx->channels = (unsigned)bytestream2_get_le32(&gbc) % FF_SANE_NB_CHANNELS; - ctx->block_align = bytestream2_get_le32(&gbc); + ctx->block_align = bytestream2_get_le32(&gbc) & 0x7FFFFFFF; ctx->codec_tag = bytestream2_get_le32(&gbc); keyframes = bytestream2_get_le64(&gbc); ctx->request_channel_layout = bytestream2_get_le64(&gbc);
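Note on the tools/python/convert_from_tensorflow.py hunk above: the new branch handles convolution kernels with exactly one element. TensorFlow can serialize such small constant tensors in the TensorProto's typed float_val field instead of the packed tensor_content bytes, in which case np.frombuffer() would see an empty buffer. The sketch below is only an illustration of that fallback; the helper name read_conv_kernel is made up for clarity (the real code lives inside TFConverter), while the reshape/transpose lines mirror the context lines visible in the diff.

    import numpy as np

    def read_conv_kernel(ktensor, filter_height, filter_width, in_channels, out_channels):
        # A single-element kernel (e.g. a 1x1 conv with one input and one output
        # channel) may be stored in float_val rather than tensor_content, so read
        # the scalar directly in that case.
        if filter_height * filter_width * in_channels * out_channels == 1:
            kernel = np.float32(ktensor.float_val[0])
        else:
            kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
        # Same post-processing as the surrounding converter code: restore the
        # HWIO shape, then reorder to out_channels-first.
        kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
        return np.transpose(kernel, [3, 0, 1, 2])

This keeps the converter working for the trivial "halve" style models exercised by the new fate-filter-dnn_processing tests, which presumably use such minimal kernels; without the fallback, frombuffer() on an empty tensor_content would not yield the expected single coefficient.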