Compare commits

...

293 Commits

Author SHA1 Message Date
MrBesen 438b0eae71
Merge branch 'master' of https://git.ffmpeg.org/ffmpeg 2019-12-11 10:30:27 +01:00
Steven Liu 4110029e56 avformat/cache: rename the class name from Cache to cache
Before the patch:
liuqideMacBook-Pro:build liuqi$ ffmpeg --help full | grep cache
The cache protocol options cannot be found.

After the patch:
bogon:dash liuqi$ ./ffmpeg --help full | grep cache
cache AVOptions:
The cache AVOptions can now be found.

Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-12-11 15:22:24 +08:00
Steven Liu ed89763336 avformat/hlsenc: remove duplicate code block
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-12-11 15:19:19 +08:00
Jun Zhao 4eae85a82f lavf/tls_openssl: support both pre-1.1.0 and post-1.1.0 init
Support both the pre-1.1.0 and post-1.1.0 versions of the OpenSSL
library, as described at:
https://wiki.openssl.org/index.php/Library_Initialization
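
For illustration, a minimal sketch of the version-conditional initialization that page describes (an assumption about the pattern, not the actual patch):

#include <openssl/ssl.h>
#include <openssl/err.h>

static int openssl_init(void)
{
#if OPENSSL_VERSION_NUMBER < 0x10100000L
    /* pre-1.1.0: explicit global initialization is required */
    SSL_library_init();
    SSL_load_error_strings();
#else
    /* 1.1.0 and later: the library initializes itself automatically;
     * an explicit OPENSSL_init_ssl() call is optional */
    if (!OPENSSL_init_ssl(0, NULL))
        return -1;
#endif
    return 0;
}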

Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-12-11 14:48:18 +08:00
macweng e3c732bb0c configure: add OPENSSL_init_ssl check if pkg-config fail
Fix --enable-openssl failing when pkg-config fails and OpenSSL >= 1.1.0 is used;
the root cause is that check_lib cannot find SSL_library_init().

Reviewed-by: James Almer <jamrial@gmail.com>
Signed-off-by: macweng <macweng@tencent.com>
2019-12-11 14:46:53 +08:00
Jun Zhao 952fd0c768 lavf/libsrt: enable other encryption parameters
Enable the SRTO_ENFORCEDENCRYPTION/SRTO_KMREFRESHRATE/SRTO_KMPREANNOUNCE
options for SRT encryption control.

Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-12-11 14:23:26 +08:00
Jun Zhao 8d823e6005 lavf/libsrt: add linger parameter to libsrt
Add a linger parameter to libsrt; it sets the number of seconds
that the socket waits for unsent data when closing.
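
At the socket level this corresponds to SRT's SRTO_LINGER option; a hedged sketch of such a setting (the helper name is hypothetical, not taken from the patch):

#include <sys/socket.h>   /* struct linger */
#include <srt/srt.h>

/* Set how many seconds an SRT socket lingers on close for unsent data. */
static int set_srt_linger(SRTSOCKET fd, int seconds)
{
    struct linger lin = { .l_onoff = 1, .l_linger = seconds };
    return srt_setsockopt(fd, 0, SRTO_LINGER, &lin, sizeof(lin));
}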

Reviewed-by: Andriy Gelman <andriy.gelman@gmail.com>
Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-12-11 14:23:26 +08:00
Fei Wang 1ea44178f5 avcodec/cbs_av1: avoid reading trailing bits when obu type is OBU_TILE_LIST
Signed-off-by: Fei Wang <fei.w.wang@intel.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-10 23:39:16 -03:00
Michael Niedermayer 7e665e4a81 avformat/rmdec: Initialize and sanity check offset in ivr_read_header()
Fixes: signed integer overflow: -9223372036854775808 - 17 cannot be represented in type 'long'
Fixes: 18768/clusterfuzz-testcase-minimized-ffmpeg_DEMUXER_fuzzer-5674385247830016

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Limin Wang 8558c231fb swscale/swscale_unscaled: add AV_PIX_FMT_GBRAP10 for LE and BE conversion wrapper
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer 7a1b30c871 avcodec/agm: Do not allow MVs out of the picture area as no edge is allocated
Fixes: out of array access
Fixes: 18499/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_AGM_fuzzer-5749038406434816

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer a2d6b2042e avcodec/wmalosslessdec: Set FF_CODEC_CAP_INIT_CLEANUP
Fixes: memleaks
Fixes: 18429/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_WMALOSSLESS_fuzzer-6210814364614656
Fixes: 18722/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_WMALOSSLESS_fuzzer-5680535690543104

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer 6e15ba2d1f avcodec/apedec: Fix 2 integer overflows
Fixes: signed integer overflow: 2119056926 - -134217728 cannot be represented in type 'int'
Fixes: 18728/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_APE_fuzzer-5747539563511808

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer a9cbd25d89 avcodec/wmaprodec: Set packet_loss when we error out on a sanity check
Fixes: left shift of negative value -34
Fixes: 18719/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_WMAPRO_fuzzer-5642658173419520

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer 5473c7825e avcodec/wmaprodec: Check offset
Fixes: index 33280 out of bounds for type 'float [32768]'
Fixes: 18718/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_XMA2_fuzzer-5635373899710464

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer 93d52a181e avcodec/truemotion2: Fix 2 integer overflows in tm2_low_res_block()
Fixes: signed integer overflow: 1778647621 + 574372924 cannot be represented in type 'int'
Fixes: 18692/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_TRUEMOTION2_fuzzer-6248679635943424

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer 090ac57997 avcodec/wmaprodec: Check if the channel sum of all internal contexts match the external
Fixes: NULL pointer dereference
Fixes: 18689/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_XMA1_fuzzer-5715114640015360

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer 8bcb5fbab5 avcodec/truespeech: Fix an integer overflow in truespeech_synth()
Fixes: signed integer overflow: 2147483188 + 2048 cannot be represented in type 'int'
Fixes: 18741/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_TRUESPEECH_fuzzer-5748950460268544

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Michael Niedermayer a0ae4b7df9 Remove redundant ;
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Andreas Rheinhardt 67d4940a77 avformat/id3v2: Fix double-free on error
ff_id3v2_parse_priv_dict() uses av_dict_set() with the flags
AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL. In this case both
key and value are freed on error (and owned by the destination
dictionary on success), so that freeing them again on error is a
double-free and therefore forbidden. But it nevertheless happened.

Fixes CID 1452489 and 1452421.
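
A minimal sketch of the ownership rule behind the fix (function and variable names are illustrative, not the parser's actual code):

#include "libavutil/dict.h"

static int add_owned_entry(AVDictionary **metadata, char *key, char *value)
{
    /* With AV_DICT_DONT_STRDUP_KEY/VAL, av_dict_set() takes ownership of
     * key and value and frees them itself on failure, so the caller must
     * not free them again in either case. */
    int ret = av_dict_set(metadata, key, value,
                          AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
    if (ret < 0)
        return ret;   /* key and value were already freed by av_dict_set() */
    return 0;
}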

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-10 16:09:14 +01:00
Gyan Doshi e73688eff4 avfilter: rename scale.c,h to scale_eval
scale.c is too generic; scale_eval is more representative
2019-12-10 12:55:48 +05:30
Andreas Rheinhardt 9f7b2b37e3 fate/matroska: Add a test for propagating flac channel layouts
contained in Vorbis comments in the CodecPrivate of flac tracks.
Moreover, it also tests header removal compression.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-08 18:20:53 -03:00
Michael Niedermayer e3dddf2142 tools/target_dec_fuzzer: Also fuzz request_channel_layout
This should improve coverage

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-08 13:08:23 +01:00
Gyan Doshi 1b4f473d18 avfilter/scale.c: factorize ff_scale_eval_dimensions
Adjustment of the evaluated values is shifted to ff_adjust_scale_dimensions.
The code for force_original_aspect_ratio and force_divisible_by is moved from
vf_scale so that it is now available for scale_cuda, scale_npp and
scale_vaapi as well.
2019-12-08 16:12:31 +05:30
Andreas Rheinhardt ff2b75d94c avformat/matroskadec: Add a fate test for CodecPrivate compression
This test contains a track with zlib compressed CodecPrivate in addition
to compressed frames; the former was unchecked before.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-07 23:11:55 -03:00
Michael Niedermayer e1d836d237 avcodec/atrac9dec: Check q_unit_cnt more completely before using it to access at9_tab_band_ext_group
Fixes: index 8 out of bounds for type 'const uint8_t [8][3]'
Fixes: 19127/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ATRAC9_fuzzer-5709394985091072

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: Lynne <dev@lynne.ee>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-07 19:34:57 +01:00
hwrenx b7583230ae MAINTAINERS: add myself as libxavs2 maintainer
Signed-off-by: hwrenx <hwrenx@126.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-07 19:34:57 +01:00
James Almer ade7f4c60c fate/matroska: fix dependencies for fate-matroska-prores-zlib test
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-07 13:30:18 -03:00
James Almer 70efa31ba1 fate/matroska: add a demux test for ProRes using zlib compression
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-07 12:36:21 -03:00
Andreas Rheinhardt af50f0a515 avformat/matroskadec: Fix use-after-free when demuxing ProRes
ProRes in Matroska is supposed to not contain the first atom header
(containing a size field and the tag "icpf") and therefore the Matroska
demuxer has to recreate it; this involves an allocation and copy, of
course. Whether the old buffer (containing the data without the atom
header) needs to be freed or not depends upon whether it is what was
directly read (in which case it is owned by an AVBuffer) or whether it
has been allocated when reversing the track's content compression (e.g.
zlib compression) that Matroska supports.

So there are three pointers involved: The one pointing to the directly
read data (owned by the AVBuffer), the one pointing to the currently
valid data (which coincides with the former if no content compression
needed to be reverted) and the one pointing to the new data with the
first atom header. The check for whether to free the second of these is
simply whether the first two are different.

This works mostly, but there is a complication: Some muxers don't strip
the first atom header away and in this case, it is also not reinserted
and no new buffer is allocated; instead, the second and the third
pointers agree. In this case, one must never free the second buffer.
Yet it is currently done if the track is e.g. zlib compressed.
This commit fixes this.

This is a regression since b8e75a2a.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-07 12:36:21 -03:00
Jun Zhao 70e292becf lavf/rtmpproto: Don't unref uninitialized buffers
This happens if ffurl_open_whitelist fails and stream is unset.

Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-12-07 14:09:12 +08:00
Jun Zhao 46d2a67f80 lavfi/avf_showspectrum: Fix the memory leak in the error handling path
Fix the memory leak in the error handling path.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-12-07 14:08:08 +08:00
Andriy Gelman 02a83e26de lavc/cbs_h2645: Fix incorrect max size of nalu unit
In the worst case the startcode prefix has 4 bytes.

This fixes a triggered assertion:
Assertion dp <= max_size failed at libavcodec/cbs_h2645.c:1451

Found-by: libFuzzer
Reviewed-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
2019-12-06 15:05:56 -03:00
Limin Wang 79d907774d avformat/libsrt: change tlpktdrop, nakreport, messageapi options to boolean type
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:27:25 +01:00
Michael Niedermayer e7011a0ca6 avcodec/mvha: Check remaining space when reading VLC table probabilities
Fixes: Infinite loop
Fixes: 19183/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_MVHA_fuzzer-5666216765292544

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:25:29 +01:00
Limin Wang 5ee4c12ec2 avfilter/vsrc_life: Fix for random_seed type
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:25:29 +01:00
Limin Wang 1d757b111a avfilter/vsrc_cellauto: Fix for random_seed type
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:25:29 +01:00
leozhang 0c7f9f714d avfilter/vf_yaepblur: add yaepblur filter
Signed-off-by: leozhang <leozhang@qiyi.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:25:29 +01:00
Ting Fu 039a0ebe6f libswscale/swscale_unscaled.c: remove redundant code
Signed-off-by: Ting Fu <ting.fu@intel.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:25:29 +01:00
Limin Wang f9d6addd60 avformat/libsrt: fix for the memory leak if passphrase has been configured by option
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-06 11:25:29 +01:00
Marton Balint 648b8cca6c avdevice/xcbgrab: wrap non-shm image replies in a buffer ref
This avoids a memcpy, improving performance if SHM is not used.

Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-06 10:09:49 +01:00
Gyan Doshi b66a800877 avfilter/crop: avoid premature eval error
Width and height expressions can refer to each other. Width is
evaluated twice to allow for reference to output height. So we
should not error out upon failure of the first evaluation of width.
2019-12-06 10:19:47 +05:30
Zhao Zhili 71fd72070c avutil/buffer: use appropriate atomic operations
No functional changes. ref/unref vs add/sub is symmetrical.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-05 20:53:52 -03:00
James Almer 964eb754b4 tools: add a fuzzer tool for bitstream filters
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-05 20:49:15 -03:00
Michael Niedermayer 37f31f4e50 avcodec/fitsdec: Use lrint()
Fixes: fate-fitsdec-bitpix-64

Possibly Fixes: -nan is outside the range of representable values of type 'unsigned short'
Possibly Fixes: 17769/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_FITS_fuzzer-5678314672357376

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-05 20:20:05 +01:00
Andreas Rheinhardt 4825d8a98d avformat/mpeg: Fix leaks of AVFormatContext and subtitle packets
If an error happens in vobsub_read_header() after allocating the
AVFormatContext intended to read the sub-file, both the AVFormatContext
and the data in the subtitle queues leak. This has been fixed.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-05 17:22:40 +01:00
Andreas Rheinhardt bc3cf2bbd3 avformat/mpeg: Don't copy or leak string in AVBPrint
vobsub_read_header() uses an AVBPrint to write a string and up until
now, it collected the string stored in the AVBPrint via
av_bprint_finalize(), which might involve an allocation and copy of the
string. But this is unnecessary, as the lifetime of the returned string
does not exceed the lifetime of the AVBPrint. So use the string in the
AVBPrint directly.

This also makes it possible to easily fix a memleak: In certain error
situations, the string stored in the AVBPrint would not be freed (if it
was dynamically allocated). This has been fixed, too.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-05 17:22:40 +01:00
Andreas Rheinhardt 3f37880c05 avformat/mpeg: Make VobSub demuxer have its own context struct
When the VobSub demuxer was added, the fields it required were simply
added to the MpegDemuxContext (if the VobSub demuxer was selected at
all). The mpeg demuxer of course doesn't use these fields even if they
are there; and the VobSub demuxer doesn't use the old ones: It opens an
mpeg subdemuxer of its own and uses this where an mpeg demuxer is
required. Hence the two contexts can be split, saving memory.

Furthermore several headers can now be moved to the section that is
guarded by #if CONFIG_VOBSUB_DEMUXER (this even includes avassert.h
which was unguarded and has been added in 9cde9f70 despite not being
used in that patch).

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-05 17:22:40 +01:00
Andreas Rheinhardt d5274f86a8 avformat/matroskadec: Reuse AVIOContext
When parsing EBML lacing, for every number read, a new AVIOContext has
been initialized (via ffio_init_context()) just for this number. This
has been changed: The context is kept now.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt dbe3be6744 avformat/matroskadec: Improve frame size parsing error messages
When parsing the sizes of the frames in a lace fails, sometimes no
error message was raised (e.g. when using xiph or fixed-size lacing).
Only EBML lacing generated error messages (which were wrongly declared
as AV_LOG_INFO), but even here not all errors resulted in an error
message. So add a generic error message to catch them all.

Moreover, if parsing one of the EBML numbers fails, ebml_read_num already
emits its own error messages, so that all that is needed is a generic error
message to indicate that this happened during parsing the sizes of the
frames in a block; in other words, the error messages specific to
parsing EBML lace numbers can be and have been removed.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt f74eaa17bb avformat/matroskadec: Remove unnecessary check
870e7552 introduced validating the lace sizes when they are parsed and
removed the old check; yet when merging this libav commit in 6902c3ac,
the old check for whether the frame extends beyond the block has been kept.
It is unnecessary and has been removed.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt a69f92a946 avformat/matroskadec: Simplify control flow of parsing laces
Up until now, when an error happened in one of the inner loops in
matroska_parse_laces, a variable designated for the return value has
been set to an error value and break has been used to exit the
current loop/case. This was done so that the end of matroska_parse_laces
is reached, because said function allocated memory which is later used
and freed in the calling function and passed at the end of
matroska_parse_laces.

But given that there is no allocation any more, one can now return
immediately. And this commit does this.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt 9ad1a6d64c avformat/matroskadec: Avoid allocating array for lace sizes
The maximal number of frames in a lace is 256; hence there is a modest
upper bound on the size of an array that can hold the sizes of
all the frames in a lace. Yet up until now, said array has been
dynamically allocated. This has been changed.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt 668490ac98 avformat/matroskadec: Use bytestream API instead of AVIOContext
It avoids the overhead of function calls.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt eec26b5911 avformat/matroskadec: avcodec/tta: Set extradata_size to 22
Up until c4e0e314, the seek table had been included in the tta
extradata, so that the size of said extradata was 22 (the size of a TTA1
header) + 4 * the number of frames. The decoder rejected anything below a
size of 30, and so the Matroska demuxer exported 30-byte-long extradata,
of which only 18 were set (it ignores a CRC-32 and simply leaves it at
0). But this is unnecessary since said commit, so reduce the size to 22.

Furthermore, replace 30 by 22 in a comment about the extradata size in
libavcodec/tta.c.
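
For reference, the 22-byte figure matches the fixed fields of a TTA1 header; a sketch of the assumed layout (not taken from the patch):

#include <stdint.h>

typedef struct TTA1Header {
    uint8_t  signature[4];   /* "TTA1"                            4 bytes */
    uint16_t format;         /* audio format                      2 bytes */
    uint16_t channels;       /*                                   2 bytes */
    uint16_t bps;            /* bits per sample                   2 bytes */
    uint32_t sample_rate;    /*                                   4 bytes */
    uint32_t total_samples;  /*                                   4 bytes */
    uint32_t crc32;          /* ignored/left at 0 by the demuxer  4 bytes */
} TTA1Header;                /* 4 + 2 + 2 + 2 + 4 + 4 + 4 = 22 bytes */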

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
Andreas Rheinhardt f7bf59b431 avformat/matroskadec: Check before allocations
That way one doesn't have to free later. In this case (concerning TTA
extradata), this also fixes a memleak when the output samplerate is
invalid.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 23:11:37 -03:00
James Almer 94fd85d81d fate/matroska: add a test for xiph lacing
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-04 22:52:01 -03:00
hwren 0bafcc9874 lavc/libxavs2.c: optimize error descriptions
Signed-off-by: hwren <hwrenx@126.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-04 20:21:34 +01:00
hwren 191203aa1f lavc/libxavs2.c: fix code style - spaces
Signed-off-by: hwren <hwrenx@126.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-04 20:17:00 +01:00
hwren 6721cd942a lavc/libxavs2.c: avoid recomputations of pointers in xavs2_copy_frame* functions
Signed-off-by: hwren <hwrenx@126.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-04 20:17:00 +01:00
hwren 3003917a8f lavc/libxavs2.c: use more descriptive variable names in xavs2_copy_frame* functions
Signed-off-by: hwren <hwrenx@126.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-04 20:17:00 +01:00
Limin Wang 0485033ae1 avfilter/vf_elbg: Fix for the seed type
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-04 20:17:00 +01:00
Andreas Rheinhardt 296f769fdc avformat/rmdec: Use av_packet_move_ref() for packet ownership transfer
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-04 20:17:00 +01:00
Kusanagi Kouichi 12bbfc4cca avdevice/xcbgrab: Handle reply and error properly
Fix a NULL dereference and leaks.

Signed-off-by: Kusanagi Kouichi <slash@ac.auone-net.jp>
Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-03 21:13:00 +01:00
Michael Niedermayer 5ac8675cb1 tools/target_dec_fuzzer: Support setting AV_CODEC_FLAG2_FAST
This should improve coverage

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-03 16:06:33 +01:00
Michael Niedermayer 3ae87bb3c1 tools/target_dec_fuzzer: Support fuzzing error detection
This should increase coverage

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-03 16:06:20 +01:00
Marton Balint 1e0ea36945 avformat/mpegtsenc: add padding to m2ts streams
6144 byte alignment is needed.

Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-03 11:00:11 +01:00
Marton Balint 998906a0a4 avformat/mpegtsenc: factorize writing packet
Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-03 11:00:11 +01:00
Marton Balint db63db3977 avformat/mpegtsenc: move around setting m2ts_mode
Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-03 11:00:11 +01:00
Marton Balint 565dc3e451 avformat/mpegtsenc: set priority flag for AC3 codecs if writing BluRay
Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-03 11:00:11 +01:00
Marton Balint f5b83d5419 avformat/mpegtsenc: allow any sensible PID for elementary and PMT PIDs
This sets the range of the first automatically assigned PMT PID or elementary
stream PID parameters to [0x20, 0x1ffa]. You can still manually assign a PID
for a stream using AVStream->id in the wider [0x10, 0x1ffe] range, as specified
by ISO 13818-1. But since DVB and ATSC both reserve some PIDs, let's not allow
them to be automatically assigned.

Also make sure that assigned PID numbers are valid and fix the error message
for the previous PID collision checks.

Signed-off-by: Marton Balint <cus@passwd.hu>
2019-12-03 11:00:10 +01:00
Guo, Yejun b864af033d MAINTAINERS: add myself to libavfilter/dnn
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-03 09:52:17 +01:00
Linjie Fu 8fc8bdddbf libavformat/utils: Fix code indentation
Introduced in 077939626e.

Signed-off-by: Linjie Fu <linjie.fu@intel.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-03 09:50:00 +01:00
Andreas Rheinhardt 710ab13693 avfilter/vf_unsharp: Don't dereference NULL
The unsharp filter uses an array of arrays of uint32_t, each of which is
separately allocated. These arrays also need to be freed separately; but
before doing so, one needs to check whether the array of arrays has
actually been allocated, otherwise one would dereference a NULL pointer.
This fixes #8408.

Furthermore, the array of arrays needs to be zero-initialized so that
no uninitialized pointer will be freed in case an allocation of one of
the individual arrays fails.
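
A sketch of the allocation/free pattern this describes (names are illustrative, not the filter's actual fields):

#include <stdint.h>
#include "libavutil/mem.h"

static void free_rows(uint32_t **rows, int n)
{
    if (!rows)
        return;                 /* nothing was ever allocated */
    for (int i = 0; i < n; i++)
        av_freep(&rows[i]);     /* entries never reached stay NULL, which is fine */
    av_free(rows);
}

static uint32_t **alloc_rows(int n, int width)
{
    uint32_t **rows = av_calloc(n, sizeof(*rows));   /* zero-initialized */
    if (!rows)
        return NULL;
    for (int i = 0; i < n; i++) {
        rows[i] = av_malloc_array(width, sizeof(**rows));
        if (!rows[i]) {
            free_rows(rows, n);                      /* safe: the rest is NULL */
            return NULL;
        }
    }
    return rows;
}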

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-03 09:50:00 +01:00
James Almer 968c4cbf22 fate/cbs: add svc AV1 tests
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-02 14:45:02 -03:00
James Almer 5985ca0436 avcodec/av1_parser: skip frames with spatial_id > 0
This fixes marking keyframes in svc samples.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-02 14:38:52 -03:00
Zhao Zhili f9d4366912 avfilter/buffersrc: remove write-only variable 2019-12-02 17:28:16 +01:00
James Almer 637742b45d fate/cbs: add a decode model AV1 test
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-01 16:53:11 -03:00
James Almer af7ab32b89 fate/cbs: add a switch frame AV1 test
Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-01 16:53:11 -03:00
James Almer 553c1431ac Revert "avcodec/cbs_av1_syntax_template: Check ref_frame_idx before use"
This reverts commit 8174e5c77d.

It's no longer needed after the previous commit.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-01 16:53:11 -03:00
James Almer eced91afa5 avcodec/cbs_av1: implement missing set_frame_refs() function
Defined in Section 7.8

This finishes implementing support for frames using
frame_refs_short_signaling.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-12-01 16:53:11 -03:00
Michael Niedermayer a11aa5f3ed avcodec/alsdec: Discard frames for which no channel could be decoded
Fixes: Timeout (80sec -> 33sec)
Fixes: 18668/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ALS_fuzzer-5710836719157248

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer fd3c34ff30 avcodec/alsdec: Avoid 1 layer of pointer dereferences in INTERLEAVE_OUTPUT()
This optimizes the code slightly (116sec -> 80sec)
Testcase: 18668/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ALS_fuzzer-5710836719157248

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 0ddef00457 avcodec/g729dec: Avoid one multiply by using init_get_bits8()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 336f9461df avcodec/g729dec: Avoid using buf_size
buf_size is not updated as buf is advanced, so it is wrong after the first
iteration

Fixes: Timeout (160sec -> 27sec)
Fixes: 18658/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_G729_fuzzer-5729784269373440

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 576746b4e3 avcodec/g729dec: Factor block_size out
This will be used in the next commit

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer f64be9da4c avcodec/g729dec: require buf_size to be non 0
The 0 case was added with the support for multiple packets. It
appears unintended and causes extra complexity and out-of-array
accesses (though within the padding).

No testcase

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer d468da8d79 avcodec/g729dec: Check for KELVIN && 6k4
This combination would assume different block sizes throughout the code, so it's
better to error out.

Fixes: signed integer overflow: -1082385168 * 2 cannot be represented in type 'int'
Fixes: 19110/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ACELP_KELVIN_fuzzer-5643993950191616

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 7686ba1f14 avcodec/alac: Fix integer overflow in lpc_prediction() with sign
Fixes: signed integer overflow: -2147483648 * -1 cannot be represented in type 'int'
Fixes: 18643/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ALAC_fuzzer-5672182449700864

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 589cb44498 avcodec/wmaprodec: Fix buflen computation in save_bits()
Fixes: Assertion failure
Fixes: 18630/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_WMAPRO_fuzzer-5201588654440448

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 0e010e489b avcodec/vc1_block: Fix integer overflow in AC rescaling in vc1_decode_i_block_adv()
Fixes: signed integer overflow: 50176 * 262144 cannot be represented in type 'int'
Fixes: 18629/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_VC1IMAGE_fuzzer-5182370286403584

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 47d963335e avcodec/vmdaudio: Check chunk counts to avoid integer overflow
Fixes: signed integer overflow: 4 * 538976288 cannot be represented in type 'int'
Fixes: 18622/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_VMDAUDIO_fuzzer-5092166174507008

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Michael Niedermayer 13816a1d08 avformat/mxfdec: Clear metadata_sets_count in mxf_read_close()
This avoids problems if the function is called twice

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Andreas Rheinhardt 5946243fa8 avformat/flac_picture: Return directly if nothing has been allocated
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Andreas Rheinhardt 84a4261cd8 avformat/flac_picture: Switch to bytestream2 API
ff_flac_parse_picture() parses a buffer containing a flac metadata
picture block by wrapping it in an AVIOContext and using the AVIOContext
API. Consequently, when not enough data could be read AVERROR(EIO) was
returned although reading didn't really fail: A block that contains a
subfield whose size field indicates that it is so big as to extend
beyond the buffer is just invalid.

This commit changes this by using the bytestream2 API instead;
furthermore, the checks for whether there is enough data left are
performed before allocating a buffer for said data.

Finally, if the length of the picture description is bigger than
INT_MAX, it will now raise an error.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Andreas Rheinhardt 35005a4af1 avformat/flac_picture: Simplify checks
During parsing a flac picture metadata block, the mimetype is read as
follows: Its 32b size field is read and checked for being in the range
1..63; afterwards, the actual mimetype-string is read into a buffer of
size 64, where the length to read is the minimum of the length field and
the size of the destination buffer -1. Then an assert guards that length
is indeed < the size of the destination buffer before the string in the
buffer is zero-terminated.

The FFMIN as well as the assert are actually redundant, as it has
been checked that the string (even after terminating) fits into the
buffer. In order to make this clear, reword the check "len >= 64" to
"len >= sizeof(mimetype)" and drop the FFMIN as well as the assert.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-12-01 17:17:04 +01:00
Steven Liu 9cc88ed4b7 avformat/smoothstreamingenc: removed unused check of avformat_free_context
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Reviewed-by: Jun Zhao <barryjzhao@tencent.com>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-12-02 00:08:31 +08:00
Steven Liu 0f79a71353 avformat/rtpenc_mpegts: removed unused check of avformat_free_context
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Reviewed-by: Jun Zhao <barryjzhao@tencent.com>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-12-02 00:08:21 +08:00
Steven Liu e880f4fb38 avformat/hdsenc: removed unused check of avformat_free_context
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Reviewed-by: Jun Zhao <barryjzhao@tencent.com>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-12-02 00:08:14 +08:00
Steven Liu b26225a3c7 avformat/dashenc: remove unused check of avformat_free_context
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Reviewed-by: Jun Zhao <barryjzhao@tencent.com>
Reviewed-by: Jeyapal, Karthick <kjeyapal@akamai.com>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-12-02 00:08:07 +08:00
James Almer e5db2e96c0 avformat/avc: add missing return error value
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-30 11:34:58 -03:00
Andriy Gelman 99d78e4f42 lavc/extract_extradata: Use bytestream api
Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-30 11:34:51 -03:00
Andreas Rheinhardt 76e0ecec0b avcodec/extract_extradata_bsf: Don't unref uninitialized buffers
This happens if allocating extradata fails and s->remove is unset.

Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-30 09:59:30 -03:00
James Almer 18507b4882 avformat/avc: fix sps buffer offset when calling ff_avc_decode_sps()
Skip the avcC specific size bytes and the NAL header bits.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-29 23:17:43 -03:00
James Almer 0d67af34c2 fate/demux: add an AV1 Annex B test
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-29 18:58:42 -03:00
James Almer 378065f08f avcodec/av1_parser: export stream dimensions in avctx
This is required to demux annexb samples when a decoder isn't available.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-29 18:58:34 -03:00
James Almer 13ed243594 fate/lavf-container: add an H264 mp4 remux test
This uses a raw h264 bitstream as source, in order to test the avcC
generation code.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-29 18:24:01 -03:00
James Almer de557eb191 fate/lavf-container: add an AV1 mp4 remux test
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-29 18:23:49 -03:00
Zhao Zhili 19956d01cc libavdevice/lavfi: check avfilter_graph_dump return value
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-29 19:58:35 +01:00
Andreas Rheinhardt 2205fb2810 avformat/avio: Use ffurl_closep
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-29 19:58:35 +01:00
Paul B Mahol 8c2f81a17a avfilter/vf_hqdn3d: add support for commands 2019-11-29 17:28:59 +01:00
Paul B Mahol 3a61297a67 avfilter/vf_hqdn3d: add support for 12bit and 14bit yuv formats 2019-11-29 17:28:59 +01:00
Jun Zhao 2952d9c5dc lavc/mvha: Check init_get_bits8() for failure
fix potential null pointer dereference

Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-11-29 10:10:52 +08:00
Jun Zhao d965589022 lavf/vividas: check avformat_new_stream() return
check avformat_new_stream() return.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-11-29 10:09:29 +08:00
Limin Wang c1ed00fd18 avfilter/vf_yadif: rename config_props -> config_output, link -> outlink
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-29 00:03:28 +01:00
Andreas Rheinhardt ff5c8e57e7 avformat/vividas: Avoid allocation of AVIOContext
Put an AVIOContext whose lifetime doesn't extend beyond the function where
it is allocated on the stack instead of allocating and freeing it. This
also avoids the need to free it, which in this case fixes possible
memleaks on error.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-29 00:03:28 +01:00
James Almer 16fa513392 avformat/avc: write the missing bits in the AVC Decoder Configuration Box
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-28 15:20:37 -03:00
Andreas Rheinhardt 4d97b2ad2f avformat/movenc: Avoid allocation for small dynamic buffers
By using avio_get_dyn_buf() + ffio_free_dyn_buf() instead of
avio_close_dyn_buf() + av_free() one can avoid an allocation + copy for
small dynamic buffers.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:37 -03:00
Andreas Rheinhardt 3a58ec7c77 avformat/matroskaenc: Avoid allocation for small dynamic buffers
By using avio_get_dyn_buf() + ffio_free_dyn_buf() instead of
avio_close_dyn_buf() + av_free() one can avoid an allocation + copy for
small dynamic buffers (i.e. small master elements).

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:37 -03:00
Andreas Rheinhardt aad82270b6 avformat/id3v2enc: Avoid allocation for small tags
By using avio_get_dyn_buf() + ffio_free_dyn_buf() instead of
avio_close_dyn_buf() + av_free() one can avoid an allocation + copy for
small tags. Furthermore, it simplifies freeing.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:37 -03:00
Andreas Rheinhardt c36a3df676 avformat/avc: Avoid allocation for small SPS/PPS arrays
By using avio_get_dyn_buf() + ffio_free_dyn_buf() instead of
avio_close_dyn_buf() + av_free() one can avoid an allocation + copy for
small extradata. Furthermore, it simplifies freeing.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:37 -03:00
Andreas Rheinhardt a31f68fb44 avformat/av1: Avoid allocation for small headers
By using avio_get_dyn_buf() + ffio_free_dyn_buf() instead of
avio_close_dyn_buf() + av_free() one can avoid an allocation + copy for
small headers. Furthermore, it simplifies freeing.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:37 -03:00
Andreas Rheinhardt 67ce9e0463 avformat/apetag: Avoid allocation for small tags
By using avio_get_dyn_buf() + ffio_free_dyn_buf() instead of
avio_close_dyn_buf() + av_free() one can avoid an allocation + copy for
small tags. Furthermore, it simplifies freeing.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:36 -03:00
Andreas Rheinhardt 163bb9ac0a avformat/aviobuf: Avoid allocating buffer when using dynamic buffer
Up until now, using a dynamic buffer entailed at least three
allocations: One for the AVIOContext, one for the AVIOContext's opaque
(which, among other things, contains the small write buffer), and one
for the big buffer that is independently allocated that is returned when
calling avio_close_dyn_buf().

It is possible to avoid the third allocation if one doesn't use a
packetized dynamic buffer, if all the data written so far fit into the
write buffer and if one does not require the actual (big) buffer to have
an indefinite lifetime. This is done by making avio_get_dyn_buf() return
a pointer to the data in the write buffer if nothing has been written to
the main buffer yet. The dynamic buffer will then be freed using
ffio_free_dyn_buf() (which needed to be modified not to call
avio_close_dyn_buf() internally).

So a typical use-case like:

size = avio_close_dyn_buf(dyn_pb, &buf);
do something with buf
av_free(buf);

can be converted to:

size = avio_get_dyn_buf(dyn_pb, &buf);
do something with buf
ffio_free_dyn_buf(&dyn_pb);

In more complex scenarios this can simplify freeing as well, because it
is now clear that freeing always has to be performed via
ffio_free_dyn_buf().

Of course, in case this saves an allocation it also saves a memcpy.
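
A concrete variant of the use-case above, with the calls filled in (a sketch; it assumes the written data is small enough that no separate big buffer is allocated):

#include "libavformat/avio.h"
#include "libavformat/avio_internal.h"

static int process_blob(const uint8_t *data, int size)
{
    AVIOContext *dyn_pb;
    uint8_t *buf;
    int len, ret;

    if ((ret = avio_open_dyn_buf(&dyn_pb)) < 0)
        return ret;
    avio_write(dyn_pb, data, size);
    len = avio_get_dyn_buf(dyn_pb, &buf);   /* buf remains owned by dyn_pb */
    /* ... do something with buf/len while dyn_pb is still alive ... */
    ffio_free_dyn_buf(&dyn_pb);             /* frees both the context and buf */
    return len;
}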

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-28 15:20:36 -03:00
Limin Wang 8aa143eaa8 avfilter/vf_libvmaf: Check for av_frame_alloc failure
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-28 18:44:01 +01:00
Andreas Rheinhardt df912bbc5f avformat/flacenc: Add const to ff_flac_write_header() parameter
The extradata is not changed at all.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-28 18:44:01 +01:00
Zhao Zhili ed5cdf3d5a avcodec/vp8: fix multiple ff_thread_finish_setup() calls
The webp decoder doesn't set the update_thread_context field.

$ ffmpeg -i rgb_q80.webp -f null -
[webp @ 0x7ffbd5823200] Multiple ff_thread_finish_setup() calls

Reviewed-by: Peter Ross <pross@xvid.org>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-28 18:44:01 +01:00
Limin Wang 3a6ec10d90 avfilter/vf_colorconstancy: av_frame_free(&in) in case of error or direct flag is false
Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-28 18:44:01 +01:00
Linjie Fu df625057af lavc/qsvenc: Fix some code indentations
Signed-off-by: Linjie Fu <linjie.fu@intel.com>
2019-11-28 23:34:40 +08:00
Zhong Li 1b831bc729 doc/encoder: add the missing qsv encoders
Reviewed-by: Gyan Doshi <ffmpeg@gyani.pro>
Signed-off-by: Zhong Li <zhongli_dev@126.com>
2019-11-28 23:30:31 +08:00
Zhong Li 846e26b8c9 lavc/rav1e: log and doc updated for const quantizer mode
Signed-off-by: Zhong Li <zhongli_dev@126.com>
2019-11-28 23:27:46 +08:00
Thierry Foucu eaf566ce0b Add options for spatial layers.
Disabled by default to output all the layers, matching the libaomdec wrapper.
Add option to select the operating point for the spatial layers.
Update the documentation with the new options.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-28 10:21:20 -03:00
Paul B Mahol ed58f8475f avcodec: add mvha video decoder 2019-11-27 23:54:20 +01:00
Paul B Mahol 61dc7add30 avcodec: add mvdv video decoder 2019-11-27 23:53:43 +01:00
Steven Liu afee801276 avformat/hls: correct grammatical errors of m3u8_hold_counters option
Suggested-by: Gyan <ffmpeg@gyani.pro>
Suggested-by: Rodney Baker <rodney.baker@iinet.net.au>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-11-27 19:04:00 +08:00
Carl Eugen Hoyos a2fbdc6898 lavc/qsvenc: Fix compilation for some build environments.
Reported and tested by Sean Darcy.
2019-11-27 00:03:33 +01:00
Oleg Dobkin 32ba563cfc avutil/hwcontext_cuda: allow using primary CUDA device context
Signed-off-by: Timo Rothenpieler <timo@rothenpieler.org>
2019-11-26 16:24:40 +01:00
Steven Liu 59d264b0a1 avformat/hlsenc: set strict_std_compliance from the parent AVFormatContext
fix ticket: 8388

Reviewed-by: Jan Ekström <jeebjp@gmail.com>
Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-11-26 11:32:03 +08:00
Limin Wang d07d1c1dd0 avcodec/v210dec: add support for frame and slice threading
1, Test server configure:
[root@localhost ~]# cat /proc/cpuinfo  |grep "model name"
 model name      : Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz
 model name      : Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz
 ...

[root@localhost ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:           102G        1.1G        100G         16M        657M        100G
Swap:          4.0G          0B        4.0G

2, Test result:
encode the v210 input data for testing:
./ffmpeg -y -i 4k_422.ts  -c:v v210 -vframes 10 test.avi

master:
./ffmpeg -y -threads 1 -stream_loop 1000 -i ./test.avi -benchmark -f null -
frame=10010 fps= 60 q=-0.0 Lsize=N/A time=00:38:26.30 bitrate=N/A speed=13.7x
video:5240kB audio:432432kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
bench: utime=101.869s stime=66.181s rtime=167.996s
bench: maxrss=186552kB

patch applied:
./ffmpeg -y -threads 2 -thread_type slice -stream_loop 1000 -i ./test.avi -benchmark -f null -
frame=10010 fps= 72 q=-0.0 Lsize=N/A time=00:38:26.30 bitrate=N/A speed=16.5x
video:5240kB audio:432432kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
bench: utime=103.562s stime=74.858s rtime=139.599s
bench: maxrss=188616kB

./ffmpeg -y -threads 2 -thread_type frame -stream_loop 1000 -i ./test.avi -benchmark -f null -
frame=10010 fps= 85 q=-0.0 Lsize=N/A time=00:38:26.30 bitrate=N/A speed=19.6x
video:5240kB audio:432432kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
bench: utime=114.310s stime=92.685s rtime=117.693s
bench: maxrss=231896kB

Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 09:54:27 +01:00
Limin Wang f0dbeb5eaa avcodec/v410dec: add support for frame and slice threading
1, Test server configure:
[root@localhost ~]# cat /proc/cpuinfo  |grep "model name"
model name	: Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz
model name	: Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz
...

[root@localhost ~]# free -h
              total        used        free      shared  buff/cache   available
Mem:           102G        1.1G        100G         16M        657M        100G
Swap:          4.0G          0B        4.0G

2, Test result:
encode the v410 input data for testing:
 ./ffmpeg -y -i 4k_422.ts  -c:v v410 -vframes 10 test.avi

master:
./ffmpeg -y -stream_loop 1000 -i ./test.avi -benchmark -f null -
frame=10010 fps= 37 q=-0.0 Lsize=N/A time=00:38:26.30 bitrate=N/A speed= 8.6x
video:5240kB audio:432432kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
bench: utime=166.016s stime=102.192s rtime=268.120s
bench: maxrss=273400kB

patch applied:
./ffmpeg -y -threads 2 -thread_type slice -stream_loop 1000 -i ./test.avi -benchmark -f null -
frame=10010 fps= 53 q=-0.0 Lsize=N/A time=00:38:26.30 bitrate=N/A speed=12.3x
video:5240kB audio:432432kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
bench: utime=165.135s stime=100.456s rtime=187.994s
bench: maxrss=275476kB

./ffmpeg -y -threads 2 -thread_type frame -stream_loop 1000 -i ./test.avi -benchmark -f null -
frame=10010 fps= 61 q=-0.0 Lsize=N/A time=00:38:26.30 bitrate=N/A speed=14.1x
video:5240kB audio:432432kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
bench: utime=171.386s stime=122.102s rtime=163.637s
bench: maxrss=340308kB

Signed-off-by: Limin Wang <lance.lmwang@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 09:54:27 +01:00
Steven Liu d5e3d8e2f7 avformat/hls: add option for the maximum number of m3u8 list loads
Set the maximum number of times to reload the m3u8 playlist when a refresh
brings no new segments.

Signed-off-by: Steven Liu <lq@chinaffmpeg.org>
2019-11-25 11:12:20 +08:00
Michael Niedermayer 1ca978d636 avcodec/nuv: Use ff_set_dimensions()
Fixes: OOM
Fixes: 18956/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_NUV_fuzzer-5766505644163072

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer d83002179f avformat/vividas: Error out on audio packets in the absence of audio streams
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer 3e5a528bbe avformat/vividas: Check and require 1 video stream
The demuxer hardcodes that audio is stream_id = 1, so it does not
currently work with more or fewer than 1 video stream at st=0.

Fixes: assertion failure
Fixes: 18602/clusterfuzz-testcase-minimized-ffmpeg_DEMUXER_fuzzer-6259277199310848

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer d82ab96e76 avcodec/ffwavesynth: Fix integer overflow with pink_ts_cur/next
Fixes: signed integer overflow: 6175076100092079360 - -5034989061050195840 cannot be represented in type 'long'
Fixes: 18614/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_FFWAVESYNTH_fuzzer-5704508847423488

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer 721624c2f6 avcodec/ralf: Fix integer overflows with the filter coefficient in decode_channel()
Fixes: signed integer overflow: 1145975808 - -1146173210 cannot be represented in type 'int'
Fixes: 18616/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_RALF_fuzzer-5121296757424128

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer bf9c4a1275 avcodec/g729dec: Use 64bit and clip in scalar product
The G729 reference decoder clips after each individual operation and keeps track
of whether overflow occurred (in the fixed-point implementation); this here is
simpler and faster, but not 1:1 the same as what the reference does.

Non-fuzzed samples which trigger any such overflow are welcome, so that
the need and impact of different clipping solutions can be evaluated.
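
A sketch of the 64-bit accumulate-and-clip approach described here (an illustration, not the exact patch):

#include <stdint.h>
#include "libavutil/common.h"   /* av_clipl_int32() */

static int32_t scalar_product64(const int16_t *a, const int16_t *b, int n)
{
    int64_t sum = 0;
    for (int i = 0; i < n; i++)
        sum += (int64_t)a[i] * b[i];      /* accumulate in 64 bits: no overflow */
    return av_clipl_int32(sum);           /* clip once at the end, unlike the
                                           * per-operation clipping of the reference */
}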

Fixes: signed integer overflow: 1271483721 + 1073676289 cannot be represented in type 'int'
Fixes: 18617/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ACELP_KELVIN_fuzzer-5137705679978496

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer 75b64e5aa3 avcodec/mxpegdec: Check for multiple SOF
Fixes: Timeout (14sec -> 9ms)
Fixes: 18598/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_MXPEG_fuzzer-5726095261564928

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer 1138cdecbe avcodec/nuv: Move comptype check up
Fixes: Timeout (23sec -> 5ms)
Fixes: 18517/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_NUV_fuzzer-5753135536013312

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-25 00:07:07 +01:00
Michael Niedermayer cf323f4d38 avcodec/wmavoice: Fix integer overflow in synth_frame()
Fixes: left shift of negative value -3
Fixes: 18518/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_WMAVOICE_fuzzer-6560514359951360

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 23:00:25 +01:00
Michael Niedermayer 5634e20525 avcodec/rawdec: Check bits_per_coded_sample more pedantically for 16bit cases
Fixes: shift exponent -14 is negative
Fixes: 18335/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_RAWVIDEO_fuzzer-5723267192586240

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 23:00:01 +01:00
Michael Niedermayer 6014bcf1b7 avutil/lfg: Correct index increment type to avoid undefined behavior
Fixes: signed integer overflow: 2147483647 + 1 cannot be represented in type 'int'
Fixes: 18333/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_COMFORTNOISE_fuzzer-5668481831272448

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 22:58:59 +01:00
Michael Niedermayer 327a968817 avcodec/cngdec: Remove AV_CODEC_CAP_DELAY
As is, the decoder will never stop; it will cause an infinite loop. The RFC seems only
to speak of non-empty packets, so endlessly generating noise from the last empty flush
packets seems wrong.

Fixes: infinite loop
Fixes: 18333/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_COMFORTNOISE_fuzzer-5668481831272448

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 22:57:02 +01:00
Michael Niedermayer e21ec54fdf tools/target_dec_fuzzer: Adjust threshold for VP9
The threshold is chosen so that the worst frames together do not take
excessive time.
A better solution is welcome!

Fixes: Timeout (308sec ->102ms)
Fixes: 18314/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_VP9_fuzzer-5701689176227840

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 22:56:32 +01:00
Andreas Rheinhardt 6eb88daed1 avformat/matroskaenc: Remove outdated comment
This comment does not account for the fact that the limits on cluster
size and duration are configurable by the user since 98308bd4.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 16:01:56 +01:00
Andreas Rheinhardt 4470ab1e0e avformat/matroskaenc: Fix potential leak of cached packet
If mkv_write_trailer() is not called, the cached audio packet might
leak; so unref it in mkv_deinit().

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-24 16:01:56 +01:00
Baptiste Coudurier d65aaf8ea9 avformat/mxfenc: simplify d-10 ul handling 2019-11-23 11:02:04 -08:00
Baptiste Coudurier 635a5c11a6 avformat/mxfenc: simplify dv ul handling 2019-11-23 11:02:04 -08:00
Baptiste Coudurier eafa8e8592 avformat/dvenc: support muxing dvcprohd 2019-11-23 11:02:04 -08:00
Paul B Mahol 93414ce831 avfilter: add axcorrelate filter 2019-11-23 11:54:20 +01:00
Paul B Mahol aaac48fb50 avfilter/vf_normalize: add support for commands 2019-11-23 11:07:02 +01:00
Paul B Mahol 89aa1342b1 avfilter/af_aiir: normalize biquads only if divisor is big enough 2019-11-22 21:10:43 +01:00
Paul B Mahol f46b04c4c3 avfilter/af_biquads: add new normalize/n option 2019-11-22 21:10:43 +01:00
Andriy Gelman e759fbfbc4 avcodec/v4l2_context: Fix indentation
Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
Signed-off-by: Aman Gupta <aman@tmm1.net>
2019-11-22 12:06:28 -08:00
Andriy Gelman ccc83ff11e avcodec/v4l2_m2m_dec: Fix decoding on Odroid XU4
c0c7946196 unintentionally changed the initialization flow of the
decoder: it caused the capture buffers to be initialized at
v4l2_m2m.c:180 in v4l2_configure_contexts(). This breaks h264 decoding
on the Odroid XU4 (the RPI4 was not affected).

This commit postpones capture buffer initialization, as it was before
c0c7946196, to fix the issue.

Signed-off-by: Andriy Gelman <andriy.gelman@gmail.com>
Signed-off-by: Aman Gupta <aman@tmm1.net>
2019-11-22 12:06:28 -08:00
Paul B Mahol f2a01b4c8b avfilter/af_aiir: fix biquads normalization 2019-11-22 20:24:29 +01:00
Paul B Mahol eecc45cea5 avfilter/af_aiir: add missing normalization of biquads gains 2019-11-22 17:42:04 +01:00
Paul B Mahol 2f5fb9e60f avfilter/af_aiir: make a/b coefficients array 2019-11-22 16:13:06 +01:00
Paul B Mahol e169d3756e avfilter/af_aiir: factor out response calculation 2019-11-22 16:07:03 +01:00
Paul B Mahol c36e72ed27 avfilter/af_aiir: check for stability 2019-11-22 16:07:02 +01:00
Gyan Doshi 1b78da449e doc/filters: complete and correct vmafmotion section 2019-11-22 19:55:35 +05:30
Andreas Rheinhardt 27c6c92534 avformat/av1: Fix leak of dynamic buffer in case of parsing failure
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-22 00:09:05 -03:00
Paul B Mahol 9cd56bb94c avfilter/af_aiir: fix array length when selecting conjugate poles 2019-11-21 23:27:34 +01:00
Zhao Zhili bbb68be0cc avfilter/graphdump: fix use of uninitialized variables
In case av_bprint_finalize() fails.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-21 21:26:12 +01:00
leozhang 4a3aa77d74 avfilter/avfilter: fix indentation
Signed-off-by: leozhang <leozhang@qiyi.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-21 21:26:12 +01:00
Paul B Mahol 55ca21d54e avfilter/vf_amplify: add timeline support 2019-11-21 18:34:07 +01:00
Paul B Mahol 103effebc1 avfilter/vf_datascope: add support for commands in oscilloscope 2019-11-21 18:08:48 +01:00
Paul B Mahol c98d8b2bf5 avfilter/vsrc_sierpinski: change seed option type 2019-11-21 17:38:48 +01:00
Paul B Mahol d83304d539 avfilter/vsrc_sierpinski: fix another typo 2019-11-21 17:35:35 +01:00
Paul B Mahol 09fd1b18f0 avfilter/vsrc_testsrc: simplify color filter commands parsing 2019-11-21 17:32:04 +01:00
Paul B Mahol 94c0b27397 avfilter/vf_chromakey: add support for commands 2019-11-21 17:19:40 +01:00
Paul B Mahol ae6c4168e6 avfilter/vf_lumakey: add support for commands 2019-11-21 16:59:39 +01:00
Paul B Mahol 6b9862f614 avfilter/vf_lumakey: change options to doubles, so that values are automatically scaled 2019-11-21 16:52:48 +01:00
Paul B Mahol 08f7968fc4 avfilter/vf_lumakey: add support for 12bit yuva formats 2019-11-21 16:40:17 +01:00
Paul B Mahol f89ebf88a1 avfilter/vf_scroll: add support for slice threading 2019-11-21 12:59:08 +01:00
Paul B Mahol 9bd4df1654 avfilter/vf_chromashift: add support for commands 2019-11-21 12:24:02 +01:00
Paul B Mahol fbcb141c06 avfilter/vf_fillborders: add support for commands 2019-11-21 12:07:58 +01:00
Paul B Mahol 84e9a55d8e avfilter/af_afftdn: simplify changing commands 2019-11-21 11:49:23 +01:00
Paul B Mahol 8e2a832a55 avfilter/vf_median: clip radius instead of erroring out 2019-11-21 11:21:31 +01:00
Paul B Mahol 7ead0daa24 avfilter/vf_median: add support for commands 2019-11-20 22:41:19 +01:00
Alex Mogurenko b6571777d2 avcodec/mjpegbdec: Fix yuv444 pix_fmt detection
By default adobe_transform is set to 0, and because of that the mjpegb decoder detects the yuv444 pix_fmt as bgrp.
2019-11-20 18:35:27 +01:00
Paul B Mahol 176ac987aa avfilter/f_graphmonitor: output frames in pts gaps 2019-11-20 17:44:18 +01:00
Michael Niedermayer a1f8b36cc4 avcodec/iff: Move index use after check in decodeplane8()
Fixes: index 9 out of bounds for type 'const uint64_t [8][256]'
Fixes: 18409/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_IFF_ILBM_fuzzer-5767030560522240
Fixes: 18720/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_IFF_ILBM_fuzzer-5651995784642560

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer f09151fff9 avcodec/atrac3: Check for huge block aligns
The largest documented frame size (= block_align) is 1024 bytes
(https://wiki.multimedia.cx/index.php/ATRAC3).

Without a limit this can allocate arbitrary memory and trigger OOM
Fixes: OOM
Fixes: 18337/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ATRAC3_fuzzer-5763861478637568
Fixes: 18556/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ATRAC3AL_fuzzer-5646183334936576

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer 1b7d02642b avcodec/ralf: use multiply instead of shift to avoid undefined behavior in decode_block()
Fixes: left shift of negative value -249
Fixes: 18566/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_RALF_fuzzer-5649394561187840

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
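For illustration, a hedged sketch of the pattern behind the ralf change above (hypothetical function, not the RALF decoder code): left-shifting a negative value is undefined behavior in C, whereas multiplying by the corresponding power of two is defined as long as the product is representable, and casting to unsigned makes any wraparound well defined.

/* Hypothetical sketch: avoid "v << bits" when v may be negative. */
static int scale_coeff(int v, int bits)
{
    /* return v << bits;                      -- UB when v is negative      */
    return v * (1 << bits);                /* defined while the product fits */
    /* or: return (int)((unsigned)v << bits); -- wraps instead of UB        */
}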
Michael Niedermayer c54b9fc42f avcodec/wmadec: Require previous exponents for reuse
Fixes: division by zero
Fixes: 18474/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_WMAV2_fuzzer-5764986962182144

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer 7fc1baf0ca avcodec/vc1_block: Fix undefined behavior in ac prediction rescaling
The intermediates are required to fit in 12bit (8.1.3.9 Coefficient Scaling)
See SMPTE 421M-2006 and Amendment 1-2007

Fixes: signed integer overflow: -20691 * 262144 cannot be represented in type 'int'
Fixes: 18479/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_VC1_fuzzer-5128912371187712

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer e36ccb5048 avcodec/qdm2: The smallest header seems to have 2 bytes so treat 1 as invalid
Fixes: Timeout (217sec -> 2ms)
Fixes: 18488/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_QDM2_fuzzer-5708293662310400

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer dc3f327e74 avcodec/apedec: Fixes integer overflow of res+*data in do_apply_filter()
Fixes: signed integer overflow: 7400 + 2147482786 cannot be represented in type 'int'
Fixes: 18405/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_APE_fuzzer-5708834760294400

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer b6abdb1faf avcodec/ra288: Check block_align for the assumed value
Fixes: Timeout (224sec -> 1ms)
Fixes: 18408/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_RA_288_fuzzer-5740382570151936

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer c8c17b8cef avcodec/sonic: Fix integer overflow in predictor_calc_error()
Fixes: signed integer overflow: 5 * -1094995529 cannot be represented in type 'int'
Fixes: 18346/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_SONIC_fuzzer-5709623893426176

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer faea5b4462 avformat/vividas: Add EOF check in val_1 loop in track_header()
Fixes: Timeout (148sec -> 0.1sec)
Fixes: 18427/clusterfuzz-testcase-minimized-ffmpeg_DEMUXER_fuzzer-5682124627116032

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Michael Niedermayer 19b8db2908 avcodec/atrac9dec: Check precision_fine/coarse
Clipping is done as it was preferred in review
See: [FFmpeg-devel] [PATCH 1/5] avcodec/atrac9dec: Check precision_fine/coarse

Fixes: out of array access
Fixes: 18330/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_ATRAC9_fuzzer-5641113058148352

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 15:50:15 +01:00
Andreas Rheinhardt dbc50f8a93 avformat/matroskadec: Fix default value of BlockAddID
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-20 11:22:14 -03:00
Paul B Mahol a16de215c9 avfilter/af_rubberband: fix sample overqueueing
Fixes #8389
2019-11-20 13:05:50 +01:00
Paul B Mahol e21d4a7ca4 avfilter/vf_chromakey: add >8 bit support 2019-11-20 11:15:27 +01:00
Michael Niedermayer d73f062706 avcodec/dvdec: Use av_clip_uint8 instead of ff_crop_tab
Fixes: out of array access
Fixes: 18788/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_DVVIDEO_fuzzer-6254863113781248

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: Baptiste Coudurier <baptiste.coudurier@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-20 00:14:03 +01:00
James Almer fdf46b4a6b avcodec/amfnec: allocate packets using av_new_packet()
This ensures they will be reference counted, as required by the AVCodec.receive_packet()
API.

Should fix ticket #8386.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-19 19:42:48 -03:00
James Almer f18a5efb52 fate/cbs: add initial AV1 tests
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-19 14:49:03 -03:00
Paul B Mahol 804fce8bc2 avfilter/vf_midequalizer: add 16bit formats 2019-11-19 13:08:07 +01:00
Paul B Mahol 258f66998f avfilter/vf_deblock: add 12bit yuva formats 2019-11-19 12:43:26 +01:00
Paul B Mahol 18d25ecede avfilter/vf_weave: pal and hwaccel formats are not supported 2019-11-19 12:35:55 +01:00
Paul B Mahol 5ed6b735ab avfilter/vf_blend: cosmetics: reindent 2019-11-19 12:14:36 +01:00
Paul B Mahol c8f269f24f avfilter/vf_chromashift: remove unused header
Reverts ef479ee660.
2019-11-19 10:37:12 +01:00
Gyan Doshi 0cfda90b34 avfilter/Makefile: add missing dependency for lut3d
lut3d requires framesync
2019-11-19 14:11:20 +05:30
Gyan Doshi ef479ee660 avfilter/Makefile: add missing dependency for chromashift
chromashift requires framesync
2019-11-19 14:10:42 +05:30
Gyan Doshi 0321bde0a2 doc/filters: correct libvmaf example
AVTB is 1/AV_TIME_BASE
2019-11-19 14:09:43 +05:30
Gyan Doshi f394d7b382 doc/filters: correct ssim example
AVTB is 1/AV_TIME_BASE
2019-11-19 14:08:39 +05:30
Gyan Doshi 6e0461d8d4 doc/filters: correct psnr example
AVTB is 1/AV_TIME_BASE
2019-11-19 14:04:23 +05:30
Gyan Doshi 2ff444bd3a avfilter/Makefile: add missing dependency for scale_cuda
scale_cuda includes scale.h
2019-11-19 12:07:03 +05:30
James Almer 4e2bef6a82 avcodec/cbs_av1: keep separate reference frame state for reading and writing
In scenarios where a Temporal Unit is written right after reading it using the same
CBS context (av1_metadata, av1_frame_merge, etc.), the reference frame state used
by the writer must not be the state resulting from the reader having already
parsed the frame in question.

This fixes writing Switch frames, and frames using short ref signaling.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-18 21:30:05 -03:00
Paul B Mahol 6c2f866309 avfilter/vf_bm3d: improve threshold scaling with different block_size and depth 2019-11-19 00:12:47 +01:00
Paul B Mahol eae292919b avfilter/vf_dedot: add 12bit yuva formats 2019-11-18 18:35:32 +01:00
Paul B Mahol fa00f80086 avfilter/vf_deflicker: add support for alpha formats 2019-11-18 18:35:32 +01:00
Paul B Mahol 9c85e1a091 avfilter/vf_amplify: add support for alpha formats 2019-11-18 18:35:32 +01:00
Paul B Mahol 2f7da8ca36 avfilter/vf_limiter: add 12bit yuva formats 2019-11-18 18:35:32 +01:00
Paul B Mahol 4670f8126a avfilter/vf_fillborders: add 12bit yuva formats 2019-11-18 18:35:32 +01:00
Paul B Mahol 9277510766 avfilter/vf_premultiply: add support for 12bit yuva format 2019-11-18 18:35:32 +01:00
Paul B Mahol a960d33112 avfilter/vf_chromashift: add 12bit yuva formats 2019-11-18 18:35:32 +01:00
Paul B Mahol 39a4d69d05 avfilter/vf_midequalizer: add 12bit yuva formats 2019-11-18 18:08:13 +01:00
Paul B Mahol ee8fe9c94d avfilter/vf_convolution: add 12bit yuva formats 2019-11-18 18:01:12 +01:00
Paul B Mahol 4b554382a0 avfilter/vf_neighbor: add 12bit yuva formats 2019-11-18 18:00:25 +01:00
Paul B Mahol 37bf725296 avfilter/vf_maskedminmax: add 12bit yuva formats 2019-11-18 17:56:34 +01:00
Paul B Mahol be8487798a avfilter/vf_maskedmerge: add 12bit yuva formats 2019-11-18 17:55:21 +01:00
Paul B Mahol b565d63894 avfilter/vf_maskedclamp: add 12bit yuva formats 2019-11-18 17:53:45 +01:00
Paul B Mahol 69f5a77dec avfilter/vf_scroll: add support for 12bit yuva formats 2019-11-18 17:50:05 +01:00
Paul B Mahol c7abb07331 avfilter/vf_blend: add 12bit yuva formats 2019-11-18 17:47:35 +01:00
Paul B Mahol 619f530812 avfilter/vf_vaguedenoiser: add support for alpha formats 2019-11-18 17:44:10 +01:00
Paul B Mahol 76ef2ec471 avfilter/vf_bm3d: use boolean for ref option 2019-11-18 17:39:45 +01:00
Paul B Mahol cb844376c3 avfilter/vf_fftdnoiz: add support for alpha formats 2019-11-18 17:36:56 +01:00
Paul B Mahol 3e524a11d9 avfilter/vf_bm3d: add support for alpha formats 2019-11-18 17:33:19 +01:00
Paul B Mahol 86f636348c avfilter/vf_w3fdif: add support for more >8 bit alpha formats 2019-11-18 17:30:44 +01:00
Paul B Mahol 6a38538a09 avfilter/vf_avgblur: add support for 12bit yuva formats 2019-11-18 17:27:42 +01:00
Paul B Mahol 1b26f27026 avfilter/vf_gblur: add support for 12bit yuva formats 2019-11-18 17:26:59 +01:00
Paul B Mahol 97cf49b7fe avfilter/vf_median: add support for 12bit yuva formats 2019-11-18 17:24:52 +01:00
Paul B Mahol f490c71553 avfilter/vf_remap: add support for 12bit yuva format 2019-11-18 17:21:09 +01:00
Paul B Mahol 410f81f822 avfilter/vf_lut2: add 12bit depth alpha formats 2019-11-18 17:15:29 +01:00
Paul B Mahol a50bd3a50e avfilter/vf_atadenoise: support alpha formats 2019-11-18 17:06:59 +01:00
Paul B Mahol 73b730e3e6 avfilter/vf_xmedian: add support for alpha formats 2019-11-18 17:06:05 +01:00
Gyan Doshi f897d8c863 doc/filters: improve libvmaf section
Added default values, correct strings for default model path and pool
method.
2019-11-18 19:32:07 +05:30
Gyan Doshi d831edc387 avdevice/decklink: add option to drop frames till timecode is seen
Option wait_for_tc only takes effect if tc_format is set
2019-11-18 10:01:03 +05:30
Andreas Rheinhardt cda3e8ca04 avcodec/cbs: Fix potential overflow
The number of bits in a PutBitContext must fit into an int, yet nothing
guaranteed that the size argument cbs_write_unit_data() uses in init_put_bits()
is in the range 0..INT_MAX / 8. This has been changed.

Furthermore, the check 8 * data_size > data_bit_start (that there is
data beyond the initial padding when writing MPEG-2 or H.264/5 slices)
could also overflow, so it is divided by 8 to get an equivalent check
without this problem.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-17 23:31:45 +00:00
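A small sketch of the second point in the commit message above (assumed function and parameter names, not the exact cbs code): the multiplication 8 * data_size can overflow, while dividing the bit position by 8 gives an equivalent byte-level check that cannot.

#include <stddef.h>

/* Assumed names for illustration: "is there data beyond the initial
 * padding?"  Since data_size counts whole bytes,
 * data_size > data_bit_start / 8 is equivalent to
 * 8 * data_size > data_bit_start but free of overflow. */
static int has_payload_after_padding(size_t data_size, size_t data_bit_start)
{
    return data_size > data_bit_start / 8;
}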
Andreas Rheinhardt 7c92eaace2 avcodec/cbs: Factor out common code for writing units
All cbs-functions to write units share a common pattern:
1. They check whether they have a write buffer (that is used to store
the unit's data until the needed size becomes known after writing the
unit when a dedicated buffer will be allocated).
2. They use this buffer for a PutBitContext.
3. The (codec-specific) writing takes place through the PutBitContext.
4. The return value is checked. AVERROR(ENOSPC) here always indicates
that the buffer was too small and leads to a reallocation of said
buffer.
5. The final buffer will be allocated and the data copied.

This commit factors this common code out in a single function in cbs.c.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
2019-11-17 23:31:44 +00:00
Andreas Rheinhardt 5d8d9e032c avformat/flacdec: Remove useless packet
flac_read_timestamp() applied av_init_packet() to a packet (which
initializes all fields of the packet except for data and size) and then
went on to use only the data and size fields. In other words: said
packet can be removed and replaced by a uint8_t * and an int.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-17 23:44:17 +01:00
Carl Eugen Hoyos 56f25c11a9 lavf/Makefile: Specify sln demuxer requirements.
Mentioned in ticket #8378.
2019-11-17 23:40:14 +01:00
Carl Eugen Hoyos ce8faea9fd configure: Add fft dependency for showspatial filter.
Mentioned in ticket #8378.
2019-11-17 23:29:23 +01:00
Carl Eugen Hoyos 8038a87d49 configure: Add fft dependency for headphone filter.
Mentioned in ticket #8378.
2019-11-17 23:24:38 +01:00
Nomis101 5e0a3278bb avcodec: Add more kCVImageBufferColorPrimaries to videotoolboxenc
Signed-off-by: Rick Kern <kernrj@gmail.com>
2019-11-17 16:30:15 -05:00
Paul B Mahol 315a4496ea avfilter/f_loop: switch aloop to activate 2019-11-17 16:20:58 +01:00
Paul B Mahol 2e7ccd493a avfilter/f_loop: fix pts handling when timebase and 1/samplerate differ 2019-11-17 16:19:07 +01:00
Paul B Mahol f7ad9a6c16 avfilter/af_sidechaincompress: fix pts handling when timebase and 1/samplerate differ 2019-11-17 12:30:10 +01:00
Paul B Mahol b66acf4a34 avfilter/af_anlmdn: fix pts handling when timebase and 1/samplerate differ 2019-11-17 12:25:54 +01:00
Paul B Mahol 0a17a30150 avfilter/af_agate: fix pts handling when timebase and 1/samplerate differ 2019-11-17 12:23:48 +01:00
Paul B Mahol 115537f487 avfilter/af_afftdn: fix pts handling when timebase and 1/samplerate differ 2019-11-17 12:21:10 +01:00
Paul B Mahol c588a0f528 avfilter/af_afftfilt: fix pts handling when timebase and 1/samplerate differ 2019-11-17 12:19:39 +01:00
Paul B Mahol 7db61bf0e3 avfilter/af_adeclick: fix pts handling when timebase and 1/samplerate differ 2019-11-17 12:17:51 +01:00
Paul B Mahol 90622f9e8f avfilter/af_aecho: switch to activate 2019-11-17 11:57:52 +01:00
Gyan Doshi b741a84a15 doc/APIchanges: update for av_expr_count_vars 2019-11-17 11:09:13 +05:30
Gyan Doshi 1c23abc88f avutil/eval: add function to track variable use
1) Some filters allow cross-referenced expressions, e.g. x=y+10. In
such cases, filters evaluate expressions multiple times for
successful evaluation of all expressions. If the expression for one or
more variables contains an RNG, the result may vary across evaluations,
leading to inconsistent values across the cross-referenced expressions.

2) A related case is circular expressions, e.g. x=y+10 and y=x+10, which
cannot be successfully resolved.

3) Certain filter variables may only be applicable in specific eval modes
and lead to a failure of evaluation in other modes, e.g. pts is only
relevant for frame eval mode.

At present, there is no reliable means to identify these occurrences, and
thus the error messages provided are broad or inaccurate. The helper
function introduced - av_expr_count_vars - allows developers to identify
the use and count of variables in expressions and thus tailor the error
message, allow for a graceful fallback, and/or decide evaluation order.
2019-11-17 11:07:05 +05:30
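A minimal usage sketch of the new helper (the wrapper, variable list and indices below are invented for illustration; av_expr_parse(), av_expr_count_vars() and av_expr_free() are the lavu API):

#include <libavutil/eval.h>

/* Hypothetical wrapper: parse expr_str against a fixed variable list and
 * report whether the variable at var_idx is referenced at all. The counter
 * array is indexed in the same order as the names passed to av_expr_parse(). */
static int expr_uses_var(void *log_ctx, const char *expr_str, int var_idx)
{
    static const char *const var_names[] = { "x", "y", "w", "h", NULL };
    unsigned counter[4] = { 0 };
    AVExpr *e = NULL;
    int ret = av_expr_parse(&e, expr_str, var_names,
                            NULL, NULL, NULL, NULL, 0, log_ctx);
    if (ret < 0)
        return ret;
    ret = av_expr_count_vars(e, counter, 4); /* occurrence count per variable */
    av_expr_free(e);
    if (ret < 0)
        return ret;
    return counter[var_idx] > 0;
}

A filter could, for instance, call such a helper on the expression given for y to detect a cross-reference to x before deciding the evaluation order.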
Jun Zhao 487e7e9670 lavf/rtmpproto: fix the playpath truncation if the len > 512
Fix the playpath truncation when the length > 512.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Found-by: liuwenhuang <liuwenhuang@tencent.com>
Signed-off-by: Jun Zhao <barryjzhao@tencent.com>
2019-11-17 09:09:38 +08:00
Timo Rothenpieler 4fb6ce27f0 avcodec/nvenc: make sure newly allocated packets are refcounted
Fixes ticket 8383

Signed-off-by: Timo Rothenpieler <timo@rothenpieler.org>
2019-11-17 01:37:34 +01:00
Carl Eugen Hoyos eeab18329f lavc/libopencore-amr: Silence a warning about an unused function. 2019-11-17 00:35:14 +01:00
Paul B Mahol 9498ce0c23 avfilter/vf_framepack: really fix ef466a8b29 2019-11-17 00:23:03 +01:00
Paul B Mahol 9db24ee26d avfilter/vf_framepack: switch to activate 2019-11-17 00:23:03 +01:00
Limin Wang c22665ae69 avformat/mp3dec: Check for occurrences of headers within frames during probing
Fixes misdetection of zYLx.wav

Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-16 23:40:39 +01:00
Michael Niedermayer e9a335150a avformat/mp3dec: Check that the frame fits within the probe buffer
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-16 23:38:43 +01:00
Paul B Mahol ef466a8b29 avfilter/vf_framepack: fix timestamps for frameseq format 2019-11-16 23:33:02 +01:00
Paul B Mahol d52342a563 avfilter/vf_framepack: add missing filtering flag 2019-11-16 23:33:02 +01:00
Michael Niedermayer ec3d8a0e69 avcodec/mjpeg_parser: Make parser a bit more robust with unclean input
Helps: test_roman.mjpeg (note this is not really just mjpeg)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-16 22:04:54 +01:00
Andreas Rheinhardt 27522fb64c avcodec/put_bits: Relax requirements to rebase PutBitContext
The earlier requirement was for the new buffer to be bigger than the old
one. This has been relaxed to only demand that the new buffer can hold
all the data written so far. This is in preparation for further commits.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-16 20:57:45 +01:00
Zhao Zhili 57db9f488f avformat/mov: fix typo in help text
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-16 20:57:45 +01:00
James Almer 81d54531f7 avformat/av1dec: simplify annexb_probe()
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-16 15:56:36 -03:00
James Almer 2703068110 avcodec/cbs_av1: fix reading reference order hint in skip_mode_params()
Reviewed-by: Ronald S. Bultje <rsbultje@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-16 15:56:36 -03:00
Paul B Mahol d7ffa4bd3e avcodec/codec_desc: add missing .type for acelp.kelvin 2019-11-16 15:06:20 +01:00
Michael Niedermayer bfa8272f40 avcodec/agm: Alloc based on coded dimensions
Fixes: out of array read
Fixes: 18715/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_AGM_fuzzer-5659333417500672

Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-15 21:31:43 +01:00
James Almer 73ee53f317 avcodec/encode: add missing assert to avcodec_receive_packet()
Encoders must return reference-counted packets.

This was checked only for encoders using the AVCodec->encode2() API; whatever
encoders using the AVCodec->receive_packet() API returned was blindly
accepted.

Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-14 12:30:51 -03:00
Andreas Rheinhardt f01f9f1793 avformat/av1dec: Redo flushing of bsf
The current approach has two different calls to av_bsf_send_packet():
a normal one, sending a packet, and an extraordinary one just for
flushing. These can be unified into one by making use of the newly
documented fact that av_bsf_send_packet() allows signaling flushing via
empty packets (i.e. packets without data and side data).

This also fixes CID 1455685, which resulted from the fact that the call
for flushing was not checked, given that it couldn't fail.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-14 12:02:51 -03:00
Andreas Rheinhardt 41b05b849f avcodec/avcodec: Adapt the doc of av_bsf_send_packet to match its actual implementation.
Explicitly allowing empty packets to signal flushing helps get rid
of special cases. It does not hinder the ability to send e.g.
timing-only packets, because one can still send packets with zero size
but pkt->data set.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Signed-off-by: James Almer <jamrial@gmail.com>
2019-11-14 12:02:41 -03:00
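A short sketch of the drain pattern the two av_bsf_send_packet() changes above rely on (the wrapper name is invented; the av_bsf_* calls are the documented API): sending NULL, or an otherwise empty packet, signals end of stream, after which the filter's buffered output can be read until AVERROR_EOF.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: flush a bitstream filter and drain its buffered
 * packets. Per the documentation above, an empty packet (or NULL) signals
 * end of stream and the send call never fails for such input. */
static int drain_bsf(AVBSFContext *bsf, AVPacket *pkt)
{
    int ret = av_bsf_send_packet(bsf, NULL);          /* signal flush */
    if (ret < 0)
        return ret;
    while ((ret = av_bsf_receive_packet(bsf, pkt)) >= 0) {
        /* ... use pkt ... */
        av_packet_unref(pkt);
    }
    return ret == AVERROR_EOF ? 0 : ret;              /* EOF means fully drained */
}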
Andreas Rheinhardt a7245adee3 avformat/id3v2: Avoid av_strdup for key and value of dict
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2019-11-13 23:35:21 +01:00
Michael Niedermayer 8d5f9daacd tests/ref/vsynth: add missing reference files, fix build 2019-11-13 23:34:56 +01:00
Derek Buitenhuis 1354c39c78 librav1e: Don't make users explicitly set -qp -1 to use bit rate mode
Signed-off-by: Derek Buitenhuis <derek.buitenhuis@gmail.com>
2019-11-13 18:00:51 +00:00
Baptiste Coudurier a1403032c8 avcodec/dvenc: support encoding dvcprohd 2019-11-13 09:08:43 -08:00
Baptiste Coudurier 11a38be99c avformat/mxfenc: correctly set width values for dvcprohd 2019-11-13 08:53:27 -08:00
261 changed files with 5023 additions and 1768 deletions

View File

@ -24,6 +24,9 @@ version <next>:
- AV1 encoding support via librav1e
- AV1 frame merge bitstream filter
- AV1 Annex B demuxer
- axcorrelate filter
- mvdv decoder
- mvha decoder
version 4.2:

View File

@ -198,6 +198,7 @@ Codecs:
libvorbis.c David Conrad
libvpx* James Zern
libxavs.c Stefan Gehrer
libxavs2.c Huiwen Ren
libzvbi-teletextdec.c Marton Balint
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
@ -369,6 +370,8 @@ Filters:
Sources:
vsrc_mandelbrot.c Michael Niedermayer
dnn Yejun Guo
libavformat
===========

View File

@ -50,6 +50,9 @@ $(TOOLS): %$(EXESUF): %.o
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

configure
View File

@ -2779,6 +2779,7 @@ msmpeg4v3_decoder_select="h263_decoder"
msmpeg4v3_encoder_select="h263_encoder"
mss2_decoder_select="mpegvideo qpeldsp vc1_decoder"
mts2_decoder_select="mss34dsp"
mvha_decoder_deps="zlib"
mwsc_decoder_deps="zlib"
mxpeg_decoder_select="mjpeg_decoder"
nellymoser_decoder_select="mdct sinewin"
@ -3499,6 +3500,7 @@ frei0r_filter_deps="frei0r libdl"
frei0r_src_filter_deps="frei0r libdl"
fspp_filter_deps="gpl"
geq_filter_deps="gpl"
headphone_filter_select="fft"
histeq_filter_deps="gpl"
hqdn3d_filter_deps="gpl"
interlace_filter_deps="gpl"
@ -3547,6 +3549,7 @@ showcqt_filter_suggest="libfontconfig libfreetype"
showcqt_filter_select="fft"
showfreqs_filter_deps="avcodec"
showfreqs_filter_select="fft"
showspatial_filter_select="fft"
showspectrum_filter_deps="avcodec"
showspectrum_filter_select="fft"
showspectrumpic_filter_deps="avcodec"
@ -6137,10 +6140,10 @@ fi
if ! disabled ffnvcodec; then
ffnv_hdr_list="ffnvcodec/nvEncodeAPI.h ffnvcodec/dynlink_cuda.h ffnvcodec/dynlink_cuviddec.h ffnvcodec/dynlink_nvcuvid.h"
check_pkg_config ffnvcodec "ffnvcodec >= 9.0.18.0" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.2.15.8 ffnvcodec < 8.3" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.1.24.9 ffnvcodec < 8.2" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.0.14.9 ffnvcodec < 8.1" "$ffnv_hdr_list" ""
check_pkg_config ffnvcodec "ffnvcodec >= 9.1.23.1" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 9.0.18.3 ffnvcodec < 9.1" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.2.15.10 ffnvcodec < 8.3" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.1.24.11 ffnvcodec < 8.2" "$ffnv_hdr_list" ""
fi
check_cpp_condition winrt windows.h "!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)"
@ -6400,6 +6403,7 @@ enabled omx_rpi && { test_code cc OMX_Core.h OMX_IndexConfigBrcmVideoR
enabled omx && require_headers OMX_Core.h
enabled openssl && { check_pkg_config openssl openssl openssl/ssl.h OPENSSL_init_ssl ||
check_pkg_config openssl openssl openssl/ssl.h SSL_library_init ||
check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||
check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||
check_lib openssl openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||

View File

@ -15,6 +15,9 @@ libavutil: 2017-10-21
API changes, most recent first:
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
Add av_expr_count_vars().
2019-10-14 - f3746d31f9 - lavu 56.35.101 - opt.h
Add AV_OPT_FLAG_RUNTIME_PARAM.

View File

@ -48,6 +48,8 @@ config
tools/target_dec_<decoder>_fuzzer
Build fuzzer to fuzz the specified decoder.
tools/target_bsf_<filter>_fuzzer
Build fuzzer to fuzz the specified bitstream filter.
Useful standard make commands:
make -t <target>

View File

@ -71,6 +71,13 @@ Set amount of tile threads to use during decoding. The default value is 0 (autod
Apply film grain to the decoded video if present in the bitstream. Defaults to the
internal default of the library.
@item oppoint
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
internal default of the library.
@item alllayers
Output all spatial layers of a scalable AV1 bitstream. The default value is false.
@end table
@section libdavs2

View File

@ -331,6 +331,10 @@ segment index to start live streams at (negative values are from the end).
Maximum number of times an insufficient list is attempted to be reloaded.
Default value is 1000.
@item m3u8_hold_counters
The maximum number of times to load m3u8 when it refreshes without new segments.
Default value is 1000.
@item http_persistent
Use persistent HTTP connections. Applicable only for HTTP streams.
Enabled by default.

View File

@ -1395,7 +1395,7 @@ Sets the maximum quantizer to use when using bitrate mode.
Sets the minimum quantizer to use when using bitrate mode.
@item qp
Uses quantizer mode to encode at the given quantizer.
Uses quantizer mode to encode at the given quantizer (0-255).
@item speed
Selects the speed preset (0-10) to encode with.
@ -2776,7 +2776,7 @@ recommended value) and do not set a size constraint.
@section QSV encoders
The family of Intel QuickSync Video encoders (MPEG-2, H.264 and HEVC)
The family of Intel QuickSync Video encoders (MPEG-2, H.264, HEVC, JPEG/MJPEG and VP9)
The ratecontrol method is selected as follows:

View File

@ -1547,6 +1547,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2527,6 +2531,39 @@ ffmpeg -i INPUT -af atrim=end_sample=1000
@end itemize
@section axcorrelate
Calculate normalized cross-correlation between two input audio streams.
The resulting samples are always between -1 and 1 inclusive.
A result of 1 means the two inputs are highly correlated in the selected segment.
A result of 0 means they are not correlated at all.
A result of -1 means the two inputs are out of phase, i.e. they cancel each
other.
The filter accepts the following options:
@table @option
@item size
Set size of segment over which cross-correlation is calculated.
Default is 256. Allowed range is from 2 to 131072.
@item algo
Set algorithm for cross-correlation. Can be @code{slow} or @code{fast}.
Default is @code{slow}. The fast algorithm assumes that mean values over any given
segment are always zero and thus needs far fewer calculations.
This is generally not true, but it holds well enough for typical audio streams
(see the sketch after this table).
@end table
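For reference, a hedged sketch of the quantity the @option{algo} description above points at (an assumption about the underlying math, not a statement of the exact implementation): with the fast algorithm's zero-mean assumption, the normalized cross-correlation of two segments x and y of length N is

\[
r = \frac{\sum_{i=0}^{N-1} x_i\, y_i}{\sqrt{\left(\sum_{i=0}^{N-1} x_i^2\right)\left(\sum_{i=0}^{N-1} y_i^2\right)}}
\]

which always lies in [-1, 1]; the slow algorithm would subtract each segment's mean before applying the same formula.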
@subsection Examples
@itemize
@item
Calculate correlation between channels in stereo audio stream:
@example
ffmpeg -i stereo.wav -af channelsplit,axcorrelate=size=1024:algo=fast correlation.wav
@end example
@end itemize
@section bandpass
Apply a two-pole Butterworth band-pass filter with central
@ -2568,6 +2605,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2627,6 +2668,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2693,6 +2738,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2744,6 +2793,13 @@ Syntax for the command is : "@var{value}"
@item mix, m
How much to use filtered signal in output. Default is 1.
Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@section bs2b
@ -3439,6 +3495,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Examples
@ -3908,6 +3968,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -4224,6 +4288,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Examples
@ -5332,6 +5400,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, disabled by default.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -6863,6 +6935,13 @@ Literal colors like "green" or "red" don't make sense with this enabled anymore.
This can be used to pass exact YUV values as hexadecimal numbers.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section chromakey
YUV colorspace color/chroma keying.
@ -6892,6 +6971,13 @@ Literal colors like "green" or "red" don't make sense with this enabled anymore.
This can be used to pass exact YUV values as hexadecimal numbers.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@subsection Examples
@itemize
@ -6925,6 +7011,10 @@ Set amount to shift chroma-red vertically.
Set edge mode, can be @var{smear}, default, or @var{warp}.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section ciescope
Display CIE color diagram with pixels overlaid onto it.
@ -10745,6 +10835,13 @@ Default is @var{smear}.
Set color for pixels in fixed mode. Default is @var{black}.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section find_rect
Find a rectangular object
@ -11668,6 +11765,13 @@ A floating point number which specifies chroma temporal strength. It defaults to
@var{luma_tmp}*@var{chroma_spatial}/@var{luma_spatial}.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@anchor{hwdownload}
@section hwdownload
@ -12380,7 +12484,7 @@ The filter has following options:
@table @option
@item model_path
Set the model path which is to be used for SVM.
Default value: @code{"vmaf_v0.6.1.pkl"}
Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"}
@item log_path
Set the file path to be used to store logs.
@ -12396,27 +12500,35 @@ Default value: @code{false}
@item phone_model
Invokes the phone model which will generate VMAF scores higher than in the
regular model, which is more suitable for laptop, TV, etc. viewing conditions.
Default value: @code{false}
@item psnr
Enables computing psnr along with vmaf.
Default value: @code{false}
@item ssim
Enables computing ssim along with vmaf.
Default value: @code{false}
@item ms_ssim
Enables computing ms_ssim along with vmaf.
Default value: @code{false}
@item pool
Set the pool method (mean, min or harmonic mean) to be used for computing vmaf.
Set the pool method to be used for computing vmaf.
Options are @code{min}, @code{harmonic_mean} or @code{mean} (default).
@item n_threads
Set number of threads to be used when computing vmaf.
Default value: @code{0}, which makes use of all available logical processors.
@item n_subsample
Set interval for frame subsampling used when computing vmaf.
Default value: @code{1}
@item enable_conf_interval
Enables confidence interval.
Default value: @code{false}
@end table
This filter also supports the @ref{framesync} options.
@ -12440,7 +12552,7 @@ ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null -
@item
Example with options and different containers:
@example
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=1/AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=1/AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
@end example
@end itemize
@ -12590,13 +12702,20 @@ Default value is @code{0}.
@item tolerance
Set the range of luma values to be keyed out.
Default value is @code{0}.
Default value is @code{0.01}.
@item softness
Set the range of softness. Default value is @code{0}.
Use this to control gradual transition from zero to full transparency.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section lut, lutrgb, lutyuv
Compute a look-up table for binding each pixel component input value
@ -12982,6 +13101,13 @@ Allowed range is integer from 0 to 127.
If it is 0, value will be picked from horizontal @code{radius} option.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section mergeplanes
Merge color channel components from several video streams.
@ -13550,6 +13676,13 @@ expensive no-op. Defaults to 1.0 (full strength).
@end table
@subsection Commands
This filter supports the same @ref{commands} as options, excluding the @var{smoothing} option.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@subsection Examples
Stretch video contrast to use the full dynamic range, with no temporal
@ -13763,6 +13896,13 @@ Draw some statistics. By default is enabled.
Draw scope. By default is enabled.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@subsection Examples
@itemize
@ -15061,7 +15201,7 @@ is stored in @file{stats.log}.
@item
Another example with different containers:
@example
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=1/AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=1/AVTB,setpts=PTS-STARTPTS[ref];[main][ref]psnr" -f null -
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]psnr" -f null -
@end example
@end itemize
@ -15489,6 +15629,10 @@ Set amount to shift alpha vertically.
Set edge mode, can be @var{smear}, default, or @var{warp}.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section roberts
Apply roberts cross operator to input video stream.
@ -16066,6 +16210,46 @@ Supersampling
@item lanczos
@end table
@item force_original_aspect_ratio
Enable decreasing or increasing output video width or height if necessary to
keep the original aspect ratio. Possible values:
@table @samp
@item disable
Scale the video as specified and disable this feature.
@item decrease
The output video dimensions will automatically be decreased if needed.
@item increase
The output video dimensions will automatically be increased if needed.
@end table
One useful instance of this option is that when you know a specific device's
maximum allowed resolution, you can use this to limit the output video to
that, while retaining the aspect ratio. For example, device A allows
1280x720 playback, and your video is 1920x800. Using this option (set it to
decrease) and specifying 1280x720 to the command line makes the output
1280x533.
Please note that this is a different thing than specifying -1 for @option{w}
or @option{h}, you still need to specify the output resolution for this option
to work.
@item force_divisible_by
Ensures that both the output dimensions, width and height, are divisible by the
given integer when used together with @option{force_original_aspect_ratio}. This
works similar to using @code{-n} in the @option{w} and @option{h} options.
This option respects the value set for @option{force_original_aspect_ratio},
increasing or decreasing the resolution accordingly. The video's aspect ratio
may be slightly modified.
This option can be handy if you need to have a video fit within or exceed
a defined resolution using @option{force_original_aspect_ratio} but also have
encoder restrictions on width or height divisibility.
@end table
@section scale2ref
@ -17128,7 +17312,7 @@ ffmpeg -i main.mpg -i ref.mpg -lavfi "ssim;[0:v][1:v]psnr" -f null -
@item
Another example with different containers:
@example
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=1/AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=1/AVTB,setpts=PTS-STARTPTS[ref];[main][ref]ssim" -f null -
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]ssim" -f null -
@end example
@end itemize
@ -19257,16 +19441,23 @@ vignette='PI/4+random(1)*PI/50':eval=frame
@section vmafmotion
Obtain the average vmaf motion score of a video.
It is one of the component filters of VMAF.
Obtain the average VMAF motion score of a video.
It is one of the component metrics of VMAF.
The obtained average motion score is printed through the logging system.
In the below example the input file @file{ref.mpg} is being processed and score
is computed.
The filter accepts the following options:
@table @option
@item stats_file
If specified, the filter will use the named file to save the motion score of
each frame with respect to the previous frame.
When filename equals "-" the data is sent to standard output.
@end table
Example:
@example
ffmpeg -i ref.mpg -lavfi vmafmotion -f null -
ffmpeg -i ref.mpg -vf vmafmotion -f null -
@end example
@section vstack
@ -19767,6 +19958,28 @@ Only deinterlace frames marked as interlaced.
The default value is @code{all}.
@end table
@section yaepblur
Apply blur filter while preserving edges ("yaepblur" means "yet another edge preserving blur filter").
The algorithm is described in
"J. S. Lee, Digital image enhancement and noise filtering by use of local statistics, IEEE Trans. Pattern Anal. Mach. Intell. PAMI-2, 1980."
It accepts the following parameters:
@table @option
@item radius, r
Set the window radius. Default value is 3.
@item planes, p
Set which planes to filter. Default is only the first plane.
@item sigma, s
Set blur strength. Default value is 128.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
@section zoompan
Apply Zoom & Pan effect.

View File

@ -395,6 +395,14 @@ Either sync could go wrong by 1 frame or in a rarer case
@option{timestamp_align} seconds.
Defaults to @samp{0}.
@item wait_for_tc (@emph{bool})
Drop frames till a frame with timecode is received. Sometimes serial timecode
isn't received with the first input frame. If that happens, the stored stream
timecode will be inaccurate. If this option is set to @option{true}, input frames
are dropped till a frame with timecode is received.
Option @var{timecode_format} must be specified.
Defaults to @option{false}.
@end table
@subsection Examples

View File

@ -1582,11 +1582,12 @@ Advanced Codec Digital HDTV service.
@end table
@item mpegts_pmt_start_pid @var{integer}
Set the first PID for PMT. Default is @code{0x1000}. Max is @code{0x1f00}.
Set the first PID for PMTs. Default is @code{0x1000}, minimum is @code{0x0020},
maximum is @code{0x1ffa}.
@item mpegts_start_pid @var{integer}
Set the first PID for data packets. Default is @code{0x0100}. Max is
@code{0x0f00}.
Set the first PID for elementary streams. Default is @code{0x0100}, minimum is
@code{0x0020}, maximum is @code{0x1ffa}.
@item mpegts_m2ts_mode @var{boolean}
Enable m2ts mode if set to @code{1}. Default value is @code{-1} which

View File

@ -1282,6 +1282,26 @@ only if @option{pbkeylen} is non-zero. It is used on
the receiver only if the received data is encrypted.
The configured passphrase cannot be recovered (write-only).
@item enforced_encryption=@var{1|0}
If true, both connection parties must have the same password
set (including empty, that is, with no encryption). If the
password doesn't match or only one side is unencrypted,
the connection is rejected. Default is true.
@item kmrefreshrate=@var{packets}
The number of packets to be transmitted after which the
encryption key is switched to a new key. Default is -1.
-1 means auto (0x1000000 in the srt library). The valid range for
this option is 0 to @code{INT_MAX}.
@item kmpreannounce=@var{packets}
The interval between when a new encryption key is sent and
when switchover occurs. This value also applies to the
subsequent interval between when switchover occurs and
when the old encryption key is decommissioned. Default is -1.
-1 means auto (0x1000 in the srt library). The valid range for
this option is 0 to @code{INT_MAX}.
@item payload_size=@var{bytes}
Sets the maximum declared size of a packet transferred
during the single call to the sending function in Live
@ -1426,6 +1446,12 @@ the overhead transmission (retransmitted and control packets).
file: Set options as for non-live transmission. See @option{messageapi}
for further explanations
@item linger=@var{seconds}
The number of seconds that the socket waits for unsent data when closing.
Default is -1. -1 means auto (off with 0 seconds in live mode, on with 180
seconds in file mode). The valid range for this option is
0 to @code{INT_MAX}.
@end table
For more information see: @url{https://github.com/Haivision/srt}.

View File

@ -493,6 +493,8 @@ OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o
OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
OBJS-$(CONFIG_MVDV_DECODER) += midivid.o
OBJS-$(CONFIG_MVHA_DECODER) += mvha.o
OBJS-$(CONFIG_MWSC_DECODER) += mwsc.o
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o

View File

@ -460,8 +460,8 @@ static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size,
return ret;
if (orig_mv_x >= -32) {
if (y * 8 + mv_y < 0 || y * 8 + mv_y >= h ||
x * 8 + mv_x < 0 || x * 8 + mv_x >= w)
if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
return AVERROR_INVALIDDATA;
copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
@ -827,7 +827,7 @@ static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame
static int decode_motion_vectors(AVCodecContext *avctx, GetBitContext *gb)
{
AGMContext *s = avctx->priv_data;
int nb_mvs = ((avctx->height + 15) >> 4) * ((avctx->width + 15) >> 4);
int nb_mvs = ((avctx->coded_height + 15) >> 4) * ((avctx->coded_width + 15) >> 4);
int ret, skip = 0, value, map;
av_fast_padded_malloc(&s->mvectors, &s->mvectors_size,

View File

@ -227,7 +227,7 @@ static void lpc_prediction(int32_t *error_buffer, uint32_t *buffer_out,
val = d - pred[j];
sign = sign_only(val) * error_sign;
lpc_coefs[j] -= sign;
val *= sign;
val *= (unsigned)sign;
error_val -= (val >> lpc_quant) * (j + 1);
}
}

View File

@ -218,6 +218,8 @@ extern AVCodec ff_mszh_decoder;
extern AVCodec ff_mts2_decoder;
extern AVCodec ff_mvc1_decoder;
extern AVCodec ff_mvc2_decoder;
extern AVCodec ff_mvdv_decoder;
extern AVCodec ff_mvha_decoder;
extern AVCodec ff_mwsc_decoder;
extern AVCodec ff_mxpeg_decoder;
extern AVCodec ff_nuv_decoder;

View File

@ -236,6 +236,7 @@ typedef struct ALSDecContext {
int **raw_mantissa; ///< decoded mantissa bits of the difference signal
unsigned char *larray; ///< buffer to store the output of masked lz decompression
int *nbits; ///< contains the number of bits to read for masked lz decompression for all samples
int highest_decoded_channel;
} ALSDecContext;
@ -1678,6 +1679,7 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
memmove(ctx->raw_samples[c] - sconf->max_order,
ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
sizeof(*ctx->raw_samples[c]) * sconf->max_order);
ctx->highest_decoded_channel = c;
}
} else { // multi-channel coding
ALSBlockData bd = { 0 };
@ -1746,6 +1748,8 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
if ((ret = decode_block(ctx, &bd)) < 0)
return ret;
ctx->highest_decoded_channel = FFMAX(ctx->highest_decoded_channel, c);
}
memset(reverted_channels, 0, avctx->channels * sizeof(*reverted_channels));
@ -1802,11 +1806,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
else
ctx->cur_frame_length = sconf->frame_length;
ctx->highest_decoded_channel = 0;
// decode the frame data
if ((invalid_frame = read_frame_data(ctx, ra_frame)) < 0)
av_log(ctx->avctx, AV_LOG_WARNING,
"Reading frame data failed. Skipping RA unit.\n");
if (ctx->highest_decoded_channel == 0)
return AVERROR_INVALIDDATA;
ctx->frame_id++;
/* get output buffer */
@ -1819,16 +1827,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
{ \
int##bps##_t *dest = (int##bps##_t*)frame->data[0]; \
int channels = avctx->channels; \
int32_t **raw_samples = ctx->raw_samples; \
int32_t *raw_samples = ctx->raw_samples[0]; \
int raw_step = channels > 1 ? ctx->raw_samples[1] - raw_samples : 1; \
shift = bps - ctx->avctx->bits_per_raw_sample; \
if (!ctx->cs_switch) { \
for (sample = 0; sample < ctx->cur_frame_length; sample++) \
for (c = 0; c < channels; c++) \
*dest++ = raw_samples[c][sample] * (1U << shift); \
*dest++ = raw_samples[c*raw_step + sample] * (1U << shift); \
} else { \
for (sample = 0; sample < ctx->cur_frame_length; sample++) \
for (c = 0; c < channels; c++) \
*dest++ = raw_samples[sconf->chan_pos[c]][sample] * (1U << shift);\
*dest++ = raw_samples[sconf->chan_pos[c]*raw_step + sample] * (1U << shift);\
} \
}

View File

@ -451,7 +451,7 @@ static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buff
int64_t timestamp = AV_NOPTS_VALUE;
int64_t size = buffer->pVtbl->GetSize(buffer);
if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0) {
if ((ret = av_new_packet(pkt, size)) < 0) {
return ret;
}
memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);

View File

@ -1234,7 +1234,7 @@ static void predictor_decode_mono_3950(APEContext *ctx, int count)
p->buf = p->historybuffer;
}
p->filterA[0] = currentA + ((int)(p->filterA[0] * 31U) >> 5);
p->filterA[0] = currentA + (unsigned)((int)(p->filterA[0] * 31U) >> 5);
*(decoded0++) = p->filterA[0];
}
@ -1272,7 +1272,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
f->adaptcoeffs - order,
order, APESIGN(*data));
res = (int)(res + (1U << (fracbits - 1))) >> fracbits;
res += *data;
res += (unsigned)*data;
*data++ = res;
/* Update the output history */
@ -1302,7 +1302,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
else
*f->adaptcoeffs = 0;
f->avg += (absres - f->avg) / 16;
f->avg += (int)(absres - (unsigned)f->avg) / 16;
f->adaptcoeffs[-1] >>= 1;
f->adaptcoeffs[-2] >>= 1;

View File

@ -964,7 +964,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
if (avctx->block_align >= UINT_MAX / 2 || avctx->block_align <= 0)
if (avctx->block_align > 1024 || avctx->block_align <= 0)
return AVERROR(EINVAL);
q->decoded_bytes_buffer = av_mallocz(FFALIGN(avctx->block_align, 4) +

View File

@ -187,7 +187,7 @@ static inline void calc_precision(ATRAC9Context *s, ATRAC9BlockData *b,
for (int i = 0; i < b->q_unit_cnt; i++) {
c->precision_fine[i] = 0;
if (c->precision_coarse[i] > 15) {
c->precision_fine[i] = c->precision_coarse[i] - 15;
c->precision_fine[i] = FFMIN(c->precision_coarse[i], 30) - 15;
c->precision_coarse[i] = 15;
}
}
@ -199,7 +199,7 @@ static inline int parse_band_ext(ATRAC9Context *s, ATRAC9BlockData *b,
int ext_band = 0;
if (b->has_band_ext) {
if (b->q_unit_cnt < 13)
if (b->q_unit_cnt < 13 || b->q_unit_cnt > 20)
return AVERROR_INVALIDDATA;
ext_band = at9_tab_band_ext_group[b->q_unit_cnt - 13][2];
if (stereo) {

View File

@ -23,6 +23,7 @@
#include "av1_parse.h"
#include "cbs.h"
#include "cbs_av1.h"
#include "internal.h"
#include "parser.h"
typedef struct AV1ParseContext {
@ -100,6 +101,9 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
else
continue;
if (obu->header.spatial_id > 0)
continue;
if (frame->show_existing_frame) {
AV1ReferenceFrameState *ref = &av1->ref[frame->frame_to_show_map_idx];
@ -155,6 +159,12 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
break;
}
av_assert2(ctx->format != AV_PIX_FMT_NONE);
if (ctx->width != avctx->width || ctx->height != avctx->height) {
ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
if (ret < 0)
goto end;
}
}
if (avctx->framerate.num)

View File

@ -458,6 +458,8 @@ enum AVCodecID {
AV_CODEC_ID_LSCR,
AV_CODEC_ID_VP4,
AV_CODEC_ID_IMM5,
AV_CODEC_ID_MVDV,
AV_CODEC_ID_MVHA,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -5946,11 +5948,13 @@ int av_bsf_init(AVBSFContext *ctx);
*
* @param pkt the packet to filter. The bitstream filter will take ownership of
* the packet and reset the contents of pkt. pkt is not touched if an error occurs.
* This parameter may be NULL, which signals the end of the stream (i.e. no more
* packets will be sent). That will cause the filter to output any packets it
* may have buffered internally.
* If pkt is empty (i.e. NULL, or pkt->data is NULL and pkt->side_data_elems zero),
* it signals the end of the stream (i.e. no more non-empty packets will be sent;
* sending more empty packets does nothing) and will cause the filter to output
* any packets it may have buffered internally.
*
* @return 0 on success, a negative AVERROR on error.
* @return 0 on success, a negative AVERROR on error. This function never fails if
* pkt is empty.
*/
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);

View File

@ -95,10 +95,12 @@ int ff_cbs_init(CodedBitstreamContext **ctx_ptr,
ctx->log_ctx = log_ctx;
ctx->codec = type;
ctx->priv_data = av_mallocz(ctx->codec->priv_data_size);
if (!ctx->priv_data) {
av_freep(&ctx);
return AVERROR(ENOMEM);
if (type->priv_data_size) {
ctx->priv_data = av_mallocz(ctx->codec->priv_data_size);
if (!ctx->priv_data) {
av_freep(&ctx);
return AVERROR(ENOMEM);
}
}
ctx->decompose_unit_types = NULL;
@ -120,6 +122,7 @@ void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
if (ctx->codec && ctx->codec->close)
ctx->codec->close(ctx);
av_freep(&ctx->write_buffer);
av_freep(&ctx->priv_data);
av_freep(ctx_ptr);
}
@ -280,6 +283,59 @@ int ff_cbs_read(CodedBitstreamContext *ctx,
return cbs_read_fragment_content(ctx, frag);
}
static int cbs_write_unit_data(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
PutBitContext pbc;
int ret;
if (!ctx->write_buffer) {
// Initial write buffer size is 1MB.
ctx->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
ret = av_reallocp(&ctx->write_buffer, ctx->write_buffer_size);
if (ret < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", ctx->write_buffer_size);
return ret;
}
}
init_put_bits(&pbc, ctx->write_buffer, ctx->write_buffer_size);
ret = ctx->codec->write_unit(ctx, unit, &pbc);
if (ret < 0) {
if (ret == AVERROR(ENOSPC)) {
// Overflow.
if (ctx->write_buffer_size == INT_MAX / 8)
return AVERROR(ENOMEM);
ctx->write_buffer_size = FFMIN(2 * ctx->write_buffer_size, INT_MAX / 8);
goto reallocate_and_try_again;
}
// Write failed for some other reason.
return ret;
}
// Overflow but we didn't notice.
av_assert0(put_bits_count(&pbc) <= 8 * ctx->write_buffer_size);
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
flush_put_bits(&pbc);
ret = ff_cbs_alloc_unit_data(ctx, unit, put_bits_count(&pbc) / 8);
if (ret < 0)
return ret;
memcpy(unit->data, ctx->write_buffer, unit->data_size);
return 0;
}
int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag)
@ -295,7 +351,7 @@ int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
av_buffer_unref(&unit->data_ref);
unit->data = NULL;
err = ctx->codec->write_unit(ctx, unit);
err = cbs_write_unit_data(ctx, unit);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Failed to write unit %d "
"(type %"PRIu32").\n", i, unit->type);

View File

@ -210,6 +210,13 @@ typedef struct CodedBitstreamContext {
* From AV_LOG_*; defaults to AV_LOG_TRACE.
*/
int trace_level;
/**
* Write buffer. Used as intermediate buffer when writing units.
* For internal use of cbs only.
*/
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamContext;

View File

@ -939,6 +939,8 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
priv->spatial_id = 0;
}
priv->ref = (AV1ReferenceFrameState *)&priv->read_ref;
switch (obu->header.obu_type) {
case AV1_OBU_SEQUENCE_HEADER:
{
@ -1037,6 +1039,7 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
if (obu->obu_size > 0 &&
obu->header.obu_type != AV1_OBU_TILE_GROUP &&
obu->header.obu_type != AV1_OBU_TILE_LIST &&
obu->header.obu_type != AV1_OBU_FRAME) {
int nb_bits = obu->obu_size * 8 + start_pos - end_pos;
@ -1081,6 +1084,8 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
td = NULL;
start_pos = put_bits_count(pbc);
priv->ref = (AV1ReferenceFrameState *)&priv->write_ref;
switch (obu->header.obu_type) {
case AV1_OBU_SEQUENCE_HEADER:
{
@ -1199,66 +1204,19 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
return AVERROR(ENOSPC);
if (obu->obu_size > 0) {
memmove(priv->write_buffer + data_pos,
priv->write_buffer + start_pos, header_size);
memmove(pbc->buf + data_pos,
pbc->buf + start_pos, header_size);
skip_put_bytes(pbc, header_size);
if (td) {
memcpy(priv->write_buffer + data_pos + header_size,
memcpy(pbc->buf + data_pos + header_size,
td->data, td->data_size);
skip_put_bytes(pbc, td->data_size);
}
}
return 0;
}
static int cbs_av1_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
CodedBitstreamAV1Context *priv = ctx->priv_data;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
err = cbs_av1_write_obu(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (err < 0)
return err;
// Overflow but we didn't notice.
av_assert0(put_bits_count(&pbc) <= 8 * priv->write_buffer_size);
// OBU data must be byte-aligned.
av_assert0(put_bits_count(&pbc) % 8 == 0);
unit->data_size = put_bits_count(&pbc) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
av_assert0(put_bits_count(pbc) % 8 == 0);
return 0;
}
@ -1297,8 +1255,6 @@ static void cbs_av1_close(CodedBitstreamContext *ctx)
av_buffer_unref(&priv->sequence_header_ref);
av_buffer_unref(&priv->frame_header_ref);
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_av1 = {
@ -1308,7 +1264,7 @@ const CodedBitstreamType ff_cbs_type_av1 = {
.split_fragment = &cbs_av1_split_fragment,
.read_unit = &cbs_av1_read_unit,
.write_unit = &cbs_av1_write_unit,
.write_unit = &cbs_av1_write_obu,
.assemble_fragment = &cbs_av1_assemble_fragment,
.close = &cbs_av1_close,

View File

@ -441,11 +441,9 @@ typedef struct CodedBitstreamAV1Context {
int tile_cols;
int tile_rows;
AV1ReferenceFrameState ref[AV1_NUM_REF_FRAMES];
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
AV1ReferenceFrameState *ref;
AV1ReferenceFrameState read_ref[AV1_NUM_REF_FRAMES];
AV1ReferenceFrameState write_ref[AV1_NUM_REF_FRAMES];
} CodedBitstreamAV1Context;

View File

@ -339,6 +339,117 @@ static int FUNC(temporal_delimiter_obu)(CodedBitstreamContext *ctx, RWContext *r
return 0;
}
static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw,
AV1RawFrameHeader *current)
{
CodedBitstreamAV1Context *priv = ctx->priv_data;
const AV1RawSequenceHeader *seq = priv->sequence_header;
static const uint8_t ref_frame_list[AV1_NUM_REF_FRAMES - 2] = {
AV1_REF_FRAME_LAST2, AV1_REF_FRAME_LAST3, AV1_REF_FRAME_BWDREF,
AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF
};
int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES];
int8_t shifted_order_hints[AV1_NUM_REF_FRAMES];
int cur_frame_hint, latest_order_hint, earliest_order_hint, ref;
int i, j;
for (i = 0; i < AV1_REFS_PER_FRAME; i++)
ref_frame_idx[i] = -1;
ref_frame_idx[AV1_REF_FRAME_LAST - AV1_REF_FRAME_LAST] = current->last_frame_idx;
ref_frame_idx[AV1_REF_FRAME_GOLDEN - AV1_REF_FRAME_LAST] = current->golden_frame_idx;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
used_frame[i] = 0;
used_frame[current->last_frame_idx] = 1;
used_frame[current->golden_frame_idx] = 1;
cur_frame_hint = 1 << (seq->order_hint_bits_minus_1);
for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
shifted_order_hints[i] = cur_frame_hint +
cbs_av1_get_relative_dist(seq, priv->ref[i].order_hint,
current->order_hint);
latest_order_hint = shifted_order_hints[current->last_frame_idx];
earliest_order_hint = shifted_order_hints[current->golden_frame_idx];
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (!used_frame[i] && hint >= cur_frame_hint &&
(ref < 0 || hint >= latest_order_hint)) {
ref = i;
latest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[AV1_REF_FRAME_ALTREF - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (!used_frame[i] && hint >= cur_frame_hint &&
(ref < 0 || hint < earliest_order_hint)) {
ref = i;
earliest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[AV1_REF_FRAME_BWDREF - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (!used_frame[i] && hint >= cur_frame_hint &&
(ref < 0 || hint < earliest_order_hint)) {
ref = i;
earliest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[AV1_REF_FRAME_ALTREF2 - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) {
int ref_frame = ref_frame_list[i];
if (ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] < 0 ) {
ref = -1;
for (j = 0; j < AV1_NUM_REF_FRAMES; j++) {
int hint = shifted_order_hints[j];
if (!used_frame[j] && hint < cur_frame_hint &&
(ref < 0 || hint >= latest_order_hint)) {
ref = j;
latest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
}
}
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (ref < 0 || hint < earliest_order_hint) {
ref = i;
earliest_order_hint = hint;
}
}
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
if (ref_frame_idx[i] < 0)
ref_frame_idx[i] = ref;
infer(ref_frame_idx[i], ref_frame_idx[i]);
}
return 0;
}
static int FUNC(superres_params)(CodedBitstreamContext *ctx, RWContext *rw,
AV1RawFrameHeader *current)
{
@ -419,17 +530,16 @@ static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
flags(found_ref[i], 1, i);
if (current->found_ref[i]) {
AV1ReferenceFrameState *ref;
AV1ReferenceFrameState *ref =
&priv->ref[current->ref_frame_idx[i]];
if (current->ref_frame_idx[i] < 0 ||
!priv->ref[current->ref_frame_idx[i]].valid) {
if (!ref->valid) {
av_log(ctx->log_ctx, AV_LOG_ERROR,
"Missing reference frame needed for frame size "
"(ref = %d, ref_frame_idx = %d).\n",
i, current->ref_frame_idx[i]);
return AVERROR_INVALIDDATA;
}
ref = &priv->ref[current->ref_frame_idx[i]];
priv->upscaled_width = ref->upscaled_width;
priv->frame_width = ref->frame_width;
@ -882,7 +992,7 @@ static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
forward_idx = -1;
backward_idx = -1;
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
ref_hint = priv->ref[i].order_hint;
ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
dist = cbs_av1_get_relative_dist(seq, ref_hint,
current->order_hint);
if (dist < 0) {
@ -913,7 +1023,7 @@ static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
second_forward_idx = -1;
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
ref_hint = priv->ref[i].order_hint;
ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
if (cbs_av1_get_relative_dist(seq, ref_hint,
forward_hint) < 0) {
if (second_forward_idx < 0 ||
@ -1307,16 +1417,7 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
if (current->frame_refs_short_signaling) {
fb(3, last_frame_idx);
fb(3, golden_frame_idx);
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
if (i == 0)
infer(ref_frame_idx[i], current->last_frame_idx);
else if (i == AV1_REF_FRAME_GOLDEN -
AV1_REF_FRAME_LAST)
infer(ref_frame_idx[i], current->golden_frame_idx);
else
infer(ref_frame_idx[i], -1);
}
CHECK(FUNC(set_frame_refs)(ctx, rw, current));
}
}

View File

@ -1101,7 +1101,7 @@ static int cbs_h2645_write_slice_data(CodedBitstreamContext *ctx,
const uint8_t *pos = data + data_bit_start / 8;
av_assert0(data_bit_start >= 0 &&
8 * data_size > data_bit_start);
data_size > data_bit_start / 8);
if (data_size * 8 + 8 > put_bits_left(pbc))
return AVERROR(ENOSPC);
@ -1380,65 +1380,6 @@ static int cbs_h265_write_nal_unit(CodedBitstreamContext *ctx,
return 0;
}
static int cbs_h2645_write_nal_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
CodedBitstreamH2645Context *priv = ctx->priv_data;
enum AVCodecID codec_id = ctx->codec->codec_id;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
if (codec_id == AV_CODEC_ID_H264)
err = cbs_h264_write_nal_unit(ctx, unit, &pbc);
else
err = cbs_h265_write_nal_unit(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
// Overflow but we didn't notice.
av_assert0(put_bits_count(&pbc) <= 8 * priv->write_buffer_size);
if (err < 0) {
// Write failed for some other reason.
return err;
}
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
unit->data_size = (put_bits_count(&pbc) + 7) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
}
static int cbs_h2645_assemble_fragment(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag)
{
@ -1454,7 +1395,7 @@ static int cbs_h2645_assemble_fragment(CodedBitstreamContext *ctx,
max_size = 0;
for (i = 0; i < frag->nb_units; i++) {
// Start code + content with worst-case emulation prevention.
max_size += 3 + frag->units[i].data_size * 3 / 2;
max_size += 4 + frag->units[i].data_size * 3 / 2;
}
data = av_realloc(NULL, max_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -1533,8 +1474,6 @@ static void cbs_h264_close(CodedBitstreamContext *ctx)
ff_h2645_packet_uninit(&h264->common.read_packet);
av_freep(&h264->common.write_buffer);
for (i = 0; i < FF_ARRAY_ELEMS(h264->sps); i++)
av_buffer_unref(&h264->sps_ref[i]);
for (i = 0; i < FF_ARRAY_ELEMS(h264->pps); i++)
@ -1548,8 +1487,6 @@ static void cbs_h265_close(CodedBitstreamContext *ctx)
ff_h2645_packet_uninit(&h265->common.read_packet);
av_freep(&h265->common.write_buffer);
for (i = 0; i < FF_ARRAY_ELEMS(h265->vps); i++)
av_buffer_unref(&h265->vps_ref[i]);
for (i = 0; i < FF_ARRAY_ELEMS(h265->sps); i++)
@ -1565,7 +1502,7 @@ const CodedBitstreamType ff_cbs_type_h264 = {
.split_fragment = &cbs_h2645_split_fragment,
.read_unit = &cbs_h264_read_nal_unit,
.write_unit = &cbs_h2645_write_nal_unit,
.write_unit = &cbs_h264_write_nal_unit,
.assemble_fragment = &cbs_h2645_assemble_fragment,
.close = &cbs_h264_close,
@ -1578,7 +1515,7 @@ const CodedBitstreamType ff_cbs_type_h265 = {
.split_fragment = &cbs_h2645_split_fragment,
.read_unit = &cbs_h265_read_nal_unit,
.write_unit = &cbs_h2645_write_nal_unit,
.write_unit = &cbs_h265_write_nal_unit,
.assemble_fragment = &cbs_h2645_assemble_fragment,
.close = &cbs_h265_close,

View File

@ -19,9 +19,6 @@
#ifndef AVCODEC_CBS_H2645_H
#define AVCODEC_CBS_H2645_H
#include <stddef.h>
#include <stdint.h>
#include "h2645_parse.h"
@ -33,10 +30,6 @@ typedef struct CodedBitstreamH2645Context {
int nal_length_size;
// Packet reader.
H2645Packet read_packet;
// Write buffer
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamH2645Context;

View File

@ -44,9 +44,11 @@ typedef struct CodedBitstreamType {
int (*read_unit)(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
// Write the unit->data bitstream from unit->content.
// Write the data bitstream from unit->content into pbc.
// Return value AVERROR(ENOSPC) indicates that pbc was too small.
int (*write_unit)(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
CodedBitstreamUnit *unit,
PutBitContext *pbc);
// Read the data from all of frag->units and assemble it into
// a bitstream for the whole fragment.
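To illustrate the new write_unit contract above, here is a minimal sketch (not taken from any real codec in the tree) of a callback that writes into the PutBitContext handed to it by the generic layer and reports AVERROR(ENOSPC) when the shared buffer is too small, so cbs_write_unit_data() can grow it and retry. The FooRawUnit type and header_bits field are made up for the example; the code assumes it lives in a cbs_*.c file where cbs_internal.h and put_bits.h are available.

static int cbs_foo_write_unit(CodedBitstreamContext *ctx,
                              CodedBitstreamUnit *unit,
                              PutBitContext *pbc)
{
    FooRawUnit *raw = unit->content;   /* hypothetical content type */

    /* Not enough room in the shared write buffer: return ENOSPC so the
     * generic layer reallocates a larger buffer and calls us again. */
    if (put_bits_left(pbc) < 32)
        return AVERROR(ENOSPC);

    put_bits32(pbc, raw->header_bits); /* hypothetical 32-bit header */
    return 0;
}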

View File

@ -377,58 +377,13 @@ static int cbs_jpeg_write_segment(CodedBitstreamContext *ctx,
}
static int cbs_jpeg_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit,
PutBitContext *pbc)
{
CodedBitstreamJPEGContext *priv = ctx->priv_data;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
if (unit->type == JPEG_MARKER_SOS)
err = cbs_jpeg_write_scan(ctx, unit, &pbc);
return cbs_jpeg_write_scan (ctx, unit, pbc);
else
err = cbs_jpeg_write_segment(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (err < 0) {
// Write failed for some other reason.
return err;
}
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
unit->data_size = (put_bits_count(&pbc) + 7) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
return cbs_jpeg_write_segment(ctx, unit, pbc);
}
static int cbs_jpeg_assemble_fragment(CodedBitstreamContext *ctx,
@ -499,22 +454,11 @@ static int cbs_jpeg_assemble_fragment(CodedBitstreamContext *ctx,
return 0;
}
static void cbs_jpeg_close(CodedBitstreamContext *ctx)
{
CodedBitstreamJPEGContext *priv = ctx->priv_data;
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_jpeg = {
.codec_id = AV_CODEC_ID_MJPEG,
.priv_data_size = sizeof(CodedBitstreamJPEGContext),
.split_fragment = &cbs_jpeg_split_fragment,
.read_unit = &cbs_jpeg_read_unit,
.write_unit = &cbs_jpeg_write_unit,
.assemble_fragment = &cbs_jpeg_assemble_fragment,
.close = &cbs_jpeg_close,
};

View File

@ -120,11 +120,4 @@ typedef struct JPEGRawComment {
} JPEGRawComment;
typedef struct CodedBitstreamJPEGContext {
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamJPEGContext;
#endif /* AVCODEC_CBS_JPEG_H */

View File

@ -337,7 +337,7 @@ static int cbs_mpeg2_write_slice(CodedBitstreamContext *ctx,
uint8_t *pos = slice->data + slice->data_bit_start / 8;
av_assert0(slice->data_bit_start >= 0 &&
8 * slice->data_size > slice->data_bit_start);
slice->data_size > slice->data_bit_start / 8);
if (slice->data_size * 8 + 8 > put_bits_left(pbc))
return AVERROR(ENOSPC);
@ -371,58 +371,13 @@ static int cbs_mpeg2_write_slice(CodedBitstreamContext *ctx,
}
static int cbs_mpeg2_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit,
PutBitContext *pbc)
{
CodedBitstreamMPEG2Context *priv = ctx->priv_data;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
if (MPEG2_START_IS_SLICE(unit->type))
err = cbs_mpeg2_write_slice(ctx, unit, &pbc);
return cbs_mpeg2_write_slice (ctx, unit, pbc);
else
err = cbs_mpeg2_write_header(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (err < 0) {
// Write failed for some other reason.
return err;
}
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
unit->data_size = (put_bits_count(&pbc) + 7) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
return cbs_mpeg2_write_header(ctx, unit, pbc);
}
static int cbs_mpeg2_assemble_fragment(CodedBitstreamContext *ctx,
@ -462,13 +417,6 @@ static int cbs_mpeg2_assemble_fragment(CodedBitstreamContext *ctx,
return 0;
}
static void cbs_mpeg2_close(CodedBitstreamContext *ctx)
{
CodedBitstreamMPEG2Context *priv = ctx->priv_data;
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_mpeg2 = {
.codec_id = AV_CODEC_ID_MPEG2VIDEO,
@ -478,6 +426,4 @@ const CodedBitstreamType ff_cbs_type_mpeg2 = {
.read_unit = &cbs_mpeg2_read_unit,
.write_unit = &cbs_mpeg2_write_unit,
.assemble_fragment = &cbs_mpeg2_assemble_fragment,
.close = &cbs_mpeg2_close,
};

View File

@ -225,10 +225,6 @@ typedef struct CodedBitstreamMPEG2Context {
uint8_t scalable_mode;
uint8_t progressive_sequence;
uint8_t number_of_frame_centre_offsets;
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamMPEG2Context;

View File

@ -522,62 +522,28 @@ static int cbs_vp9_read_unit(CodedBitstreamContext *ctx,
}
static int cbs_vp9_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit,
PutBitContext *pbc)
{
CodedBitstreamVP9Context *priv = ctx->priv_data;
VP9RawFrame *frame = unit->content;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
err = cbs_vp9_write_frame(ctx, &pbc, frame);
if (err == AVERROR(ENOSPC)) {
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
err = cbs_vp9_write_frame(ctx, pbc, frame);
if (err < 0)
return err;
// Frame must be byte-aligned.
av_assert0(put_bits_count(&pbc) % 8 == 0);
unit->data_size = put_bits_count(&pbc) / 8;
unit->data_bit_padding = 0;
flush_put_bits(&pbc);
av_assert0(put_bits_count(pbc) % 8 == 0);
if (frame->data) {
if (unit->data_size + frame->data_size >
priv->write_buffer_size) {
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (frame->data_size > put_bits_left(pbc) / 8)
return AVERROR(ENOSPC);
memcpy(priv->write_buffer + unit->data_size,
frame->data, frame->data_size);
unit->data_size += frame->data_size;
flush_put_bits(pbc);
memcpy(put_bits_ptr(pbc), frame->data, frame->data_size);
skip_put_bytes(pbc, frame->data_size);
}
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
}
@ -671,13 +637,6 @@ static int cbs_vp9_assemble_fragment(CodedBitstreamContext *ctx,
return 0;
}
static void cbs_vp9_close(CodedBitstreamContext *ctx)
{
CodedBitstreamVP9Context *priv = ctx->priv_data;
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_vp9 = {
.codec_id = AV_CODEC_ID_VP9,
@ -687,6 +646,4 @@ const CodedBitstreamType ff_cbs_type_vp9 = {
.read_unit = &cbs_vp9_read_unit,
.write_unit = &cbs_vp9_write_unit,
.assemble_fragment = &cbs_vp9_assemble_fragment,
.close = &cbs_vp9_close,
};

View File

@ -207,10 +207,6 @@ typedef struct CodedBitstreamVP9Context {
int bit_depth;
VP9ReferenceFrameState ref[VP9_NUM_REF_FRAMES];
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamVP9Context;

View File

@ -173,7 +173,7 @@ AVCodec ff_comfortnoise_decoder = {
.close = cng_decode_close,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
};

View File

@ -1733,6 +1733,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("Infinity IMM5"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MVDV,
.type = AVMEDIA_TYPE_VIDEO,
.name = "mvdv",
.long_name = NULL_IF_CONFIG_SMALL("MidiVid VQ"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MVHA,
.type = AVMEDIA_TYPE_VIDEO,
.name = "mvha",
.long_name = NULL_IF_CONFIG_SMALL("MidiVid Archive Codec"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* various PCM "codecs" */
{
@ -2994,6 +3008,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
},
{
.id = AV_CODEC_ID_ACELP_KELVIN,
.type = AVMEDIA_TYPE_AUDIO,
.name = "acelp.kelvin",
.long_name = NULL_IF_CONFIG_SMALL("Sipro ACELP.KELVIN"),
.props = AV_CODEC_PROP_LOSSY,

View File

@ -83,6 +83,7 @@ enum dv_pack_type {
#define DV_PROFILE_IS_HD(p) ((p)->video_stype & 0x10)
#define DV_PROFILE_IS_1080i50(p) (((p)->video_stype == 0x14) && ((p)->dsf == 1))
#define DV_PROFILE_IS_1080i60(p) (((p)->video_stype == 0x14) && ((p)->dsf == 0))
#define DV_PROFILE_IS_720p50(p) (((p)->video_stype == 0x18) && ((p)->dsf == 1))
/**

View File

@ -272,11 +272,10 @@ static inline void bit_copy(PutBitContext *pb, GetBitContext *gb)
static av_always_inline void put_block_8x4(int16_t *block, uint8_t *av_restrict p, int stride)
{
int i, j;
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
for (i = 0; i < 4; i++) {
for (j = 0; j < 8; j++)
p[j] = cm[block[j]];
p[j] = av_clip_uint8(block[j]);
block += 8;
p += stride;
}

View File

@ -60,10 +60,7 @@ static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
ff_dv_print_profiles(avctx, AV_LOG_ERROR);
return AVERROR(EINVAL);
}
if (avctx->height > 576) {
av_log(avctx, AV_LOG_ERROR, "DVCPRO HD encoding is not supported.\n");
return AVERROR_PATCHWELCOME;
}
ret = ff_dv_init_dynamic_tables(s, s->sys);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error initializing work tables.\n");
@ -90,6 +87,7 @@ static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
}
/* bit budget for AC only in 5 MBs */
static const int vs_total_ac_bits_hd = (68 * 6 + 52*2) * 5;
static const int vs_total_ac_bits = (100 * 4 + 68 * 2) * 5;
static const int mb_area_start[5] = { 1, 6, 21, 43, 64 };
@ -158,6 +156,11 @@ typedef struct EncBlockInfo {
uint8_t sign[64];
uint8_t partial_bit_count;
uint32_t partial_bit_buffer; /* we can't use uint16_t here */
/* used by DV100 only: a copy of the weighted and classified but
not-yet-quantized AC coefficients. This is necessary for
re-quantizing at different steps. */
int16_t save[64];
int min_qlevel; /* DV100 only: minimum qlevel (for AC coefficients >255) */
} EncBlockInfo;
static av_always_inline PutBitContext *dv_encode_ac(EncBlockInfo *bi,
@ -243,13 +246,123 @@ static const int dv_weight_248[64] = {
170627, 170627, 153560, 153560, 165371, 165371, 144651, 144651,
};
static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
ptrdiff_t linesize,
DVVideoContext *s, int bias)
/* setting this to 1 results in a faster codec but
* somewhat lower image quality */
#define DV100_SACRIFICE_QUALITY_FOR_SPEED 1
#define DV100_ENABLE_FINER 1
/* pack combination of QNO and CNO into a single 8-bit value */
#define DV100_MAKE_QLEVEL(qno,cno) ((qno<<2) | (cno))
#define DV100_QLEVEL_QNO(qlevel) (qlevel>>2)
#define DV100_QLEVEL_CNO(qlevel) (qlevel&0x3)
#define DV100_NUM_QLEVELS 31
/* The quantization step is determined by a combination of QNO and
CNO. We refer to these combinations as "qlevels" (this term is our
own, it's not mentioned in the spec). We use CNO, a multiplier on
the quantization step, to "fill in the gaps" between quantization
steps associated with successive values of QNO. e.g. there is no
QNO for a quantization step of 10, but we can use QNO=5 CNO=1 to
get the same result. The table below encodes combinations of QNO
and CNO in order of increasing quantization coarseness. */
static const uint8_t dv100_qlevels[DV100_NUM_QLEVELS] = {
DV100_MAKE_QLEVEL( 1,0), // 1*1= 1
DV100_MAKE_QLEVEL( 1,0), // 1*1= 1
DV100_MAKE_QLEVEL( 2,0), // 2*1= 2
DV100_MAKE_QLEVEL( 3,0), // 3*1= 3
DV100_MAKE_QLEVEL( 4,0), // 4*1= 4
DV100_MAKE_QLEVEL( 5,0), // 5*1= 5
DV100_MAKE_QLEVEL( 6,0), // 6*1= 6
DV100_MAKE_QLEVEL( 7,0), // 7*1= 7
DV100_MAKE_QLEVEL( 8,0), // 8*1= 8
DV100_MAKE_QLEVEL( 5,1), // 5*2=10
DV100_MAKE_QLEVEL( 6,1), // 6*2=12
DV100_MAKE_QLEVEL( 7,1), // 7*2=14
DV100_MAKE_QLEVEL( 9,0), // 16*1=16
DV100_MAKE_QLEVEL(10,0), // 18*1=18
DV100_MAKE_QLEVEL(11,0), // 20*1=20
DV100_MAKE_QLEVEL(12,0), // 22*1=22
DV100_MAKE_QLEVEL(13,0), // 24*1=24
DV100_MAKE_QLEVEL(14,0), // 28*1=28
DV100_MAKE_QLEVEL( 9,1), // 16*2=32
DV100_MAKE_QLEVEL(10,1), // 18*2=36
DV100_MAKE_QLEVEL(11,1), // 20*2=40
DV100_MAKE_QLEVEL(12,1), // 22*2=44
DV100_MAKE_QLEVEL(13,1), // 24*2=48
DV100_MAKE_QLEVEL(15,0), // 52*1=52
DV100_MAKE_QLEVEL(14,1), // 28*2=56
DV100_MAKE_QLEVEL( 9,2), // 16*4=64
DV100_MAKE_QLEVEL(10,2), // 18*4=72
DV100_MAKE_QLEVEL(11,2), // 20*4=80
DV100_MAKE_QLEVEL(12,2), // 22*4=88
DV100_MAKE_QLEVEL(13,2), // 24*4=96
// ...
DV100_MAKE_QLEVEL(15,3), // 52*8=416
};
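As a quick illustration of the macros and the table above (not part of the patch), a qlevel simply packs QNO and CNO, with CNO acting as a power-of-two multiplier on the QNO step:

/* Illustration only. */
int qlevel = DV100_MAKE_QLEVEL(5, 1);   /* QNO 5 (step 5), CNO 1 (x2) */
int qno    = DV100_QLEVEL_QNO(qlevel);  /* == 5 */
int cno    = DV100_QLEVEL_CNO(qlevel);  /* == 1 */
/* effective step: 5 << 1 == 10, filling the gap between steps 8 and 12 */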
static const int dv100_min_bias = 0;
static const int dv100_chroma_bias = 0;
static const int dv100_starting_qno = 1;
#if DV100_SACRIFICE_QUALITY_FOR_SPEED
static const int dv100_qlevel_inc = 4;
#else
static const int dv100_qlevel_inc = 1;
#endif
// 1/qstep, shifted up by 16 bits
static const int dv100_qstep_bits = 16;
static const int dv100_qstep_inv[16] = {
65536, 65536, 32768, 21845, 16384, 13107, 10923, 9362, 8192, 4096, 3641, 3277, 2979, 2731, 2341, 1260,
};
/* DV100 weights are pre-zigzagged, inverted and multiplied by 2^(dv100_weight_shift)
(in DV100 the AC components are divided by the spec weights) */
static const int dv100_weight_shift = 16;
static const int dv_weight_1080[2][64] = {
{ 8192, 65536, 65536, 61681, 61681, 61681, 58254, 58254,
58254, 58254, 58254, 58254, 55188, 58254, 58254, 55188,
55188, 55188, 55188, 55188, 55188, 24966, 27594, 26214,
26214, 26214, 27594, 24966, 23831, 24385, 25575, 25575,
25575, 25575, 24385, 23831, 23302, 23302, 24966, 24966,
24966, 23302, 23302, 21845, 22795, 24385, 24385, 22795,
21845, 21400, 21845, 23831, 21845, 21400, 10382, 10700,
10700, 10382, 10082, 9620, 10082, 9039, 9039, 8525, },
{ 8192, 65536, 65536, 61681, 61681, 61681, 41943, 41943,
41943, 41943, 40330, 41943, 40330, 41943, 40330, 40330,
40330, 38836, 38836, 40330, 40330, 24966, 27594, 26214,
26214, 26214, 27594, 24966, 23831, 24385, 25575, 25575,
25575, 25575, 24385, 23831, 11523, 11523, 12483, 12483,
12483, 11523, 11523, 10923, 11275, 12193, 12193, 11275,
10923, 5323, 5490, 5924, 5490, 5323, 5165, 5323,
5323, 5165, 5017, 4788, 5017, 4520, 4520, 4263, }
};
static const int dv_weight_720[2][64] = {
{ 8192, 65536, 65536, 61681, 61681, 61681, 58254, 58254,
58254, 58254, 58254, 58254, 55188, 58254, 58254, 55188,
55188, 55188, 55188, 55188, 55188, 24966, 27594, 26214,
26214, 26214, 27594, 24966, 23831, 24385, 25575, 25575,
25575, 25575, 24385, 23831, 15420, 15420, 16644, 16644,
16644, 15420, 15420, 10923, 11398, 12193, 12193, 11398,
10923, 10700, 10923, 11916, 10923, 10700, 5191, 5350,
5350, 5191, 5041, 4810, 5041, 4520, 4520, 4263, },
{ 8192, 43691, 43691, 40330, 40330, 40330, 29127, 29127,
29127, 29127, 29127, 29127, 27594, 29127, 29127, 27594,
27594, 27594, 27594, 27594, 27594, 12483, 13797, 13107,
13107, 13107, 13797, 12483, 11916, 12193, 12788, 12788,
12788, 12788, 12193, 11916, 5761, 5761, 6242, 6242,
6242, 5761, 5761, 5461, 5638, 5461, 6096, 5638,
5461, 2661, 2745, 2962, 2745, 2661, 2583, 2661,
2661, 2583, 2509, 2394, 2509, 2260, 2260, 2131, }
};
static av_always_inline int dv_set_class_number_sd(DVVideoContext *s,
int16_t *blk, EncBlockInfo *bi,
const uint8_t *zigzag_scan,
const int *weight, int bias)
{
const int *weight;
const uint8_t *zigzag_scan;
LOCAL_ALIGNED_16(int16_t, blk, [64]);
int i, area;
/* We offer two different methods for class number assignment: the
* method suggested in SMPTE 314M Table 22, and an improved
@ -271,31 +384,8 @@ static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
const unsigned deadzone = s->quant_deadzone;
const unsigned threshold = 2 * deadzone;
av_assert2((((int) blk) & 15) == 0);
bi->area_q[0] =
bi->area_q[1] =
bi->area_q[2] =
bi->area_q[3] = 0;
bi->partial_bit_count = 0;
bi->partial_bit_buffer = 0;
bi->cur_ac = 0;
if (data) {
bi->dct_mode = dv_guess_dct_mode(s, data, linesize);
s->get_pixels(blk, data, linesize);
s->fdct[bi->dct_mode](blk);
} else {
/* We rely on the fact that encoding all zeros leads to an immediate
* EOB, which is precisely what the spec calls for in the "dummy"
* blocks. */
memset(blk, 0, 64 * sizeof(*blk));
bi->dct_mode = 0;
}
bi->mb[0] = blk[0];
zigzag_scan = bi->dct_mode ? ff_dv_zigzag248_direct : ff_zigzag_direct;
weight = bi->dct_mode ? dv_weight_248 : dv_weight_88;
for (area = 0; area < 4; area++) {
bi->prev[area] = prev;
bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :)
@ -350,6 +440,309 @@ static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
bi->bit_size[2] + bi->bit_size[3];
}
/* this function just copies the DCT coefficients and performs
the initial (non-)quantization. */
static inline void dv_set_class_number_hd(DVVideoContext *s,
int16_t *blk, EncBlockInfo *bi,
const uint8_t *zigzag_scan,
const int *weight, int bias)
{
int i, max = 0;
/* the first quantization (none at all) */
bi->area_q[0] = 1;
/* weigh AC components and store to save[] */
/* (i=0 is the DC component; we only include it to make the
number of loop iterations even, for future possible SIMD optimization) */
for (i = 0; i < 64; i += 2) {
int level0, level1;
/* get the AC component (in zig-zag order) */
level0 = blk[zigzag_scan[i+0]];
level1 = blk[zigzag_scan[i+1]];
/* extract sign and make it the lowest bit */
bi->sign[i+0] = (level0>>31)&1;
bi->sign[i+1] = (level1>>31)&1;
/* take absolute value of the level */
level0 = FFABS(level0);
level1 = FFABS(level1);
/* weigh it */
level0 = (level0*weight[i+0] + 4096 + (1<<17)) >> 18;
level1 = (level1*weight[i+1] + 4096 + (1<<17)) >> 18;
/* save unquantized value */
bi->save[i+0] = level0;
bi->save[i+1] = level1;
/* find max component */
if (bi->save[i+0] > max)
max = bi->save[i+0];
if (bi->save[i+1] > max)
max = bi->save[i+1];
}
/* copy DC component */
bi->mb[0] = blk[0];
/* the EOB code is 4 bits */
bi->bit_size[0] = 4;
bi->bit_size[1] = bi->bit_size[2] = bi->bit_size[3] = 0;
/* ensure that no AC coefficients are cut off */
bi->min_qlevel = ((max+256) >> 8);
bi->area_q[0] = 25; /* set to an "impossible" value */
bi->cno = 0;
}
static av_always_inline int dv_init_enc_block(EncBlockInfo* bi, uint8_t *data, int linesize,
DVVideoContext *s, int chroma)
{
LOCAL_ALIGNED_16(int16_t, blk, [64]);
bi->area_q[0] = bi->area_q[1] = bi->area_q[2] = bi->area_q[3] = 0;
bi->partial_bit_count = 0;
bi->partial_bit_buffer = 0;
bi->cur_ac = 0;
if (data) {
if (DV_PROFILE_IS_HD(s->sys)) {
s->get_pixels(blk, data, linesize << bi->dct_mode);
s->fdct[0](blk);
} else {
bi->dct_mode = dv_guess_dct_mode(s, data, linesize);
s->get_pixels(blk, data, linesize);
s->fdct[bi->dct_mode](blk);
}
} else {
/* We rely on the fact that encoding all zeros leads to an immediate EOB,
which is precisely what the spec calls for in the "dummy" blocks. */
memset(blk, 0, 64*sizeof(*blk));
bi->dct_mode = 0;
}
if (DV_PROFILE_IS_HD(s->sys)) {
const int *weights;
if (s->sys->height == 1080) {
weights = dv_weight_1080[chroma];
} else { /* 720p */
weights = dv_weight_720[chroma];
}
dv_set_class_number_hd(s, blk, bi,
ff_zigzag_direct,
weights,
dv100_min_bias+chroma*dv100_chroma_bias);
} else {
dv_set_class_number_sd(s, blk, bi,
bi->dct_mode ? ff_dv_zigzag248_direct : ff_zigzag_direct,
bi->dct_mode ? dv_weight_248 : dv_weight_88,
chroma);
}
return bi->bit_size[0] + bi->bit_size[1] + bi->bit_size[2] + bi->bit_size[3];
}
/* DV100 quantize
Perform quantization by dividing the AC component by the qstep.
As an optimization we use a fixed-point integer multiply instead
of a divide. */
static av_always_inline int dv100_quantize(int level, int qsinv)
{
/* this code is equivalent to */
/* return (level + qs/2) / qs; */
return (level * qsinv + 1024 + (1<<(dv100_qstep_bits-1))) >> dv100_qstep_bits;
/* the extra +1024 is needed to make the rounding come out right. */
/* I (DJM) have verified that the results are exactly the same as
division for level 0-2048 at all QNOs. */
}
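A small sanity check of the fixed-point reciprocal (illustration only, assuming QNO 5 corresponds to a quantization step of 5, as the tables and comments above suggest):

int level = 100, qs = 5;
int qsinv = 13107;                                  /* dv100_qstep_inv[5] ~= 65536/5 */
int a = (level * qsinv + 1024 + (1 << 15)) >> 16;   /* dv100_quantize(): evaluates to 20 */
int b = (level + qs / 2) / qs;                      /* reference division: also 20 */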
static int dv100_actual_quantize(EncBlockInfo *b, int qlevel)
{
int prev, k, qsinv;
int qno = DV100_QLEVEL_QNO(dv100_qlevels[qlevel]);
int cno = DV100_QLEVEL_CNO(dv100_qlevels[qlevel]);
if (b->area_q[0] == qno && b->cno == cno)
return b->bit_size[0];
qsinv = dv100_qstep_inv[qno];
/* record the new qstep */
b->area_q[0] = qno;
b->cno = cno;
/* reset encoded size (EOB = 4 bits) */
b->bit_size[0] = 4;
/* visit nonzero components and quantize */
prev = 0;
for (k = 1; k < 64; k++) {
/* quantize */
int ac = dv100_quantize(b->save[k], qsinv) >> cno;
if (ac) {
if (ac > 255)
ac = 255;
b->mb[k] = ac;
b->bit_size[0] += dv_rl2vlc_size(k - prev - 1, ac);
b->next[prev] = k;
prev = k;
}
}
b->next[prev] = k;
return b->bit_size[0];
}
static inline void dv_guess_qnos_hd(EncBlockInfo *blks, int *qnos)
{
EncBlockInfo *b;
int min_qlevel[5];
int qlevels[5];
int size[5];
int i, j;
/* cache block sizes at hypothetical qlevels */
uint16_t size_cache[5*8][DV100_NUM_QLEVELS] = {{0}};
/* get minimum qlevels */
for (i = 0; i < 5; i++) {
min_qlevel[i] = 1;
for (j = 0; j < 8; j++) {
if (blks[8*i+j].min_qlevel > min_qlevel[i])
min_qlevel[i] = blks[8*i+j].min_qlevel;
}
}
/* initialize sizes */
for (i = 0; i < 5; i++) {
qlevels[i] = dv100_starting_qno;
if (qlevels[i] < min_qlevel[i])
qlevels[i] = min_qlevel[i];
qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
size[i] = 0;
for (j = 0; j < 8; j++) {
size_cache[8*i+j][qlevels[i]] = dv100_actual_quantize(&blks[8*i+j], qlevels[i]);
size[i] += size_cache[8*i+j][qlevels[i]];
}
}
/* must we go coarser? */
if (size[0]+size[1]+size[2]+size[3]+size[4] > vs_total_ac_bits_hd) {
int largest = size[0] % 5; /* 'random' number */
int qlevels_done = 0;
do {
/* find the macroblock with the lowest qlevel */
for (i = 0; i < 5; i++) {
if (qlevels[i] < qlevels[largest])
largest = i;
}
i = largest;
/* ensure that we don't enter infinite loop */
largest = (largest+1) % 5;
/* quantize a little bit more */
qlevels[i] += dv100_qlevel_inc;
if (qlevels[i] > DV100_NUM_QLEVELS-1) {
qlevels[i] = DV100_NUM_QLEVELS-1;
qlevels_done++;
}
qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
size[i] = 0;
/* for each block */
b = &blks[8*i];
for (j = 0; j < 8; j++, b++) {
/* accumulate block size into macroblock */
if(size_cache[8*i+j][qlevels[i]] == 0) {
/* it is safe to use actual_quantize() here because we only go from finer to coarser,
and it saves the final actual_quantize() down below */
size_cache[8*i+j][qlevels[i]] = dv100_actual_quantize(b, qlevels[i]);
}
size[i] += size_cache[8*i+j][qlevels[i]];
} /* for each block */
} while (vs_total_ac_bits_hd < size[0] + size[1] + size[2] + size[3] + size[4] && qlevels_done < 5);
// can we go finer?
} else if (DV100_ENABLE_FINER &&
size[0]+size[1]+size[2]+size[3]+size[4] < vs_total_ac_bits_hd) {
int save_qlevel;
int largest = size[0] % 5; /* 'random' number */
while (qlevels[0] > min_qlevel[0] ||
qlevels[1] > min_qlevel[1] ||
qlevels[2] > min_qlevel[2] ||
qlevels[3] > min_qlevel[3] ||
qlevels[4] > min_qlevel[4]) {
/* find the macroblock with the highest qlevel */
for (i = 0; i < 5; i++) {
if (qlevels[i] > min_qlevel[i] && qlevels[i] > qlevels[largest])
largest = i;
}
i = largest;
/* ensure that we don't enter infinite loop */
largest = (largest+1) % 5;
if (qlevels[i] <= min_qlevel[i]) {
/* can't unquantize any more */
continue;
}
/* quantize a little bit less */
save_qlevel = qlevels[i];
qlevels[i] -= dv100_qlevel_inc;
if (qlevels[i] < min_qlevel[i])
qlevels[i] = min_qlevel[i];
qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
size[i] = 0;
/* for each block */
b = &blks[8*i];
for (j = 0; j < 8; j++, b++) {
/* accumulate block size into macroblock */
if(size_cache[8*i+j][qlevels[i]] == 0) {
size_cache[8*i+j][qlevels[i]] = dv100_actual_quantize(b, qlevels[i]);
}
size[i] += size_cache[8*i+j][qlevels[i]];
} /* for each block */
/* did we bust the limit? */
if (vs_total_ac_bits_hd < size[0] + size[1] + size[2] + size[3] + size[4]) {
/* go back down and exit */
qlevels[i] = save_qlevel;
qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
break;
}
}
}
/* now do the actual quantization */
for (i = 0; i < 5; i++) {
/* for each block */
b = &blks[8*i];
size[i] = 0;
for (j = 0; j < 8; j++, b++) {
/* accumulate block size into macroblock */
size[i] += dv100_actual_quantize(b, qlevels[i]);
} /* for each block */
}
}
static inline void dv_guess_qnos(EncBlockInfo *blks, int *qnos)
{
int size[5];
@ -422,6 +815,26 @@ static inline void dv_guess_qnos(EncBlockInfo *blks, int *qnos)
}
}
/* update all cno values into the blocks, over-writing the old values without
touching anything else. (only used for DV100) */
static inline void dv_revise_cnos(uint8_t *dif, EncBlockInfo *blk, const AVDVProfile *profile)
{
uint8_t *data;
int mb_index, i;
for (mb_index = 0; mb_index < 5; mb_index++) {
data = dif + mb_index*80 + 4;
for (i = 0; i < profile->bpm; i++) {
/* zero out the class number */
data[1] &= 0xCF;
/* add the new one */
data[1] |= blk[profile->bpm*mb_index+i].cno << 4;
data += profile->block_sizes[i] >> 3;
}
}
}
static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
{
DVVideoContext *s = avctx->priv_data;
@ -430,26 +843,38 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
int mb_x, mb_y, c_offset;
ptrdiff_t linesize, y_stride;
uint8_t *y_ptr;
uint8_t *dif;
uint8_t *dif, *p;
LOCAL_ALIGNED_8(uint8_t, scratch, [128]);
EncBlockInfo enc_blks[5 * DV_MAX_BPM];
PutBitContext pbs[5 * DV_MAX_BPM];
PutBitContext *pb;
EncBlockInfo *enc_blk;
int vs_bit_size = 0;
int qnos[5] = { 15, 15, 15, 15, 15 }; /* No quantization */
int qnos[5];
int *qnosp = &qnos[0];
dif = &s->buf[work_chunk->buf_offset * 80];
p = dif = &s->buf[work_chunk->buf_offset * 80];
enc_blk = &enc_blks[0];
for (mb_index = 0; mb_index < 5; mb_index++) {
dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y);
qnos[mb_index] = DV_PROFILE_IS_HD(s->sys) ? 1 : 15;
y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << 3);
linesize = s->frame->linesize[0];
if (s->sys->height == 1080 && mb_y < 134)
enc_blk->dct_mode = dv_guess_dct_mode(s, y_ptr, linesize);
else
enc_blk->dct_mode = 0;
for (i = 1; i < 8; i++)
enc_blk[i].dct_mode = enc_blk->dct_mode;
/* initializing luminance blocks */
if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
(s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
(s->sys->height >= 720 && mb_y != 134)) {
y_stride = s->frame->linesize[0] << 3;
y_stride = s->frame->linesize[0] << (3*!enc_blk->dct_mode);
} else {
y_stride = 16;
}
@ -478,7 +903,7 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
for (j = 2; j; j--) {
uint8_t *c_ptr = s->frame->data[j] + c_offset;
linesize = s->frame->linesize[j];
y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << (3*!enc_blk->dct_mode));
if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint8_t *d;
uint8_t *b = scratch;
@ -506,27 +931,31 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
}
}
if (vs_total_ac_bits < vs_bit_size)
if (DV_PROFILE_IS_HD(s->sys)) {
/* unconditional */
dv_guess_qnos_hd(&enc_blks[0], qnosp);
} else if (vs_total_ac_bits < vs_bit_size) {
dv_guess_qnos(&enc_blks[0], qnosp);
}
/* DIF encoding process */
for (j = 0; j < 5 * s->sys->bpm;) {
int start_mb = j;
dif[3] = *qnosp++;
dif += 4;
p[3] = *qnosp++;
p += 4;
/* First pass over individual cells only */
for (i = 0; i < s->sys->bpm; i++, j++) {
int sz = s->sys->block_sizes[i] >> 3;
init_put_bits(&pbs[j], dif, sz);
init_put_bits(&pbs[j], p, sz);
put_sbits(&pbs[j], 9, ((enc_blks[j].mb[0] >> 3) - 1024 + 2) >> 2);
put_bits(&pbs[j], 1, enc_blks[j].dct_mode);
put_bits(&pbs[j], 1, DV_PROFILE_IS_HD(s->sys) && i ? 1 : enc_blks[j].dct_mode);
put_bits(&pbs[j], 2, enc_blks[j].cno);
dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j + 1]);
dif += sz;
p += sz;
}
/* Second pass over each MB space */
@ -559,6 +988,9 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
memset(pbs[j].buf + pos, 0xff, size - pos);
}
if (DV_PROFILE_IS_HD(s->sys))
dv_revise_cnos(dif, enc_blks, s->sys);
return 0;
}
@ -583,12 +1015,19 @@ static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c,
* 2. It is not at all clear what STYPE is used for 4:2:0 PAL
* compression scheme (if any).
*/
int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
int fs = c->frame->top_field_first ? 0x00 : 0x40;
uint8_t aspect = 0;
if ((int) (av_q2d(c->avctx->sample_aspect_ratio) *
c->avctx->width / c->avctx->height * 10) >= 17) /* 16:9 */
int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
int fs;
if (c->avctx->height >= 720)
fs = c->avctx->height == 720 || c->frame->top_field_first ? 0x40 : 0x00;
else
fs = c->frame->top_field_first ? 0x00 : 0x40;
if (DV_PROFILE_IS_HD(c->sys) ||
(int)(av_q2d(c->avctx->sample_aspect_ratio) *
c->avctx->width / c->avctx->height * 10) >= 17)
/* HD formats are always 16:9 */
aspect = 0x02;
buf[0] = (uint8_t) pack_id;
@ -643,10 +1082,14 @@ static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num,
uint8_t seq_num, uint8_t dif_num,
uint8_t *buf)
{
int fsc = chan_num & 1;
int fsp = 1 - (chan_num >> 1);
buf[0] = (uint8_t) t; /* Section type */
buf[1] = (seq_num << 4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
(chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
7; /* reserved -- always 1 */
(fsc << 3) | /* FSC: for 50 and 100Mb/s 0 - first channel; 1 - second */
(fsp << 2) | /* FSP: for 100Mb/s 1 - channels 0-1; 0 - channels 2-3 */
3; /* reserved -- always 1 */
buf[2] = dif_num; /* DIF block number Video: 0-134, Audio: 0-8 */
return 3;
}
@ -674,20 +1117,22 @@ static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t *buf)
static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
{
int chan, i, j, k;
/* We work with 720p frames split in half. The odd half-frame is chan 2,3 */
int chan_offset = 2*(c->sys->height == 720 && c->avctx->frame_number & 1);
for (chan = 0; chan < c->sys->n_difchan; chan++) {
for (i = 0; i < c->sys->difseg_size; i++) {
memset(buf, 0xff, 80 * 6); /* first 6 DIF blocks are for control data */
/* DV header: 1DIF */
buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf);
buf += dv_write_dif_id(dv_sect_header, chan+chan_offset, i, 0, buf);
buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525),
c, buf);
buf += 72; /* unused bytes */
/* DV subcode: 2DIFs */
for (j = 0; j < 2; j++) {
buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf);
buf += dv_write_dif_id(dv_sect_subcode, chan+chan_offset, i, j, buf);
for (k = 0; k < 6; k++)
buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size / 2), buf) + 5;
buf += 29; /* unused bytes */
@ -695,7 +1140,7 @@ static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
/* DV VAUX: 3DIFS */
for (j = 0; j < 3; j++) {
buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf);
buf += dv_write_dif_id(dv_sect_vaux, chan+chan_offset, i, j, buf);
buf += dv_write_pack(dv_video_source, c, buf);
buf += dv_write_pack(dv_video_control, c, buf);
buf += 7 * 5;
@ -708,10 +1153,10 @@ static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
for (j = 0; j < 135; j++) {
if (j % 15 == 0) {
memset(buf, 0xff, 80);
buf += dv_write_dif_id(dv_sect_audio, chan, i, j / 15, buf);
buf += dv_write_dif_id(dv_sect_audio, chan+chan_offset, i, j/15, buf);
buf += 77; /* audio control & shuffled PCM audio */
}
buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf);
buf += dv_write_dif_id(dv_sect_video, chan+chan_offset, i, j, buf);
buf += 77; /* 1 video macroblock: 1 byte control
* 4 * 14 bytes Y 8x8 data
* 10 bytes Cr 8x8 data
@ -738,15 +1183,15 @@ FF_DISABLE_DEPRECATION_WARNINGS
c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
s->buf = pkt->data;
dv_format_frame(s, pkt->data);
c->execute(c, dv_encode_video_segment, s->work_chunks, NULL,
dv_work_pool_size(s->sys), sizeof(DVwork_chunk));
emms_c();
dv_format_frame(s, pkt->data);
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;

View File

@ -428,9 +428,15 @@ int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *
return AVERROR(EINVAL);
if (avctx->codec->receive_packet) {
int ret;
if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
return AVERROR_EOF;
return avctx->codec->receive_packet(avctx, avpkt);
ret = avctx->codec->receive_packet(avctx, avpkt);
if (!ret)
// Encoders must always return ref-counted buffers.
// Side-data-only packets have no data and need not be ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
return ret;
}
// Emulation via old API.

View File

@ -27,6 +27,7 @@
#include "av1.h"
#include "av1_parse.h"
#include "bsf.h"
#include "bytestream.h"
#include "h2645_parse.h"
#include "h264.h"
#include "hevc.h"
@ -85,8 +86,9 @@ static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt,
}
if (extradata_size && has_seq) {
AVBufferRef *filtered_buf;
uint8_t *extradata, *filtered_data;
AVBufferRef *filtered_buf = NULL;
PutByteContext pb_filtered_data, pb_extradata;
uint8_t *extradata;
if (s->remove) {
filtered_buf = av_buffer_alloc(filtered_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -94,8 +96,6 @@ static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt,
return AVERROR(ENOMEM);
}
memset(filtered_buf->data + filtered_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
filtered_data = filtered_buf->data;
}
extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -108,15 +108,17 @@ static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt,
*data = extradata;
*size = extradata_size;
bytestream2_init_writer(&pb_extradata, extradata, extradata_size);
if (s->remove)
bytestream2_init_writer(&pb_filtered_data, filtered_buf->data, filtered_size);
for (i = 0; i < s->av1_pkt.nb_obus; i++) {
AV1OBU *obu = &s->av1_pkt.obus[i];
if (val_in_array(extradata_obu_types, nb_extradata_obu_types,
obu->type)) {
memcpy(extradata, obu->raw_data, obu->raw_size);
extradata += obu->raw_size;
bytestream2_put_bufferu(&pb_extradata, obu->raw_data, obu->raw_size);
} else if (s->remove) {
memcpy(filtered_data, obu->raw_data, obu->raw_size);
filtered_data += obu->raw_size;
bytestream2_put_bufferu(&pb_filtered_data, obu->raw_data, obu->raw_size);
}
}
@ -179,8 +181,9 @@ static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt,
if (extradata_size &&
((ctx->par_in->codec_id == AV_CODEC_ID_HEVC && has_sps && has_vps) ||
(ctx->par_in->codec_id == AV_CODEC_ID_H264 && has_sps))) {
AVBufferRef *filtered_buf;
uint8_t *extradata, *filtered_data;
AVBufferRef *filtered_buf = NULL;
PutByteContext pb_filtered_data, pb_extradata;
uint8_t *extradata;
if (s->remove) {
filtered_buf = av_buffer_alloc(filtered_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -188,8 +191,6 @@ static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt,
return AVERROR(ENOMEM);
}
memset(filtered_buf->data + filtered_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
filtered_data = filtered_buf->data;
}
extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -202,17 +203,19 @@ static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt,
*data = extradata;
*size = extradata_size;
bytestream2_init_writer(&pb_extradata, extradata, extradata_size);
if (s->remove)
bytestream2_init_writer(&pb_filtered_data, filtered_buf->data, filtered_size);
for (i = 0; i < s->h2645_pkt.nb_nals; i++) {
H2645NAL *nal = &s->h2645_pkt.nals[i];
if (val_in_array(extradata_nal_types, nb_extradata_nal_types,
nal->type)) {
AV_WB24(extradata, 1); // startcode
memcpy(extradata + 3, nal->raw_data, nal->raw_size);
extradata += 3 + nal->raw_size;
bytestream2_put_be24u(&pb_extradata, 1); //startcode
bytestream2_put_bufferu(&pb_extradata, nal->raw_data, nal->raw_size);
} else if (s->remove) {
AV_WB24(filtered_data, 1); // startcode
memcpy(filtered_data + 3, nal->raw_data, nal->raw_size);
filtered_data += 3 + nal->raw_size;
bytestream2_put_be24u(&pb_filtered_data, 1); // startcode
bytestream2_put_bufferu(&pb_filtered_data, nal->raw_data, nal->raw_size);
}
}

View File

@ -217,8 +217,8 @@ static void wavesynth_seek(struct wavesynth_context *ws, int64_t ts)
*last = -1;
lcg_seek(&ws->dither_state, (uint32_t)ts - (uint32_t)ws->cur_ts);
if (ws->pink_need) {
int64_t pink_ts_cur = (ws->cur_ts + PINK_UNIT - 1) & ~(PINK_UNIT - 1);
int64_t pink_ts_next = ts & ~(PINK_UNIT - 1);
uint64_t pink_ts_cur = (ws->cur_ts + PINK_UNIT - 1) & ~(PINK_UNIT - 1);
uint64_t pink_ts_next = ts & ~(PINK_UNIT - 1);
int pos = ts & (PINK_UNIT - 1);
lcg_seek(&ws->pink_state, (uint32_t)(pink_ts_next - pink_ts_cur) * 2);
if (pos) {

View File

@ -279,7 +279,7 @@ static int fits_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
for (j = 0; j < avctx->width; j++) { \
t = rd; \
if (!header.blank_found || t != header.blank) { \
*dst++ = ((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale; \
*dst++ = lrint(((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale); \
} else { \
*dst++ = fitsctx->blank_val; \
} \

View File

@ -97,6 +97,7 @@ typedef struct {
uint8_t gc_2nd_index_bits; ///< gain codebook (second stage) index (size in bits)
uint8_t fc_signs_bits; ///< number of pulses in fixed-codebook vector
uint8_t fc_indexes_bits; ///< size (in bits) of fixed-codebook index entry
uint8_t block_size;
} G729FormatDescription;
typedef struct {
@ -165,6 +166,7 @@ static const G729FormatDescription format_g729_8k = {
.gc_2nd_index_bits = GC_2ND_IDX_BITS_8K,
.fc_signs_bits = 4,
.fc_indexes_bits = 13,
.block_size = G729_8K_BLOCK_SIZE,
};
static const G729FormatDescription format_g729d_6k4 = {
@ -174,6 +176,7 @@ static const G729FormatDescription format_g729d_6k4 = {
.gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4,
.fc_signs_bits = 2,
.fc_indexes_bits = 9,
.block_size = G729D_6K4_BLOCK_SIZE,
};
/**
@ -332,11 +335,14 @@ static int16_t g729d_voice_decision(int onset, int prev_voice_decision, const in
static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order)
{
int res = 0;
int64_t res = 0;
while (order--)
res += *v1++ * *v2++;
if (res > INT32_MAX) return INT32_MAX;
else if (res < INT32_MIN) return INT32_MIN;
return res;
}
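For context (not part of the patch), the accumulator is widened because a 32-bit sum of 16-bit products overflows almost immediately in the worst case:

int64_t one   = 32767LL * 32767;  /* 1073676289, just under 2^30 */
int64_t three = 3 * one;          /* 3221028867 > INT32_MAX (2147483647) */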
@ -424,14 +430,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
if (buf_size % ((G729_8K_BLOCK_SIZE + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels) == 0) {
if (buf_size && buf_size % ((G729_8K_BLOCK_SIZE + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels) == 0) {
packet_type = FORMAT_G729_8K;
format = &format_g729_8k;
//Reset voice decision
ctx->onset = 0;
ctx->voice_decision = DECISION_VOICE;
av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s");
} else if (buf_size == G729D_6K4_BLOCK_SIZE * avctx->channels) {
} else if (buf_size == G729D_6K4_BLOCK_SIZE * avctx->channels && avctx->codec_id != AV_CODEC_ID_ACELP_KELVIN) {
packet_type = FORMAT_G729D_6K4;
format = &format_g729d_6k4;
av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s");
@ -451,11 +457,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
buf++;
}
for (i = 0; i < buf_size; i++)
for (i = 0; i < format->block_size; i++)
frame_erasure |= buf[i];
frame_erasure = !frame_erasure;
init_get_bits(&gb, buf, 8*buf_size);
init_get_bits8(&gb, buf, format->block_size);
ma_predictor = get_bits(&gb, 1);
quantizer_1st = get_bits(&gb, VQ_1ST_BITS);
@ -728,12 +734,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
/* Save signal for use in next frame. */
memmove(ctx->exc_base, ctx->exc_base + 2 * SUBFRAME_SIZE, (PITCH_DELAY_MAX+INTERPOL_LEN)*sizeof(int16_t));
buf += packet_type == FORMAT_G729_8K ? G729_8K_BLOCK_SIZE : G729D_6K4_BLOCK_SIZE;
buf += format->block_size;
ctx++;
}
*got_frame_ptr = 1;
return packet_type == FORMAT_G729_8K ? (G729_8K_BLOCK_SIZE + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels : G729D_6K4_BLOCK_SIZE * avctx->channels;
return (format->block_size + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels;
}
static av_cold int decode_close(AVCodecContext *avctx)

View File

@ -456,11 +456,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
*/
static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
{
const uint64_t *lut = plane8_lut[plane];
const uint64_t *lut;
if (plane >= 8) {
av_log(NULL, AV_LOG_WARNING, "Ignoring extra planes beyond 8\n");
return;
}
lut = plane8_lut[plane];
do {
uint64_t v = AV_RN64A(dst) | lut[*buf++];
AV_WN64A(dst, v);

View File

@ -40,6 +40,8 @@ typedef struct Libdav1dContext {
int tile_threads;
int frame_threads;
int apply_grain;
int operating_point;
int all_layers;
} Libdav1dContext;
static const enum AVPixelFormat pix_fmt[][3] = {
@ -134,6 +136,10 @@ static av_cold int libdav1d_init(AVCodecContext *c)
if (dav1d->apply_grain >= 0)
s.apply_grain = dav1d->apply_grain;
s.all_layers = dav1d->all_layers;
if (dav1d->operating_point >= 0)
s.operating_point = dav1d->operating_point;
s.n_tile_threads = dav1d->tile_threads
? dav1d->tile_threads
: FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
@ -378,6 +384,8 @@ static const AVOption libdav1d_options[] = {
{ "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
{ "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
{ "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
{ "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
{ "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
{ NULL }
};
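Both new options use the common pattern of defaulting to a sentinel value so that the wrapper only overrides the library's own default when the user actually set something, which is what the `if (dav1d->operating_point >= 0)` check in the init hunk does. A small generic sketch of that sentinel-forwarding pattern; the struct and function names below are invented for illustration and are not FFmpeg or dav1d API:

    #include <stdio.h>

    /* Hypothetical wrapper options: -1 means "not set by the user". */
    typedef struct WrapperOptions {
        int operating_point;   /* -1..31, -1 = keep the library default */
        int all_layers;        /*  0..1 */
    } WrapperOptions;

    /* Hypothetical library settings, pre-filled with library defaults. */
    typedef struct LibSettings {
        int operating_point;
        int all_layers;
    } LibSettings;

    static void apply_options(const WrapperOptions *o, LibSettings *s)
    {
        /* Forward only values the user explicitly chose; the sentinel
         * leaves the library's default untouched. */
        if (o->operating_point >= 0)
            s->operating_point = o->operating_point;
        s->all_layers = o->all_layers;
    }

    int main(void)
    {
        WrapperOptions o = { .operating_point = -1, .all_layers = 0 };
        LibSettings    s = { .operating_point = 0,  .all_layers = 1 };
        apply_options(&o, &s);
        printf("operating_point=%d all_layers=%d\n",
               s.operating_point, s.all_layers);
        return 0;
    }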

View File

@ -29,6 +29,7 @@
#include "audio_frame_queue.h"
#include "internal.h"
#if CONFIG_LIBOPENCORE_AMRNB_DECODER || CONFIG_LIBOPENCORE_AMRWB_DECODER
static int amr_decode_fix_avctx(AVCodecContext *avctx)
{
const int is_amr_wb = 1 + (avctx->codec_id == AV_CODEC_ID_AMR_WB);
@ -46,6 +47,7 @@ static int amr_decode_fix_avctx(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
return 0;
}
#endif
#if CONFIG_LIBOPENCORE_AMRNB

View File

@ -533,7 +533,7 @@ retry:
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = 100 }, -1, 255, VE },
{ "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
{ "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
{ "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
{ "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },

View File

@ -59,7 +59,7 @@ typedef struct XAVS2EContext {
static av_cold int xavs2_init(AVCodecContext *avctx)
{
XAVS2EContext *cae= avctx->priv_data;
XAVS2EContext *cae = avctx->priv_data;
int bit_depth, code;
bit_depth = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 8 : 10;
@ -67,13 +67,13 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
/* get API handler */
cae->api = xavs2_api_get(bit_depth);
if (!cae->api) {
av_log(avctx, AV_LOG_ERROR, "api get failed\n");
av_log(avctx, AV_LOG_ERROR, "Failed to get xavs2 api context\n");
return AVERROR_EXTERNAL;
}
cae->param = cae->api->opt_alloc();
if (!cae->param) {
av_log(avctx, AV_LOG_ERROR, "param alloc failed\n");
av_log(avctx, AV_LOG_ERROR, "Failed to alloc xavs2 parameters\n");
return AVERROR(ENOMEM);
}
@ -115,15 +115,13 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
xavs2_opt_set2("InitialQP", "%d", cae->qp);
}
ff_mpeg12_find_best_frame_rate(avctx->framerate, &code, NULL, NULL, 0);
xavs2_opt_set2("FrameRate", "%d", code);
cae->encoder = cae->api->encoder_create(cae->param);
if (!cae->encoder) {
av_log(avctx,AV_LOG_ERROR, "Can not create encoder. Null pointer returned\n");
av_log(avctx, AV_LOG_ERROR, "Failed to create xavs2 encoder instance.\n");
return AVERROR(EINVAL);
}
@ -132,29 +130,42 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
static void xavs2_copy_frame_with_shift(xavs2_picture_t *pic, const AVFrame *frame, const int shift_in)
{
int j, k;
for (k = 0; k < 3; k++) {
int i_stride = pic->img.i_stride[k];
for (j = 0; j < pic->img.i_lines[k]; j++) {
uint16_t *p_plane = (uint16_t *)&pic->img.img_planes[k][j * i_stride];
int i;
uint8_t *p_buffer = frame->data[k] + frame->linesize[k] * j;
memset(p_plane, 0, i_stride);
for (i = 0; i < pic->img.i_width[k]; i++) {
p_plane[i] = p_buffer[i] << shift_in;
uint16_t *p_plane;
uint8_t *p_buffer;
int plane;
int hIdx;
int wIdx;
for (plane = 0; plane < 3; plane++) {
p_plane = (uint16_t *)pic->img.img_planes[plane];
p_buffer = frame->data[plane];
for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
memset(p_plane, 0, pic->img.i_stride[plane]);
for (wIdx = 0; wIdx < pic->img.i_width[plane]; wIdx++) {
p_plane[wIdx] = p_buffer[wIdx] << shift_in;
}
p_plane += pic->img.i_stride[plane];
p_buffer += frame->linesize[plane];
}
}
}
static void xavs2_copy_frame(xavs2_picture_t *pic, const AVFrame *frame)
{
int j, k;
for (k = 0; k < 3; k++) {
for (j = 0; j < pic->img.i_lines[k]; j++) {
memcpy( pic->img.img_planes[k] + pic->img.i_stride[k] * j,
frame->data[k]+frame->linesize[k] * j,
pic->img.i_width[k] * pic->img.in_sample_size);
uint8_t *p_plane;
uint8_t *p_buffer;
int plane;
int hIdx;
int stride;
for (plane = 0; plane < 3; plane++) {
p_plane = pic->img.img_planes[plane];
p_buffer = frame->data[plane];
stride = pic->img.i_width[plane] * pic->img.in_sample_size;
for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
memcpy(p_plane, p_buffer, stride);
p_plane += pic->img.i_stride[plane];
p_buffer += frame->linesize[plane];
}
}
}
@ -169,7 +180,7 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
/* create the XAVS2 video encoder */
/* read frame data and send to the XAVS2 video encoder */
if (cae->api->encoder_get_buffer(cae->encoder, &pic) < 0) {
av_log(avctx,AV_LOG_ERROR, "failed to get frame buffer\n");
av_log(avctx, AV_LOG_ERROR, "Failed to get xavs2 frame buffer\n");
return AVERROR_EXTERNAL;
}
if (frame) {
@ -200,7 +211,7 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
ret = cae->api->encoder_encode(cae->encoder, &pic, &cae->packet);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "encode failed\n");
av_log(avctx, AV_LOG_ERROR, "Encoding error occured.\n");
return AVERROR_EXTERNAL;
}
@ -208,10 +219,9 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
cae->api->encoder_encode(cae->encoder, NULL, &cae->packet);
}
if ((cae->packet.len) && (cae->packet.state != XAVS2_STATE_FLUSH_END)){
if (av_new_packet(pkt, cae->packet.len) < 0){
av_log(avctx, AV_LOG_ERROR, "packet alloc failed\n");
if ((cae->packet.len) && (cae->packet.state != XAVS2_STATE_FLUSH_END)) {
if (av_new_packet(pkt, cae->packet.len) < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed to alloc xavs2 packet.\n");
cae->api->encoder_packet_unref(cae->encoder, &cae->packet);
return AVERROR(ENOMEM);
}

libavcodec/midivid.c (new file, 283 lines)
View File

@ -0,0 +1,283 @@
/*
* MidiVid decoder
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "get_bits.h"
#include "bytestream.h"
#include "internal.h"
typedef struct MidiVidContext {
GetByteContext gb;
uint8_t *uncompressed;
unsigned int uncompressed_size;
uint8_t *skip;
AVFrame *frame;
} MidiVidContext;
static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame)
{
GetByteContext *gb = &s->gb;
GetBitContext mask;
GetByteContext idx9;
uint16_t nb_vectors, intra_flag;
const uint8_t *vec;
const uint8_t *mask_start;
uint8_t *skip;
uint32_t mask_size;
int idx9bits = 0;
int idx9val = 0;
uint32_t nb_blocks;
nb_vectors = bytestream2_get_le16(gb);
intra_flag = bytestream2_get_le16(gb);
if (intra_flag) {
nb_blocks = (avctx->width / 2) * (avctx->height / 2);
} else {
int skip_linesize;
nb_blocks = bytestream2_get_le32(gb);
skip_linesize = avctx->width >> 1;
mask_start = gb->buffer_start + bytestream2_tell(gb);
mask_size = (avctx->width >> 5) * (avctx->height >> 2);
if (bytestream2_get_bytes_left(gb) < mask_size)
return AVERROR_INVALIDDATA;
init_get_bits8(&mask, mask_start, mask_size);
bytestream2_skip(gb, mask_size);
skip = s->skip;
for (int y = 0; y < avctx->height >> 2; y++) {
for (int x = 0; x < avctx->width >> 2; x++) {
int flag = !get_bits1(&mask);
skip[(y*2) *skip_linesize + x*2 ] = flag;
skip[(y*2) *skip_linesize + x*2+1] = flag;
skip[(y*2+1)*skip_linesize + x*2 ] = flag;
skip[(y*2+1)*skip_linesize + x*2+1] = flag;
}
}
}
vec = gb->buffer_start + bytestream2_tell(gb);
if (bytestream2_get_bytes_left(gb) < nb_vectors * 12)
return AVERROR_INVALIDDATA;
bytestream2_skip(gb, nb_vectors * 12);
if (nb_vectors > 256) {
if (bytestream2_get_bytes_left(gb) < (nb_blocks + 7) / 8)
return AVERROR_INVALIDDATA;
bytestream2_init(&idx9, gb->buffer_start + bytestream2_tell(gb), (nb_blocks + 7) / 8);
bytestream2_skip(gb, (nb_blocks + 7) / 8);
}
skip = s->skip;
for (int y = avctx->height - 2; y >= 0; y -= 2) {
uint8_t *dsty = frame->data[0] + y * frame->linesize[0];
uint8_t *dstu = frame->data[1] + y * frame->linesize[1];
uint8_t *dstv = frame->data[2] + y * frame->linesize[2];
for (int x = 0; x < avctx->width; x += 2) {
int idx;
if (!intra_flag && *skip++)
continue;
if (bytestream2_get_bytes_left(gb) <= 0)
return AVERROR_INVALIDDATA;
if (nb_vectors <= 256) {
idx = bytestream2_get_byte(gb);
} else {
if (idx9bits == 0) {
idx9val = bytestream2_get_byte(&idx9);
idx9bits = 8;
}
idx9bits--;
idx = bytestream2_get_byte(gb) | (((idx9val >> (7 - idx9bits)) & 1) << 8);
}
dsty[x +frame->linesize[0]] = vec[idx * 12 + 0];
dsty[x+1+frame->linesize[0]] = vec[idx * 12 + 3];
dsty[x] = vec[idx * 12 + 6];
dsty[x+1] = vec[idx * 12 + 9];
dstu[x +frame->linesize[1]] = vec[idx * 12 + 1];
dstu[x+1+frame->linesize[1]] = vec[idx * 12 + 4];
dstu[x] = vec[idx * 12 + 7];
dstu[x+1] = vec[idx * 12 +10];
dstv[x +frame->linesize[2]] = vec[idx * 12 + 2];
dstv[x+1+frame->linesize[2]] = vec[idx * 12 + 5];
dstv[x] = vec[idx * 12 + 8];
dstv[x+1] = vec[idx * 12 +11];
}
}
return intra_flag;
}
static ptrdiff_t lzss_uncompress(MidiVidContext *s, GetByteContext *gb, uint8_t *dst, unsigned int size)
{
uint8_t *dst_start = dst;
uint8_t *dst_end = dst + size;
for (;bytestream2_get_bytes_left(gb) >= 3;) {
int op = bytestream2_get_le16(gb);
for (int i = 0; i < 16; i++) {
if (op & 1) {
int s0 = bytestream2_get_byte(gb);
int s1 = bytestream2_get_byte(gb);
int offset = ((s0 & 0xF0) << 4) | s1;
int length = (s0 & 0xF) + 3;
if (dst + length > dst_end ||
dst - offset < dst_start)
return AVERROR_INVALIDDATA;
if (offset > 0) {
for (int j = 0; j < length; j++) {
dst[j] = dst[j - offset];
}
}
dst += length;
} else {
if (dst >= dst_end)
return AVERROR_INVALIDDATA;
*dst++ = bytestream2_get_byte(gb);
}
op >>= 1;
}
}
return dst - dst_start;
}
static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
MidiVidContext *s = avctx->priv_data;
GetByteContext *gb = &s->gb;
AVFrame *frame = s->frame;
int ret, key, uncompressed;
if (avpkt->size <= 13)
return AVERROR_INVALIDDATA;
bytestream2_init(gb, avpkt->data, avpkt->size);
bytestream2_skip(gb, 8);
uncompressed = bytestream2_get_le32(gb);
if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
return ret;
if (uncompressed) {
ret = decode_mvdv(s, avctx, frame);
} else {
av_fast_padded_malloc(&s->uncompressed, &s->uncompressed_size, 16LL * (avpkt->size - 12));
if (!s->uncompressed)
return AVERROR(ENOMEM);
ret = lzss_uncompress(s, gb, s->uncompressed, s->uncompressed_size);
if (ret < 0)
return ret;
bytestream2_init(gb, s->uncompressed, ret);
ret = decode_mvdv(s, avctx, frame);
}
if (ret < 0)
return ret;
key = ret;
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
frame->pict_type = key ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
frame->key_frame = key;
*got_frame = 1;
return avpkt->size;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
MidiVidContext *s = avctx->priv_data;
int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
avctx->width, avctx->height);
return ret;
}
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
s->frame = av_frame_alloc();
if (!s->frame)
return AVERROR(ENOMEM);
s->skip = av_calloc(avctx->width >> 1, avctx->height >> 1);
if (!s->skip)
return AVERROR(ENOMEM);
return 0;
}
static void decode_flush(AVCodecContext *avctx)
{
MidiVidContext *s = avctx->priv_data;
av_frame_unref(s->frame);
}
static av_cold int decode_close(AVCodecContext *avctx)
{
MidiVidContext *s = avctx->priv_data;
av_frame_free(&s->frame);
av_freep(&s->uncompressed);
av_freep(&s->skip);
return 0;
}
AVCodec ff_mvdv_decoder = {
.name = "mvdv",
.long_name = NULL_IF_CONFIG_SMALL("MidiVid VQ"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MVDV,
.priv_data_size = sizeof(MidiVidContext),
.init = decode_init,
.decode = decode_frame,
.flush = decode_flush,
.close = decode_close,
.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
};
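The most self-contained piece of the new decoder is lzss_uncompress(): a 16-bit little-endian opcode word controls the next 16 tokens, where a set bit expands a back-reference (12-bit offset, 4-bit length plus 3) and a clear bit copies a literal, with every write bounds-checked against the destination. A hedged standalone sketch of that loop without the GetByteContext plumbing; names are illustrative, not FFmpeg's:

    #include <stddef.h>
    #include <stdint.h>

    /* Expand MidiVid-style LZSS data.  Each 16-bit LE opcode word governs
     * the next 16 tokens, LSB first: bit set = back-reference, bit clear
     * = literal byte.  Returns bytes written, or -1 on a bounds error. */
    static ptrdiff_t lzss_expand(const uint8_t *src, size_t src_size,
                                 uint8_t *dst, size_t dst_size)
    {
        const uint8_t *src_end = src + src_size;
        uint8_t *out = dst, *dst_end = dst + dst_size;

        while (src_end - src >= 3) {
            unsigned op = src[0] | (src[1] << 8);      /* 16 flag bits */
            src += 2;
            for (int i = 0; i < 16; i++, op >>= 1) {
                if (op & 1) {
                    if (src_end - src < 2)
                        return out - dst;              /* truncated input */
                    int s0 = *src++, s1 = *src++;
                    int offset = ((s0 & 0xF0) << 4) | s1;  /* 12-bit distance */
                    int length = (s0 & 0x0F) + 3;          /* 3..18 bytes */
                    if (out + length > dst_end || out - offset < dst)
                        return -1;                     /* out of bounds */
                    if (offset > 0) {
                        for (int j = 0; j < length; j++)   /* copy may overlap */
                            out[j] = out[j - offset];
                    }
                    out += length;
                } else {
                    if (out >= dst_end)
                        return -1;                     /* output full */
                    if (src >= src_end)
                        return out - dst;
                    *out++ = *src++;                   /* literal byte */
                }
            }
        }
        return out - dst;
    }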

View File

@ -50,7 +50,7 @@ static int find_frame_end(MJPEGParserContext *m, const uint8_t *buf, int buf_siz
for(i=0; i<buf_size;){
state= (state<<8) | buf[i];
if(state>=0xFFC00000 && state<=0xFFFEFFFF){
if(state>=0xFFD80000 && state<=0xFFD8FFFF){
if(state>=0xFFD8FFC0 && state<=0xFFD8FFFF){
i++;
vop_found=1;
break;
@ -76,12 +76,14 @@ static int find_frame_end(MJPEGParserContext *m, const uint8_t *buf, int buf_siz
for(; i<buf_size;){
state= (state<<8) | buf[i];
if(state>=0xFFC00000 && state<=0xFFFEFFFF){
if(state>=0xFFD80000 && state<=0xFFD8FFFF){
if(state>=0xFFD8FFC0 && state<=0xFFD8FFFF){
pc->frame_start_found=0;
pc->state=0;
return i-3;
} else if(state<0xFFD00000 || state>0xFFD9FFFF){
m->size= (state&0xFFFF)-1;
if (m->size >= 0x8000)
m->size = 0;
}
}
if(m->size>0){
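The stricter test above requires that the 32-bit rolling state hold an SOI marker (0xFFD8) immediately followed by another marker in the 0xFFC0..0xFFFF range, rather than accepting any two bytes after SOI. A tiny sketch of that rolling-state scan outside the parser framework; names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Scan for an SOI marker (FF D8) directly followed by another JPEG
     * marker (FF C0 .. FF FF).  The 32-bit shift register can be carried
     * across buffer boundaries by keeping *state between calls.
     * Returns the index just past the match, or -1 if none was found. */
    static int find_soi(const uint8_t *buf, int size, uint32_t *state)
    {
        uint32_t st = *state;
        for (int i = 0; i < size; i++) {
            st = (st << 8) | buf[i];
            if (st >= 0xFFD8FFC0u && st <= 0xFFD8FFFFu) {
                *state = st;
                return i + 1;
            }
        }
        *state = st;
        return -1;
    }

    int main(void)
    {
        const uint8_t data[] = { 0x00, 0xFF, 0xD8, 0xFF, 0xE0, 0x00, 0x10 };
        uint32_t state = 0;
        printf("match ends at index %d\n",
               find_soi(data, (int)sizeof(data), &state));
        return 0;
    }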

View File

@ -56,6 +56,7 @@ static int mjpegb_decode_frame(AVCodecContext *avctx,
buf_ptr = buf;
buf_end = buf + buf_size;
s->got_picture = 0;
s->adobe_transform = -1;
read_header:
/* reset on every SOI */

View File

@ -154,7 +154,7 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
break;
}
}
the_end: ;
the_end:
if (set_dim_ret < 0)
av_log(avctx, AV_LOG_ERROR, "Failed to set dimensions\n");

libavcodec/mvha.c (new file, 315 lines)
View File

@ -0,0 +1,315 @@
/*
* MidiVid Archive codec
*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define CACHED_BITSTREAM_READER !ARCH_X86_32
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "lossless_videodsp.h"
#include <zlib.h>
typedef struct MVHAContext {
GetBitContext gb;
int nb_symbols;
uint8_t symb[256];
uint32_t prob[256];
VLC vlc;
z_stream zstream;
LLVidDSPContext llviddsp;
} MVHAContext;
typedef struct Node {
int16_t sym;
int16_t n0;
int16_t l, r;
uint32_t count;
} Node;
static void get_tree_codes(uint32_t *bits, int16_t *lens, uint8_t *xlat,
Node *nodes, int node,
uint32_t pfx, int pl, int *pos)
{
int s;
s = nodes[node].sym;
if (s != -1) {
bits[*pos] = (~pfx) & ((1ULL << FFMAX(pl, 1)) - 1);
lens[*pos] = FFMAX(pl, 1);
xlat[*pos] = s + (pl == 0);
(*pos)++;
} else {
pfx <<= 1;
pl++;
get_tree_codes(bits, lens, xlat, nodes, nodes[node].l, pfx, pl,
pos);
pfx |= 1;
get_tree_codes(bits, lens, xlat, nodes, nodes[node].r, pfx, pl,
pos);
}
}
static int build_vlc(AVCodecContext *avctx, VLC *vlc)
{
MVHAContext *s = avctx->priv_data;
Node nodes[512];
uint32_t bits[256];
int16_t lens[256];
uint8_t xlat[256];
int cur_node, i, j, pos = 0;
ff_free_vlc(vlc);
for (i = 0; i < s->nb_symbols; i++) {
nodes[i].count = s->prob[i];
nodes[i].sym = s->symb[i];
nodes[i].n0 = -2;
nodes[i].l = i;
nodes[i].r = i;
}
cur_node = s->nb_symbols;
j = 0;
do {
for (i = 0; ; i++) {
int new_node = j;
int first_node = cur_node;
int second_node = cur_node;
unsigned nd, st;
nodes[cur_node].count = -1;
do {
int val = nodes[new_node].count;
if (val && (val < nodes[first_node].count)) {
if (val >= nodes[second_node].count) {
first_node = new_node;
} else {
first_node = second_node;
second_node = new_node;
}
}
new_node += 1;
} while (new_node != cur_node);
if (first_node == cur_node)
break;
nd = nodes[second_node].count;
st = nodes[first_node].count;
nodes[second_node].count = 0;
nodes[first_node].count = 0;
if (nd >= UINT32_MAX - st) {
av_log(avctx, AV_LOG_ERROR, "count overflow\n");
return AVERROR_INVALIDDATA;
}
nodes[cur_node].count = nd + st;
nodes[cur_node].sym = -1;
nodes[cur_node].n0 = cur_node;
nodes[cur_node].l = first_node;
nodes[cur_node].r = second_node;
cur_node++;
}
j++;
} while (cur_node - s->nb_symbols == j);
get_tree_codes(bits, lens, xlat, nodes, cur_node - 1, 0, 0, &pos);
return ff_init_vlc_sparse(vlc, 12, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
}
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
MVHAContext *s = avctx->priv_data;
AVFrame *frame = data;
uint32_t type, size;
int ret;
if (avpkt->size <= 8)
return AVERROR_INVALIDDATA;
type = AV_RB32(avpkt->data);
size = AV_RL32(avpkt->data + 4);
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
if (type == MKTAG('L','Z','Y','V')) {
ret = inflateReset(&s->zstream);
if (ret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", ret);
return AVERROR_EXTERNAL;
}
s->zstream.next_in = avpkt->data + 8;
s->zstream.avail_in = avpkt->size - 8;
for (int p = 0; p < 3; p++) {
for (int y = 0; y < avctx->height; y++) {
s->zstream.next_out = frame->data[p] + (avctx->height - y - 1) * frame->linesize[p];
s->zstream.avail_out = avctx->width >> (p > 0);
ret = inflate(&s->zstream, Z_SYNC_FLUSH);
if (ret != Z_OK && ret != Z_STREAM_END) {
av_log(avctx, AV_LOG_ERROR, "Inflate error: %d\n", ret);
return AVERROR_EXTERNAL;
}
}
}
} else if (type == MKTAG('H','U','F','Y')) {
GetBitContext *gb = &s->gb;
int first_symbol, symbol;
ret = init_get_bits8(gb, avpkt->data + 8, avpkt->size - 8);
if (ret < 0)
return ret;
skip_bits(gb, 24);
first_symbol = get_bits(gb, 8);
s->nb_symbols = get_bits(gb, 8) + 1;
symbol = first_symbol;
for (int i = 0; i < s->nb_symbols; symbol++) {
int prob;
if (get_bits_left(gb) < 4)
return AVERROR_INVALIDDATA;
if (get_bits1(gb)) {
prob = get_bits(gb, 12);
} else {
prob = get_bits(gb, 3);
}
if (prob) {
s->symb[i] = symbol;
s->prob[i] = prob;
i++;
}
}
ret = build_vlc(avctx, &s->vlc);
if (ret < 0)
return ret;
for (int p = 0; p < 3; p++) {
int width = avctx->width >> (p > 0);
ptrdiff_t stride = frame->linesize[p];
uint8_t *dst;
dst = frame->data[p] + (avctx->height - 1) * frame->linesize[p];
for (int y = 0; y < avctx->height; y++) {
for (int x = 0; x < width; x++) {
int v = get_vlc2(gb, s->vlc.table, s->vlc.bits, 3);
if (v < 0)
return AVERROR_INVALIDDATA;
dst[x] = v;
}
dst -= stride;
}
}
} else {
return AVERROR_INVALIDDATA;
}
for (int p = 0; p < 3; p++) {
int left, lefttop;
int width = avctx->width >> (p > 0);
ptrdiff_t stride = frame->linesize[p];
uint8_t *dst;
dst = frame->data[p] + (avctx->height - 1) * frame->linesize[p];
s->llviddsp.add_left_pred(dst, dst, width, 0);
dst -= stride;
lefttop = left = dst[0];
for (int y = 1; y < avctx->height; y++) {
s->llviddsp.add_median_pred(dst, dst + stride, dst, width, &left, &lefttop);
lefttop = left = dst[0];
dst -= stride;
}
}
frame->pict_type = AV_PICTURE_TYPE_I;
frame->key_frame = 1;
*got_frame = 1;
return avpkt->size;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
MVHAContext *s = avctx->priv_data;
int zret;
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
s->zstream.zalloc = Z_NULL;
s->zstream.zfree = Z_NULL;
s->zstream.opaque = Z_NULL;
zret = inflateInit(&s->zstream);
if (zret != Z_OK) {
av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
return AVERROR_EXTERNAL;
}
ff_llviddsp_init(&s->llviddsp);
return 0;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
MVHAContext *s = avctx->priv_data;
inflateEnd(&s->zstream);
ff_free_vlc(&s->vlc);
return 0;
}
AVCodec ff_mvha_decoder = {
.name = "mvha",
.long_name = NULL_IF_CONFIG_SMALL("MidiVid Archive Codec"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MVHA,
.priv_data_size = sizeof(MVHAContext),
.init = decode_init,
.close = decode_close,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
};
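After entropy decoding (zlib for LZYV frames, the rebuilt VLC for HUFY frames), the loop at the end of decode_frame() undoes the spatial prediction bottom-up: the last row goes through left prediction and every row above it through the median predictor against the already reconstructed row below. A compact sketch of that reconstruction for a single 8-bit plane, using plain loops in place of LLVidDSPContext; names are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    static int mid_pred3(int a, int b, int c)
    {
        /* median of three, as used by lossless video predictors */
        if (a > b) { int t = a; a = b; b = t; }
        if (b > c)   b = c;
        return a > b ? a : b;
    }

    /* In-place reconstruction of one residual plane, mirroring the MVHA
     * loop above: bottom row = left prediction, rows above = median
     * prediction against the row below.  All arithmetic is modulo 256. */
    static void reconstruct_plane(uint8_t *plane, int width, int height,
                                  ptrdiff_t stride)
    {
        uint8_t *bottom = plane + (ptrdiff_t)(height - 1) * stride;
        int left = 0, topleft;

        for (int x = 0; x < width; x++)            /* bottom row: left pred */
            left = bottom[x] = (bottom[x] + left) & 0xFF;

        if (height < 2)
            return;

        /* the first predicted row is seeded from its own first residual,
         * every later row from the reconstructed row below it */
        topleft = left = plane[(ptrdiff_t)(height - 2) * stride];
        for (int y = height - 2; y >= 0; y--) {
            uint8_t *row = plane + (ptrdiff_t)y * stride;
            const uint8_t *below = row + stride;   /* already reconstructed */
            for (int x = 0; x < width; x++) {
                int pred = mid_pred3(left, below[x],
                                     (left + below[x] - topleft) & 0xFF);
                left    = row[x] = (row[x] + pred) & 0xFF;
                topleft = below[x];
            }
            topleft = left = row[0];
        }
    }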

View File

@ -199,6 +199,7 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
buf_end = buf + buf_size;
jpg->got_picture = 0;
s->got_mxm_bitmask = 0;
s->got_sof_data = !!s->got_sof_data;
while (buf_ptr < buf_end) {
start_code = ff_mjpeg_find_marker(jpg, &buf_ptr, buf_end,
&unescaped_buf_ptr, &unescaped_buf_size);
@ -241,6 +242,11 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
return ret;
break;
case SOF0:
if (s->got_sof_data > 1) {
av_log(avctx, AV_LOG_ERROR,
"Multiple SOF in a frame\n");
return AVERROR_INVALIDDATA;
}
s->got_sof_data = 0;
ret = ff_mjpeg_decode_sof(jpg);
if (ret < 0) {
@ -253,7 +259,7 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
"Interlaced mode not supported in MxPEG\n");
return AVERROR(EINVAL);
}
s->got_sof_data = 1;
s->got_sof_data ++;
break;
case SOS:
if (!s->got_sof_data) {

View File

@ -131,10 +131,10 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
+ RTJPEG_HEADER_SIZE;
if (buf_size > INT_MAX/8)
return -1;
if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
return ret;
avctx->width = c->width = width;
avctx->height = c->height = height;
c->width = width;
c->height = height;
av_fast_malloc(&c->decomp_buf, &c->decomp_size,
buf_size);
if (!c->decomp_buf) {
@ -219,6 +219,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
case NUV_RTJPEG:
minsize = c->width/16 * (c->height/16) * 6;
break;
case NUV_BLACK:
case NUV_COPY_LAST:
case NUV_LZO:
case NUV_RTJPEG_IN_LZO:
break;
default:
av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
return AVERROR_INVALIDDATA;
}
if (buf_size < minsize / 4)
return AVERROR_INVALIDDATA;
@ -307,9 +315,6 @@ retry:
case NUV_COPY_LAST:
/* nothing more to do here */
break;
default:
av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
return AVERROR_INVALIDDATA;
}
if ((result = av_frame_ref(picture, c->pic)) < 0)

View File

@ -1881,7 +1881,11 @@ static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencSur
goto error;
}
if (res = ff_alloc_packet2(avctx, pkt, lock_params.bitstreamSizeInBytes,0)) {
res = pkt->data ?
ff_alloc_packet2(avctx, pkt, lock_params.bitstreamSizeInBytes, lock_params.bitstreamSizeInBytes) :
av_new_packet(pkt, lock_params.bitstreamSizeInBytes);
if (res < 0) {
p_nvenc->nvEncUnlockBitstream(ctx->nvencoder, tmpoutsurf->output_surface);
goto error;
}

View File

@ -61,24 +61,6 @@ static inline void init_put_bits(PutBitContext *s, uint8_t *buffer,
s->bit_buf = 0;
}
/**
* Rebase the bit writer onto a reallocated buffer.
*
* @param buffer the buffer where to put bits
* @param buffer_size the size in bytes of buffer,
* must be larger than the previous size
*/
static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
int buffer_size)
{
av_assert0(8*buffer_size > s->size_in_bits);
s->buf_end = buffer + buffer_size;
s->buf_ptr = buffer + (s->buf_ptr - s->buf);
s->buf = buffer;
s->size_in_bits = 8 * buffer_size;
}
/**
* @return the total number of bits written to the bitstream.
*/
@ -87,6 +69,24 @@ static inline int put_bits_count(PutBitContext *s)
return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
}
/**
* Rebase the bit writer onto a reallocated buffer.
*
* @param buffer the buffer where to put bits
* @param buffer_size the size in bytes of buffer,
* must be large enough to hold everything written so far
*/
static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
int buffer_size)
{
av_assert0(8*buffer_size >= put_bits_count(s));
s->buf_end = buffer + buffer_size;
s->buf_ptr = buffer + (s->buf_ptr - s->buf);
s->buf = buffer;
s->size_in_bits = 8 * buffer_size;
}
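Moving put_bits_count() above rebase_put_bits() lets the assert check against the bits actually written rather than the old size_in_bits, so a writer can now be rebased onto a buffer that only needs to hold what has been written so far. A generic sketch of that rebase invariant with a toy byte-granular writer (this is not FFmpeg's PutBitContext; the real rebase_put_bits() assumes the caller already reallocated and copied the data):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct ToyWriter {
        uint8_t *buf, *ptr, *end;
    } ToyWriter;

    static void toy_init(ToyWriter *w, uint8_t *buf, size_t size)
    {
        w->buf = w->ptr = buf;
        w->end = buf + size;
    }

    static size_t toy_count(const ToyWriter *w)
    {
        return (size_t)(w->ptr - w->buf);   /* bytes written so far */
    }

    /* Rebase onto a new buffer: it only has to hold what was already
     * written, which is the relaxed condition the hunk above asserts. */
    static void toy_rebase(ToyWriter *w, uint8_t *buf, size_t size)
    {
        assert(size >= toy_count(w));
        memcpy(buf, w->buf, toy_count(w));
        w->ptr = buf + toy_count(w);
        w->buf = buf;
        w->end = buf + size;
    }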
/**
* @return the number of bits available in the bitstream.
*/

View File

@ -1704,7 +1704,7 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
s->group_size = bytestream2_get_be32(&gb);
s->fft_size = bytestream2_get_be32(&gb);
s->checksum_size = bytestream2_get_be32(&gb);
if (s->checksum_size >= 1U << 28 || !s->checksum_size) {
if (s->checksum_size >= 1U << 28 || s->checksum_size <= 1) {
av_log(avctx, AV_LOG_ERROR, "data block size invalid (%u)\n", s->checksum_size);
return AVERROR_INVALIDDATA;
}

View File

@ -764,10 +764,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
#if QSV_HAVE_EXT_VP9_PARAM
if (avctx->codec_id == AV_CODEC_ID_VP9) {
q->extvp9param.Header.BufferId = MFX_EXTBUFF_VP9_PARAM;
q->extvp9param.Header.BufferSz = sizeof(q->extvp9param);
q->extvp9param.WriteIVFHeaders = MFX_CODINGOPTION_OFF;
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->extvp9param;
q->extvp9param.Header.BufferId = MFX_EXTBUFF_VP9_PARAM;
q->extvp9param.Header.BufferSz = sizeof(q->extvp9param);
q->extvp9param.WriteIVFHeaders = MFX_CODINGOPTION_OFF;
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->extvp9param;
}
#endif
@ -824,7 +824,9 @@ static int qsv_retrieve_enc_vp9_params(AVCodecContext *avctx, QSVEncContext *q)
#endif
mfxExtBuffer *ext_buffers[] = {
#if QSV_HAVE_EXT_VP9_PARAM
(mfxExtBuffer*)&vp9_extend_buf,
#endif
#if QSV_HAVE_CO2
(mfxExtBuffer*)&co2,
#endif

View File

@ -125,7 +125,7 @@ typedef struct QSVEncContext {
mfxExtMultiFrameControl extmfc;
#endif
#if QSV_HAVE_EXT_VP9_PARAM
mfxExtVP9Param extvp9param;
mfxExtVP9Param extvp9param;
#endif
mfxExtOpaqueSurfaceAlloc opaque_alloc;

View File

@ -77,7 +77,7 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
avctx->channel_layout = AV_CH_LAYOUT_MONO;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
if (avctx->block_align <= 0) {
if (avctx->block_align != 38) {
av_log(avctx, AV_LOG_ERROR, "unsupported block align\n");
return AVERROR_PATCHWELCOME;
}

View File

@ -264,8 +264,8 @@ static int decode_channel(RALFContext *ctx, GetBitContext *gb, int ch,
t = get_vlc2(gb, vlc[cmode].table, vlc[cmode].bits, 2);
t = extend_code(gb, t, 21, add_bits);
if (!cmode)
coeff -= 12 << add_bits;
coeff = t - coeff;
coeff -= 12U << add_bits;
coeff = (unsigned)t - coeff;
ctx->filter[i] = coeff;
cmode = coeff >> add_bits;
@ -408,7 +408,7 @@ static int decode_block(AVCodecContext *avctx, GetBitContext *gb,
case 4:
for (i = 0; i < len; i++) {
t = ch1[i] + ctx->bias[1];
t2 = ((ch0[i] + ctx->bias[0]) << 1) | (t & 1);
t2 = ((ch0[i] + ctx->bias[0]) * 2) | (t & 1);
dst0[i] = (t2 + t) / 2;
dst1[i] = (t2 - t) / 2;
}

View File

@ -223,7 +223,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
FFALIGN(avctx->width, 16),
avctx->height, 1);
} else {
context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample && avctx->bits_per_coded_sample < 16;
context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
avctx->height, 1);
}

View File

@ -473,7 +473,7 @@ static int predictor_calc_error(int *k, int *state, int order, int error)
{
int k_value = *k_ptr, state_value = *state_ptr;
x -= shift_down(k_value * state_value, LATTICE_SHIFT);
state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
state_ptr[1] = state_value + shift_down(k_value * (unsigned)x, LATTICE_SHIFT);
}
#else
for (i = order-2; i >= 0; i--)

View File

@ -586,10 +586,10 @@ static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int
last[0] = (int)((unsigned)last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
last[2] = (int)((unsigned)last[1] + last[3]) >> 1;
t1 = ctx->D[0] + ctx->D[1];
t1 = ctx->D[0] + (unsigned)ctx->D[1];
ctx->D[0] = t1 >> 1;
ctx->D[1] = t1 - (t1 >> 1);
t2 = ctx->D[2] + ctx->D[3];
t2 = ctx->D[2] + (unsigned)ctx->D[3];
ctx->D[2] = t2 >> 1;
ctx->D[3] = t2 - (t2 >> 1);

View File

@ -255,7 +255,7 @@ static void truespeech_synth(TSContext *dec, int16_t *out, int quart)
int sum = 0;
for(k = 0; k < 8; k++)
sum += ptr0[k] * (unsigned)ptr1[k];
sum = out[i] + ((sum + 0x800) >> 12);
sum = out[i] + ((int)(sum + 0x800U) >> 12);
out[i] = av_clip(sum, -0x7FFE, 0x7FFE);
for(k = 7; k > 0; k--)
ptr0[k] = ptr0[k - 1];

View File

@ -129,7 +129,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
s->avctx = avctx;
// 30bytes includes TTA1 header
// 22 bytes for a TTA1 header
if (avctx->extradata_size < 22)
return AVERROR_INVALIDDATA;

View File

@ -28,6 +28,7 @@
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/intreadwrite.h"
#include "thread.h"
#define READ_PIXELS(a, b, c) \
do { \
@ -37,6 +38,12 @@
*c++ = (val >> 20) & 0x3FF; \
} while (0)
typedef struct ThreadData {
AVFrame *frame;
uint8_t *buf;
int stride;
} ThreadData;
static void v210_planar_unpack_c(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width)
{
uint32_t val;
@ -64,62 +71,29 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
avctx->bits_per_raw_sample = 10;
s->thread_count = av_clip(avctx->thread_count, 1, avctx->height/4);
s->aligned_input = 0;
ff_v210dec_init(s);
return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
static int v210_decode_slice(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
V210DecContext *s = avctx->priv_data;
int h, w, ret, stride, aligned_input;
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
int h, w;
ThreadData *td = arg;
AVFrame *frame = td->frame;
int stride = td->stride;
int slice_start = (avctx->height * jobnr) / s->thread_count;
int slice_end = (avctx->height * (jobnr+1)) / s->thread_count;
uint8_t *psrc = td->buf + stride * slice_start;
uint16_t *y, *u, *v;
if (s->custom_stride )
stride = s->custom_stride;
else {
int aligned_width = ((avctx->width + 47) / 48) * 48;
stride = aligned_width * 8 / 3;
}
if (avpkt->size < stride * avctx->height) {
if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
stride = avpkt->size / avctx->height;
if (!s->stride_warning_shown)
av_log(avctx, AV_LOG_WARNING, "Broken v210 with too small padding (64 byte) detected\n");
s->stride_warning_shown = 1;
} else {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
}
if ( avctx->codec_tag == MKTAG('C', '2', '1', '0')
&& avpkt->size > 64
&& AV_RN32(psrc) == AV_RN32("INFO")
&& avpkt->size - 64 >= stride * avctx->height)
psrc += 64;
aligned_input = !((uintptr_t)psrc & 0x1f) && !(stride & 0x1f);
if (aligned_input != s->aligned_input) {
s->aligned_input = aligned_input;
ff_v210dec_init(s);
}
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
y = (uint16_t*)pic->data[0];
u = (uint16_t*)pic->data[1];
v = (uint16_t*)pic->data[2];
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
for (h = 0; h < avctx->height; h++) {
y = (uint16_t*)frame->data[0] + slice_start * frame->linesize[0] / 2;
u = (uint16_t*)frame->data[1] + slice_start * frame->linesize[1] / 2;
v = (uint16_t*)frame->data[2] + slice_start * frame->linesize[2] / 2;
for (h = slice_start; h < slice_end; h++) {
const uint32_t *src = (const uint32_t*)psrc;
uint32_t val;
@ -155,11 +129,65 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
psrc += stride;
y += pic->linesize[0] / 2 - avctx->width + (avctx->width & 1);
u += pic->linesize[1] / 2 - avctx->width / 2;
v += pic->linesize[2] / 2 - avctx->width / 2;
y += frame->linesize[0] / 2 - avctx->width + (avctx->width & 1);
u += frame->linesize[1] / 2 - avctx->width / 2;
v += frame->linesize[2] / 2 - avctx->width / 2;
}
return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
V210DecContext *s = avctx->priv_data;
ThreadData td;
int ret, stride, aligned_input;
ThreadFrame frame = { .f = data };
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
if (s->custom_stride )
stride = s->custom_stride;
else {
int aligned_width = ((avctx->width + 47) / 48) * 48;
stride = aligned_width * 8 / 3;
}
td.stride = stride;
if (avpkt->size < stride * avctx->height) {
if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
stride = avpkt->size / avctx->height;
if (!s->stride_warning_shown)
av_log(avctx, AV_LOG_WARNING, "Broken v210 with too small padding (64 byte) detected\n");
s->stride_warning_shown = 1;
} else {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
}
if ( avctx->codec_tag == MKTAG('C', '2', '1', '0')
&& avpkt->size > 64
&& AV_RN32(psrc) == AV_RN32("INFO")
&& avpkt->size - 64 >= stride * avctx->height)
psrc += 64;
aligned_input = !((uintptr_t)psrc & 0x1f) && !(stride & 0x1f);
if (aligned_input != s->aligned_input) {
s->aligned_input = aligned_input;
ff_v210dec_init(s);
}
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
td.buf = (uint8_t*)psrc;
td.frame = pic;
avctx->execute2(avctx, v210_decode_slice, &td, NULL, s->thread_count);
if (avctx->field_order > AV_FIELD_PROGRESSIVE) {
/* we have interlaced material flagged in container */
pic->interlaced_frame = 1;
@ -194,6 +222,8 @@ AVCodec ff_v210_decoder = {
.priv_data_size = sizeof(V210DecContext),
.init = decode_init,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &v210dec_class,
};
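The rewrite above moves the row loop into v210_decode_slice() and runs it through avctx->execute2(), each job deriving its own band of rows from jobnr and the clamped thread count. A generic sketch of that row-partitioning arithmetic (the execute2 callback plumbing is omitted; names are illustrative):

    #include <stdio.h>

    /* Split `height` rows into `jobs` nearly equal bands; job `jobnr`
     * covers rows [start, end).  The bands tile the image with no gaps
     * and no overlap, which is what the slice decoder relies on. */
    static void slice_bounds(int height, int jobs, int jobnr,
                             int *start, int *end)
    {
        *start = (height *  jobnr)      / jobs;
        *end   = (height * (jobnr + 1)) / jobs;
    }

    int main(void)
    {
        int start, end, jobs = 3, height = 10;
        for (int j = 0; j < jobs; j++) {
            slice_bounds(height, jobs, j, &start, &end);
            printf("job %d: rows %d..%d\n", j, start, end - 1);
        }
        return 0;
    }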

View File

@ -27,6 +27,7 @@ typedef struct {
AVClass *av_class;
int custom_stride;
int aligned_input;
int thread_count;
int stride_warning_shown;
void (*unpack_frame)(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width);
} V210DecContext;

View File

@ -24,6 +24,13 @@
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
#include "thread.h"
typedef struct ThreadData {
AVFrame *frame;
uint8_t *buf;
int stride;
} ThreadData;
static av_cold int v410_decode_init(AVCodecContext *avctx)
{
@ -42,31 +49,24 @@ static av_cold int v410_decode_init(AVCodecContext *avctx)
return 0;
}
static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
static int v410_decode_slice(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
AVFrame *pic = data;
uint8_t *src = avpkt->data;
ThreadData *td = arg;
AVFrame *pic = td->frame;
int stride = td->stride;
int thread_count = av_clip(avctx->thread_count, 1, avctx->height/4);
int slice_start = (avctx->height * jobnr) / thread_count;
int slice_end = (avctx->height * (jobnr+1)) / thread_count;
const uint8_t *src = td->buf + stride * slice_start;
uint16_t *y, *u, *v;
uint32_t val;
int i, j, ret;
int i, j;
if (avpkt->size < 4 * avctx->height * avctx->width) {
av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
return AVERROR(EINVAL);
}
y = (uint16_t*)pic->data[0] + slice_start * (pic->linesize[0] >> 1);
u = (uint16_t*)pic->data[1] + slice_start * (pic->linesize[1] >> 1);
v = (uint16_t*)pic->data[2] + slice_start * (pic->linesize[2] >> 1);
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->key_frame = 1;
pic->pict_type = AV_PICTURE_TYPE_I;
y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];
v = (uint16_t *)pic->data[2];
for (i = 0; i < avctx->height; i++) {
for (i = slice_start; i < slice_end; i++) {
for (j = 0; j < avctx->width; j++) {
val = AV_RL32(src);
@ -82,6 +82,35 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
v += pic->linesize[2] >> 1;
}
return 0;
}
static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
ThreadData td;
ThreadFrame frame = { .f = data };
AVFrame *pic = data;
uint8_t *src = avpkt->data;
int ret;
int thread_count = av_clip(avctx->thread_count, 1, avctx->height/4);
td.stride = avctx->width * 4;
if (avpkt->size < 4 * avctx->height * avctx->width) {
av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
return AVERROR(EINVAL);
}
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
pic->key_frame = 1;
pic->pict_type = AV_PICTURE_TYPE_I;
td.buf = src;
td.frame = pic;
avctx->execute2(avctx, v410_decode_slice, &td, NULL, thread_count);
*got_frame = 1;
return avpkt->size;
@ -94,5 +123,6 @@ AVCodec ff_v410_decoder = {
.id = AV_CODEC_ID_V410,
.init = v410_decode_init,
.decode = v410_decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS
};

View File

@ -717,8 +717,8 @@ int ff_v4l2_context_init(V4L2Context* ctx)
ctx->num_buffers = req.count;
ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
if (!ctx->buffers) {
av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
return AVERROR(ENOMEM);
av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
return AVERROR(ENOMEM);
}
for (i = 0; i < req.count; i++) {

View File

@ -201,6 +201,7 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx)
capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
capture->av_pix_fmt = avctx->pix_fmt;
s->avctx = avctx;
ret = ff_v4l2_m2m_codec_init(priv);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
@ -209,7 +210,6 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx)
return ret;
}
s->avctx = avctx;
return v4l2_prepare_decoder(s);
}

View File

@ -889,7 +889,7 @@ static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
if (q2 && q1 != q2) {
for (k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
ac_val2[k] = (int)(ac_val2[k] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
for (k = 1; k < 8; k++) {
block[k << sh] = ac_val2[k] * scale;
@ -1036,10 +1036,10 @@ static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
if (q2 && q1 != q2) {
if (dc_pred_dir) { // left
for (k = 1; k < 8; k++)
block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
block[k << v->left_blk_sh] += (int)(ac_val[k] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
} else { //top
for (k = 1; k < 8; k++)
block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
block[k << v->top_blk_sh] += (int)(ac_val[k + 8] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
} else {
if (dc_pred_dir) { // left

View File

@ -28,8 +28,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 58
#define LIBAVCODEC_VERSION_MINOR 62
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_MINOR 64
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \

View File

@ -894,6 +894,14 @@ static int get_cv_color_primaries(AVCodecContext *avctx,
*primaries = NULL;
break;
case AVCOL_PRI_BT470BG:
*primaries = kCVImageBufferColorPrimaries_EBU_3213;
break;
case AVCOL_PRI_SMPTE170M:
*primaries = kCVImageBufferColorPrimaries_SMPTE_C;
break;
case AVCOL_PRI_BT709:
*primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
break;

View File

@ -179,6 +179,9 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
/* drop incomplete chunks */
buf_size = audio_chunks * s->chunk_size;
if (silent_chunks + audio_chunks >= INT_MAX / avctx->block_align)
return AVERROR_INVALIDDATA;
/* get output buffer */
frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
avctx->channels;

View File

@ -2715,7 +2715,8 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->next_framep[VP56_FRAME_CURRENT] = curframe;
ff_thread_finish_setup(avctx);
if (avctx->codec->update_thread_context)
ff_thread_finish_setup(avctx);
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);

View File

@ -123,6 +123,7 @@ typedef struct WMACodecContext {
uint8_t last_superframe[MAX_CODED_SUPERFRAME_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; /* padding added */
int last_bitoffset;
int last_superframe_len;
int exponents_initialized;
float noise_table[NOISE_TAB_SIZE];
int noise_index;
float noise_mult; /* XXX: suppress that and integrate it in the noise array */

View File

@ -587,6 +587,9 @@ static int wma_decode_block(WMACodecContext *s)
s->exponents_bsize[ch] = bsize;
}
}
s->exponents_initialized = 1;
}else if (!s->exponents_initialized) {
return AVERROR_INVALIDDATA;
}
/* parse spectral coefficients : just RLE encoding */

View File

@ -1327,6 +1327,7 @@ AVCodec ff_wmalossless_decoder = {
.decode = decode_packet,
.flush = flush,
.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },

View File

@ -1565,9 +1565,9 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
s->frame_offset = get_bits_count(gb) & 7;
s->num_saved_bits = s->frame_offset;
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
}
buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
buflen = (s->num_saved_bits + len + 7) >> 3;
} else
buflen = (put_bits_count(&s->pb) + len + 7) >> 3;
if (len <= 0 || buflen > MAX_FRAMESIZE) {
avpriv_request_sample(s->avctx, "Too small input buffer");
@ -1644,6 +1644,7 @@ static int decode_packet(AVCodecContext *avctx, WMAProDecodeCtx *s,
if (avctx->codec_id == AV_CODEC_ID_WMAPRO && buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
buf_size, avctx->block_align);
s->packet_loss = 1;
return AVERROR_INVALIDDATA;
}
@ -1803,6 +1804,11 @@ static int xma_decode_packet(AVCodecContext *avctx, void *data,
ret = decode_packet(avctx, &s->xma[s->current_stream], s->frames[s->current_stream],
&got_stream_frame_ptr, avpkt);
if (got_stream_frame_ptr && s->offset[s->current_stream] >= 64) {
got_stream_frame_ptr = 0;
ret = AVERROR_INVALIDDATA;
}
/* copy stream samples (1/2ch) to sample buffer (Nch) */
if (got_stream_frame_ptr) {
int start_ch = s->start_channel[s->current_stream];
@ -1930,6 +1936,8 @@ static av_cold int xma_decode_init(AVCodecContext *avctx)
s->start_channel[i] = start_channels;
start_channels += s->xma[i].nb_channels;
}
if (start_channels != avctx->channels)
return AVERROR_INVALIDDATA;
return ret;
}

View File

@ -1523,7 +1523,7 @@ static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
/* "pitch-diff-per-sample" for calculation of pitch per sample */
s->pitch_diff_sh16 =
((cur_pitch_val - s->last_pitch_val) << 16) / MAX_FRAMESIZE;
(cur_pitch_val - s->last_pitch_val) * (1 << 16) / MAX_FRAMESIZE;
}
/* Global gain (if silence) and pitch-adaptive window coordinates */

View File

@ -149,6 +149,7 @@ struct decklink_ctx {
int channels;
int audio_depth;
unsigned long tc_seen; // used with option wait_for_tc
};
typedef enum { DIRECTION_IN, DIRECTION_OUT} decklink_direction_t;

View File

@ -58,6 +58,7 @@ struct decklink_cctx {
int copyts;
int64_t timestamp_align;
int timing_offset;
int wait_for_tc;
};
#endif /* AVDEVICE_DECKLINK_COMMON_C_H */

View File

@ -784,6 +784,8 @@ HRESULT decklink_input_callback::VideoInputFrameArrived(
if (packed_metadata) {
if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
av_freep(&packed_metadata);
else if (!ctx->tc_seen)
ctx->tc_seen = ctx->frameCount;
}
}
}
@ -793,6 +795,14 @@ HRESULT decklink_input_callback::VideoInputFrameArrived(
}
}
if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
"- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
return S_OK;
}
pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
pkt.dts = pkt.pts;

View File

@ -85,6 +85,7 @@ static const AVOption options[] = {
{ "audio_depth", "audio bitdepth (16 or 32)", OFFSET(audio_depth), AV_OPT_TYPE_INT, { .i64 = 16}, 16, 32, DEC },
{ "decklink_copyts", "copy timestamps, do not remove the initial offset", OFFSET(copyts), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
{ "timestamp_align", "capture start time alignment (in seconds)", OFFSET(timestamp_align), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, DEC },
{ "wait_for_tc", "drop frames till a frame with timecode is received. TC format must be set", OFFSET(wait_for_tc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
{ NULL },
};

View File

@ -302,9 +302,13 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
if (lavfi->dump_graph) {
char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
fputs(dump, stderr);
fflush(stderr);
av_free(dump);
if (dump != NULL) {
fputs(dump, stderr);
fflush(stderr);
av_free(dump);
} else {
FAIL(AVERROR(ENOMEM));
}
}
/* fill each stream with the information in the corresponding sink */

View File

@ -29,7 +29,7 @@
#define LIBAVDEVICE_VERSION_MAJOR 58
#define LIBAVDEVICE_VERSION_MINOR 9
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_MICRO 101
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \

View File

@ -146,6 +146,11 @@ static int xcbgrab_reposition(AVFormatContext *s,
return 0;
}
static void xcbgrab_image_reply_free(void *opaque, uint8_t *data)
{
free(opaque);
}
static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
{
XCBGrabContext *c = s->priv_data;
@ -154,7 +159,7 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
xcb_drawable_t drawable = c->screen->root;
xcb_generic_error_t *e = NULL;
uint8_t *data;
int length, ret;
int length;
iq = xcb_get_image(c->conn, XCB_IMAGE_FORMAT_Z_PIXMAP, drawable,
c->x, c->y, c->width, c->height, ~0);
@ -168,6 +173,7 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
"sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
e->response_type, e->error_code,
e->sequence, e->resource_id, e->minor_code, e->major_code);
free(e);
return AVERROR(EACCES);
}
@ -177,14 +183,18 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
data = xcb_get_image_data(img);
length = xcb_get_image_data_length(img);
ret = av_new_packet(pkt, length);
av_init_packet(pkt);
if (!ret)
memcpy(pkt->data, data, length);
pkt->buf = av_buffer_create(data, length, xcbgrab_image_reply_free, img, 0);
if (!pkt->buf) {
free(img);
return AVERROR(ENOMEM);
}
free(img);
pkt->data = data;
pkt->size = length;
return ret;
return 0;
}
static void wait_frame(AVFormatContext *s, AVPacket *pkt)
@ -276,6 +286,7 @@ static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt)
e->response_type, e->error_code,
e->sequence, e->resource_id, e->minor_code, e->major_code);
free(e);
return AVERROR(EACCES);
}
@ -537,6 +548,8 @@ static int create_stream(AVFormatContext *s)
gc = xcb_get_geometry(c->conn, c->screen->root);
geo = xcb_get_geometry_reply(c->conn, gc, NULL);
if (!geo)
return AVERROR_EXTERNAL;
if (c->x + c->width > geo->width ||
c->y + c->height > geo->height) {
@ -546,6 +559,7 @@ static int create_stream(AVFormatContext *s)
c->width, c->height,
c->x, c->y,
geo->width, geo->height);
free(geo);
return AVERROR(EINVAL);
}
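Rather than allocating a packet and copying the X server reply into it, the grabber now hands the reply's pixel data straight to the packet through av_buffer_create(), whose free callback releases the xcb reply when the last reference is dropped. A hedged minimal sketch of that zero-copy wrapping, with a plain allocation standing in for the xcb_get_image_reply_t (libavutil's public buffer API is assumed):

    #include <stdint.h>
    #include <stdlib.h>
    #include <libavutil/buffer.h>

    /* Free callback: `opaque` is the container that owns `data`. */
    static void blob_free(void *opaque, uint8_t *data)
    {
        free(opaque);   /* releases the whole reply; data points inside it */
    }

    /* Wrap externally owned memory in a refcounted AVBufferRef without
     * copying.  `blob` stands in for the xcb reply, `data`/`size` for
     * xcb_get_image_data() / xcb_get_image_data_length(). */
    static AVBufferRef *wrap_blob(void *blob, uint8_t *data, int size)
    {
        AVBufferRef *buf = av_buffer_create(data, size, blob_free, blob, 0);
        if (!buf)
            free(blob);  /* wrapping failed, release the reply ourselves */
        return buf;
    }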

View File

@ -88,6 +88,7 @@ OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o framesync.o
OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
OBJS-$(CONFIG_AXCORRELATE_FILTER) += af_axcorrelate.o
OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
@ -289,7 +290,7 @@ OBJS-$(CONFIG_LUMAKEY_FILTER) += vf_lumakey.o
OBJS-$(CONFIG_LUT1D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUT2_FILTER) += vf_lut2.o framesync.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o framesync.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync.o
@ -358,12 +359,12 @@ OBJS-$(CONFIG_ROBERTS_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o
opencl/convolution.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale.o
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o vf_scale_cuda.ptx.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale_eval.o
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o vf_scale_cuda.ptx.o scale_eval.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale_eval.o
OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_scale_qsv.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale.o vaapi_vpp.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale_eval.o
OBJS-$(CONFIG_SCROLL_FILTER) += vf_scroll.o
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o
@ -442,6 +443,7 @@ OBJS-$(CONFIG_XSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o yadif_common.o
OBJS-$(CONFIG_YADIF_CUDA_FILTER) += vf_yadif_cuda.o vf_yadif_cuda.ptx.o \
yadif_common.o
OBJS-$(CONFIG_YAEPBLUR_FILTER) += vf_yaepblur.o
OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_ZOOMPAN_FILTER) += vf_zoompan.o
OBJS-$(CONFIG_ZSCALE_FILTER) += vf_zscale.o

View File

@ -585,7 +585,7 @@ static int filter_frame(AVFilterLink *inlink)
out->nb_samples = FFMIN(s->hop_size, s->samples_left);
out->pts = s->pts;
s->pts += s->hop_size;
s->pts += av_rescale_q(s->hop_size, (AVRational){1, outlink->sample_rate}, outlink->time_base);
s->detected_errors += detected_errors;
s->nb_samples += out->nb_samples * inlink->channels;
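This hunk, like the matching ones in the filters further down, stops assuming that the output time base is 1/sample_rate: the number of samples produced is rescaled into the link's actual time base before advancing pts. A short sketch of that av_rescale_q() conversion, using only public libavutil API (the 90 kHz time base is just an example):

    #include <stdint.h>
    #include <stdio.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        int64_t pts = 0;
        int sample_rate = 44100;
        int hop_size = 1024;                 /* samples produced per frame */
        AVRational tb = { 1, 90000 };        /* output link time base */

        /* Advance pts by hop_size samples expressed in the output time
         * base; when tb is 1/sample_rate this reduces to pts += hop_size. */
        for (int i = 0; i < 3; i++) {
            pts += av_rescale_q(hop_size,
                                (AVRational){ 1, sample_rate }, tb);
            printf("frame %d: pts=%lld\n", i, (long long)pts);
        }
        return 0;
    }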

View File

@ -24,6 +24,7 @@
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
typedef struct AudioEchoContext {
@ -36,6 +37,7 @@ typedef struct AudioEchoContext {
uint8_t **delayptrs;
int max_samples, fade_out;
int *samples;
int eof;
int64_t next_pts;
void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
@ -302,42 +304,65 @@ static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AudioEchoContext *s = ctx->priv;
int ret;
int nb_samples = FFMIN(s->fade_out, 2048);
AVFrame *frame = ff_get_audio_buffer(outlink, nb_samples);
ret = ff_request_frame(ctx->inputs[0]);
if (!frame)
return AVERROR(ENOMEM);
s->fade_out -= nb_samples;
if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
int nb_samples = FFMIN(s->fade_out, 2048);
AVFrame *frame;
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
outlink->channels,
frame->format);
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
s->fade_out -= nb_samples;
s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
frame->nb_samples, outlink->channels);
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
outlink->channels,
frame->format);
frame->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
frame->nb_samples, outlink->channels);
return ff_filter_frame(outlink, frame);
}
frame->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
static int activate(AVFilterContext *ctx)
{
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
AudioEchoContext *s = ctx->priv;
AVFrame *in;
int ret, status;
int64_t pts;
return ff_filter_frame(outlink, frame);
FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
ret = ff_inlink_consume_frame(inlink, &in);
if (ret < 0)
return ret;
if (ret > 0)
return filter_frame(inlink, in);
if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
if (status == AVERROR_EOF)
s->eof = 1;
}
return ret;
if (s->eof && s->fade_out <= 0) {
ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
return 0;
}
if (!s->eof)
FF_FILTER_FORWARD_WANTED(outlink, inlink);
return request_frame(outlink);
}
static const AVFilterPad aecho_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
@ -345,7 +370,6 @@ static const AVFilterPad aecho_inputs[] = {
static const AVFilterPad aecho_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.config_props = config_output,
.type = AVMEDIA_TYPE_AUDIO,
},
@ -359,6 +383,7 @@ AVFilter ff_af_aecho = {
.priv_size = sizeof(AudioEchoContext),
.priv_class = &aecho_class,
.init = init,
.activate = activate,
.uninit = uninit,
.inputs = aecho_inputs,
.outputs = aecho_outputs,

View File

@ -141,24 +141,25 @@ typedef struct AudioFFTDeNoiseContext {
} AudioFFTDeNoiseContext;
#define OFFSET(x) offsetof(AudioFFTDeNoiseContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption afftdn_options[] = {
{ "nr", "set the noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_FLOAT, {.dbl = 12}, .01, 97, A },
{ "nf", "set the noise floor", OFFSET(noise_floor), AV_OPT_TYPE_FLOAT, {.dbl =-50}, -80,-20, A },
{ "nt", "set the noise type", OFFSET(noise_type), AV_OPT_TYPE_INT, {.i64 = WHITE_NOISE}, WHITE_NOISE, NB_NOISE-1, A, "type" },
{ "w", "white noise", 0, AV_OPT_TYPE_CONST, {.i64 = WHITE_NOISE}, 0, 0, A, "type" },
{ "v", "vinyl noise", 0, AV_OPT_TYPE_CONST, {.i64 = VINYL_NOISE}, 0, 0, A, "type" },
{ "s", "shellac noise", 0, AV_OPT_TYPE_CONST, {.i64 = SHELLAC_NOISE}, 0, 0, A, "type" },
{ "c", "custom noise", 0, AV_OPT_TYPE_CONST, {.i64 = CUSTOM_NOISE}, 0, 0, A, "type" },
{ "bn", "set the custom bands noise", OFFSET(band_noise_str), AV_OPT_TYPE_STRING, {.str = 0}, 0, 0, A },
{ "rf", "set the residual floor", OFFSET(residual_floor), AV_OPT_TYPE_FLOAT, {.dbl =-38}, -80,-20, A },
{ "tn", "track noise", OFFSET(track_noise), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A },
{ "tr", "track residual", OFFSET(track_residual), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A },
{ "om", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64 = OUT_MODE}, 0, NB_MODES-1, A, "mode" },
{ "i", "input", 0, AV_OPT_TYPE_CONST, {.i64 = IN_MODE}, 0, 0, A, "mode" },
{ "o", "output", 0, AV_OPT_TYPE_CONST, {.i64 = OUT_MODE}, 0, 0, A, "mode" },
{ "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64 = NOISE_MODE}, 0, 0, A, "mode" },
{ "nr", "set the noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_FLOAT, {.dbl = 12}, .01, 97, AFR },
{ "nf", "set the noise floor", OFFSET(noise_floor), AV_OPT_TYPE_FLOAT, {.dbl =-50}, -80,-20, AFR },
{ "nt", "set the noise type", OFFSET(noise_type), AV_OPT_TYPE_INT, {.i64 = WHITE_NOISE}, WHITE_NOISE, NB_NOISE-1, AF, "type" },
{ "w", "white noise", 0, AV_OPT_TYPE_CONST, {.i64 = WHITE_NOISE}, 0, 0, AF, "type" },
{ "v", "vinyl noise", 0, AV_OPT_TYPE_CONST, {.i64 = VINYL_NOISE}, 0, 0, AF, "type" },
{ "s", "shellac noise", 0, AV_OPT_TYPE_CONST, {.i64 = SHELLAC_NOISE}, 0, 0, AF, "type" },
{ "c", "custom noise", 0, AV_OPT_TYPE_CONST, {.i64 = CUSTOM_NOISE}, 0, 0, AF, "type" },
{ "bn", "set the custom bands noise", OFFSET(band_noise_str), AV_OPT_TYPE_STRING, {.str = 0}, 0, 0, AF },
{ "rf", "set the residual floor", OFFSET(residual_floor), AV_OPT_TYPE_FLOAT, {.dbl =-38}, -80,-20, AFR },
{ "tn", "track noise", OFFSET(track_noise), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AFR },
{ "tr", "track residual", OFFSET(track_residual), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AFR },
{ "om", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64 = OUT_MODE}, 0, NB_MODES-1, AFR, "mode" },
{ "i", "input", 0, AV_OPT_TYPE_CONST, {.i64 = IN_MODE}, 0, 0, AFR, "mode" },
{ "o", "output", 0, AV_OPT_TYPE_CONST, {.i64 = OUT_MODE}, 0, 0, AFR, "mode" },
{ "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64 = NOISE_MODE}, 0, 0, AFR, "mode" },
{ NULL }
};
@ -1260,7 +1261,7 @@ static int output_frame(AVFilterLink *inlink)
ret = ff_filter_frame(outlink, out);
if (ret < 0)
goto end;
s->pts += s->sample_advance;
s->pts += av_rescale_q(s->sample_advance, (AVRational){1, outlink->sample_rate}, outlink->time_base);
end:
av_frame_free(&in);
@ -1375,6 +1376,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
{
AudioFFTDeNoiseContext *s = ctx->priv;
int need_reset = 0;
int ret = 0;
if (!strcmp(cmd, "sample_noise") ||
!strcmp(cmd, "sn")) {
@ -1386,31 +1388,11 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
s->sample_noise_start = 0;
s->sample_noise_end = 1;
}
} else if (!strcmp(cmd, "nr") ||
!strcmp(cmd, "noise_reduction")) {
float nr;
if (av_sscanf(args, "%f", &nr) == 1) {
s->noise_reduction = av_clipf(nr, 0.01, 97);
need_reset = 1;
}
} else if (!strcmp(cmd, "nf") ||
!strcmp(cmd, "noise_floor")) {
float nf;
if (av_sscanf(args, "%f", &nf) == 1) {
s->noise_floor = av_clipf(nf, -80, -20);
need_reset = 1;
}
} else if (!strcmp(cmd, "output_mode") ||
!strcmp(cmd, "om")) {
if (!strcmp(args, "i")) {
s->output_mode = IN_MODE;
} else if (!strcmp(args, "o")) {
s->output_mode = OUT_MODE;
} else if (!strcmp(args, "n")) {
s->output_mode = NOISE_MODE;
}
} else {
ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
if (ret < 0)
return ret;
need_reset = 1;
}
if (need_reset)

View File

@ -315,7 +315,7 @@ static int filter_frame(AVFilterLink *inlink)
}
out->pts = s->pts;
s->pts += s->hop_size;
s->pts += av_rescale_q(s->hop_size, (AVRational){1, outlink->sample_rate}, outlink->time_base);
for (ch = 0; ch < inlink->channels; ch++) {
float *dst = (float *)out->extended_data[ch];

View File

@ -318,7 +318,7 @@ static int activate(AVFilterContext *ctx)
dst = (double *)out->data[0];
out->pts = s->pts;
s->pts += nb_samples;
s->pts += av_rescale_q(nb_samples, (AVRational){1, ctx->outputs[0]->sample_rate}, ctx->outputs[0]->time_base);
gate(s, (double *)in[0]->data[0], dst,
(double *)in[1]->data[0], nb_samples,

Some files were not shown because too many files have changed in this diff.