Compare commits
382 Commits
438b0eae71
...
a1b65bf80a
Author | SHA1 | Date |
---|---|---|
MrBesen | a1b65bf80a | |
James Almer | e7c04eaf50 | |
James Almer | 05b3460991 | |
Anton Khirnov | bc4f0e713b | |
James Almer | c96a9636c8 | |
James Almer | cc929cee76 | |
James Almer | f63407a986 | |
James Almer | 21847e2a52 | |
James Almer | 7e5d143bb5 | |
James Almer | 7abc0979e6 | |
James Almer | 4e4ac20340 | |
James Almer | 56c8856966 | |
James Almer | 717b2074ec | |
James Almer | ea5a910628 | |
James Almer | ceb236c1c5 | |
James Almer | 0c19dd2df4 | |
Michael Niedermayer | d26589c27a | |
Michael Niedermayer | 9d6be83085 | |
Michael Niedermayer | 891bcc4acc | |
Gyan Doshi | 0777b197c5 | |
Gyan Doshi | 0dc0837960 | |
Gyan Doshi | ce2cfa67aa | |
Gyan Doshi | 6310fbd543 | |
Gyan Doshi | cc9a7cae76 | |
Gyan Doshi | c21462d42e | |
Andreas Rheinhardt | b55c6b8c40 | |
Andreas Rheinhardt | 56a04b7c38 | |
Andreas Rheinhardt | 30859c270f | |
Andreas Rheinhardt | 8ae026d74f | |
Limin Wang | 44a80897e8 | |
Limin Wang | 72a08af8ba | |
Andriy Gelman | 8a8966295f | |
Paul B Mahol | 7e4b0d377f | |
Paul B Mahol | cc752233fb | |
Paul B Mahol | c43f8baa41 | |
Paul B Mahol | d580c7a797 | |
Paul B Mahol | 928020b9d0 | |
Paul B Mahol | f6f6857c2a | |
Paul B Mahol | 80c4c336f9 | |
Paul B Mahol | 2a9a52b913 | |
Paul B Mahol | f9897eda43 | |
Guo, Yejun | 4e1ae43b17 | |
Guo, Yejun | 95ade711eb | |
Paul B Mahol | d9a52b0bbf | |
Lynne | 9e01f171f3 | |
Jun Zhao | 722547996c | |
Jun Zhao | b5cea39190 | |
Jun Zhao | a2619a473e | |
Jun Zhao | b7bf55550c | |
Jun Zhao | 419e47788a | |
Jun Zhao | f2a095ac2a | |
Jun Zhao | d9e78a723a | |
Jun Zhao | 8cebc8e479 | |
Jun Zhao | 31b24588c5 | |
Jun Zhao | 94004a8b65 | |
Jun Zhao | 692f0519bb | |
Michael Niedermayer | 029cc9883f | |
Michael Niedermayer | 285138ef14 | |
Michael Niedermayer | 794352ae9d | |
Michael Niedermayer | 6a865cec5e | |
Michael Niedermayer | c0bd5fa43d | |
Michael Niedermayer | fea90e9438 | |
Michael Niedermayer | 32fb919836 | |
Michael Niedermayer | c56a52a82c | |
Limin Wang | 8250bb49b1 | |
Paul B Mahol | 1a7f4a122e | |
Andreas Rheinhardt | 6a1bf8634a | |
Paul B Mahol | b650046860 | |
Limin Wang | dd39dbf983 | |
Limin Wang | a144cd6a51 | |
Gyan Doshi | c11b3253a4 | |
Jun Zhao | 883e6af710 | |
Alexandre Heitor Schmidt | ae436cc5e4 | |
Andreas Rheinhardt | ad1b0a12f3 | |
Limin Wang | 9519c8dbb7 | |
Limin Wang | 8a62d88b0e | |
Gyan Doshi | 22a06a539d | |
Andreas Rheinhardt | c4c96d5942 | |
Paul B Mahol | 52bf43eb49 | |
Paul B Mahol | 03a7240a73 | |
Paul B Mahol | e364fe4cca | |
Gilles Bouthenot | 929e5159bc | |
Martin Storsjö | 3d894db700 | |
Philip Langdale | 3ea7057677 | |
Andreas Rheinhardt | 94cdf82d53 | |
Andreas Rheinhardt | 198081efb7 | |
Martin Storsjö | 2dc2b11fba | |
Gyan Doshi | 5bd001043d | |
Andreas Rheinhardt | 653ef8828a | |
Andreas Rheinhardt | 373c1c9b69 | |
Limin Wang | dce2e0657e | |
Anthony Delannoy | 3def315c5c | |
Andreas Rheinhardt | 8aeab0dbc1 | |
Andreas Rheinhardt | ad18f69bcb | |
Andreas Rheinhardt | 45e7c67aff | |
Andreas Rheinhardt | bb20f3dd73 | |
Andreas Rheinhardt | 3fd68fd955 | |
Marton Balint | 3414115cd4 | |
Marton Balint | f4a8ea7ff6 | |
Marton Balint | c371463915 | |
Marton Balint | c05d82fa92 | |
Marton Balint | 4bf90e095b | |
Marton Balint | 4d7f8254ac | |
Guo, Yejun | 37d24a6c8f | |
Guo, Yejun | 04e6f8a143 | |
James Zern | 742221d339 | |
Michael Niedermayer | 06f6857b54 | |
Limin Wang | d31a1266a7 | |
Andreas Rheinhardt | 220846f74f | |
Limin Wang | 10f0332fd5 | |
Mark Thompson | f907eea863 | |
Xinpeng Sun | f0e7ac5986 | |
Carl Eugen Hoyos | a665a2ecda | |
Carl Eugen Hoyos | 96fab29e96 | |
Paul B Mahol | e21ba176c9 | |
Paul B Mahol | 51927d33b7 | |
Gyan Doshi | d0b0e8ecc8 | |
Gyan Doshi | 71ac3b6edc | |
Paul B Mahol | 27ec72db06 | |
Limin Wang | 1e3f4b5f19 | |
Derek Buitenhuis | a2d3511154 | |
Ulrich Spörlein | 60d599e217 | |
Michael Niedermayer | b0a718923b | |
Gyan Doshi | 8aa3c09c1b | |
Paul B Mahol | e26d66daaa | |
Paul B Mahol | 1187dbb7e9 | |
Gyan Doshi | 5d82c078ea | |
Gyan Doshi | fa3ad7bbc6 | |
Gyan Doshi | fa677750de | |
Sebastian Pop | c3a17ffff6 | |
Limin Wang | ebbc976ae6 | |
Carl Eugen Hoyos | f7c945abc0 | |
Jun Zhao | 304eaa63a9 | |
Paul B Mahol | 389865352d | |
Paul B Mahol | c8253cb332 | |
Paul B Mahol | 6a1305e8b7 | |
Jun Zhao | 3c8da7b982 | |
Jun Zhao | c8e72a6494 | |
Jun Zhao | cf9c9a9db9 | |
Jun Zhao | 0099f71502 | |
Marton Balint | 944203270d | |
Marton Balint | 2b7097ef27 | |
Marton Balint | 78a233e6e9 | |
Marton Balint | d111a41f9d | |
Marton Balint | beb7f93b23 | |
Marton Balint | 57df8839e1 | |
Marton Balint | 43d5ddb4b5 | |
Marton Balint | 04e36fc4e1 | |
Marton Balint | b693b06b22 | |
Marton Balint | bc17b831dd | |
Paul B Mahol | 81172b5e3a | |
Paul B Mahol | ed02563ce0 | |
Zhong Li | 12c4d00c10 | |
Timo Rothenpieler | d4996a600c | |
Marton Balint | e47a954631 | |
Marton Balint | a613d042e7 | |
Marton Balint | 91f64ea4c5 | |
Marton Balint | dfea6d2e50 | |
Marton Balint | 9ac1066dc6 | |
Marton Balint | 710e9f3f18 | |
Marton Balint | 7c3125133f | |
Marton Balint | 5d35a6e8e2 | |
Marton Balint | f20bdbff6d | |
Marton Balint | 59172164f3 | |
James Almer | 3670a10c2d | |
James Almer | 278a91f8fe | |
James Almer | 3c22436ddf | |
Andreas Rheinhardt | 40d9cbdc22 | |
Andreas Rheinhardt | 34e3810b04 | |
Andreas Rheinhardt | 2968da7d9e | |
Andreas Rheinhardt | 6d354aeb29 | |
Matt Oliver | 1fd8929450 | |
Paul B Mahol | d4d6280ab2 | |
Paul B Mahol | 7bb09e57e0 | |
Jun Zhao | d7e2a2bb35 | |
Gyan Doshi | 7b58702cbd | |
Michael Niedermayer | a15d904ad7 | |
Michael Niedermayer | 9a8471e3f0 | |
Michael Niedermayer | e900621074 | |
Michael Niedermayer | f72fa1ad67 | |
Michael Niedermayer | 79d5d79f38 | |
Michael Niedermayer | fa47f6412d | |
Michael Niedermayer | 89fd76db71 | |
Michael Niedermayer | 4cfc563c57 | |
Michael Niedermayer | 7d475c53c8 | |
Michael Niedermayer | 53efab44a9 | |
Michael Niedermayer | aea6755611 | |
Michael Niedermayer | 4dc93ae3d7 | |
Michael Niedermayer | 77ba9e3252 | |
Zhao Zhili | 609285af51 | |
Michael Niedermayer | c1411a1132 | |
Zhao Zhili | f78dc66089 | |
Zhao Zhili | 1e3e547a5b | |
Marton Balint | f1353ce222 | |
Marton Balint | f6845269c6 | |
Paul B Mahol | f651b18c19 | |
Zhao Zhili | b2491566a6 | |
Paul B Mahol | e20c6d95b2 | |
Paul B Mahol | 22d3552f44 | |
Paul B Mahol | 26eba8ca61 | |
Paul B Mahol | 11f6657e92 | |
Paul B Mahol | 612b5791b8 | |
Paul B Mahol | d3d6f5a76e | |
Paul B Mahol | cc43c2f29a | |
Zhong Li | 755ad01dd1 | |
Zhong Li | 779951f7b2 | |
Zhong Li | 9fff5c40a7 | |
Andreas Rheinhardt | 2ff687c17f | |
Andreas Rheinhardt | 6c735b96ee | |
James Almer | eb17a7906b | |
Paul B Mahol | 6399eed48a | |
Paul B Mahol | b3216f13ce | |
Carl Eugen Hoyos | 2e700ec3a7 | |
Paul B Mahol | 2736dc0564 | |
Paul B Mahol | 29b765d657 | |
Paul B Mahol | 1669c970b1 | |
Michael Niedermayer | 5c0d1f7896 | |
Michael Niedermayer | 47fd73ace2 | |
Michael Niedermayer | 45259a0ee4 | |
Michael Niedermayer | 26f040bcb4 | |
Michael Niedermayer | 3ee9240be3 | |
Michael Niedermayer | d5e7f01090 | |
Michael Niedermayer | 18ff210efb | |
Michael Niedermayer | 1f20969457 | |
Michael Niedermayer | bf5c850b79 | |
Michael Niedermayer | f1b97f62f8 | |
Andriy Gelman | fc3760a66d | |
Marton Balint | a619787a9c | |
Marton Balint | 672b925e8a | |
Marton Balint | 5edacc4609 | |
Paul B Mahol | 6c883e214a | |
Paul B Mahol | 50cfe9662d | |
Paul B Mahol | 5fb37598ad | |
Paul B Mahol | 954637805d | |
Paul B Mahol | fad62eebee | |
Marvin Scholz | f37bfd3f62 | |
Marvin Scholz | c3bb6efef2 | |
Limin Wang | 93671d6755 | |
Andreas Rheinhardt | 39561bbe77 | |
Andreas Rheinhardt | 9bde6c6be0 | |
Andreas Rheinhardt | b0d0d7e4d0 | |
Andreas Rheinhardt | 28d02524a0 | |
Andreas Rheinhardt | a94e6b50c6 | |
Andreas Rheinhardt | 827bdc8418 | |
Limin Wang | ef91e3955a | |
Limin Wang | f8f86f8356 | |
Paul B Mahol | b5f0cea16c | |
Michael Niedermayer | 4fa2d5a692 | |
Andreas Rheinhardt | 398a5f5d8f | |
Marton Balint | c991e9cd91 | |
Marton Balint | 16685114d5 | |
Paul B Mahol | e3dca0744d | |
Michael Niedermayer | d6553e2e60 | |
Limin Wang | 3dd6c4478b | |
Gyan Doshi | 287620f59c | |
Paul B Mahol | a13af9aee1 | |
Paul B Mahol | 13bffa7969 | |
Paul B Mahol | 9da7536ce2 | |
Paul B Mahol | 547b0c61af | |
Paul B Mahol | 6c9a9dd25a | |
Zhong Li | 5dc527f9ca | |
Paul B Mahol | 94ad5d0128 | |
Paul B Mahol | 94682555c6 | |
Paul B Mahol | 16968b619d | |
Zhao Zhili | 0115dbd043 | |
Zhao Zhili | 61097535cd | |
Zhao Zhili | 07ffdedf78 | |
Zhao Zhili | 807e90d232 | |
Zhao Zhili | bf08264daa | |
Gyan Doshi | f0b3b55002 | |
James Almer | c75f246a3c | |
Steven Liu | ec2a8e3390 | |
Nicolas George | f09ae7309d | |
Nicolas George | 02daafb45c | |
Nicolas George | f3a6ef69bf | |
Nicolas George | 65e6850c56 | |
Nicolas George | 9ea7e68907 | |
Limin Wang | 03eb96f9b7 | |
Andreas Rheinhardt | 5b42d33571 | |
Andreas Rheinhardt | 74a8be3546 | |
Andreas Rheinhardt | 5ba3a8958c | |
Andreas Rheinhardt | 53c1458bf2 | |
Andreas Rheinhardt | 149ee954a3 | |
Andreas Rheinhardt | bd131b64bc | |
Andreas Rheinhardt | 9e4b3ccbb6 | |
Andreas Rheinhardt | ae84305036 | |
Andreas Rheinhardt | 728c44b861 | |
Andreas Rheinhardt | 7d6637bcc4 | |
Jun Zhao | c1e215041b | |
James Almer | 231ffd7e63 | |
Paul B Mahol | e890ce62ef | |
James Almer | 191df4f2d1 | |
James Almer | 245ace4e2d | |
James Almer | 58ac760816 | |
Nicolas Gaullier | 1fcbaa37e6 | |
Andreas Rheinhardt | 15345881e7 | |
Nicolas Gaullier | f40fb7963e | |
Paul B Mahol | 26f4ee37f7 | |
Limin Wang | 5e5bbd21e6 | |
Wonkap Jang | a86bb2f606 | |
Paul B Mahol | 786a2daa3d | |
Limin Wang | dbb051767f | |
Andreas Rheinhardt | 99f505d2df | |
James Almer | abf5e7bc21 | |
James Almer | 8887991a31 | |
Martin Storsjö | aad0e26f93 | |
Martin Storsjö | e10654de2b | |
Sebastian Pop | bd83191271 | |
Linjie Fu | e43d66dc67 | |
Linjie Fu | fb705e4073 | |
Linjie Fu | ed2bd94fc0 | |
Linjie Fu | d31a290226 | |
Lynne | 377a095dc3 | |
Paul B Mahol | 3530fdc78e | |
Martin Storsjö | 0f50be625f | |
Linjie Fu | efefba61f8 | |
Xinpeng Sun | 2e2dfe6673 | |
Carl Eugen Hoyos | 9f6a06d927 | |
Carl Eugen Hoyos | 84db67894f | |
Andriy Gelman | c07a772473 | |
Andreas Rheinhardt | ed9279afbd | |
Michael Niedermayer | 9fe07908c3 | |
Michael Niedermayer | 01593278ce | |
Michael Niedermayer | 5a8877da35 | |
Michael Niedermayer | a76690c02b | |
Michael Niedermayer | 19c41969b2 | |
Michael Niedermayer | 92455c8c65 | |
Michael Niedermayer | 6b7bcd437e | |
Michael Niedermayer | a9cc69c0d5 | |
Michael Niedermayer | 50db30b47d | |
Michael Niedermayer | 5d0b3e282a | |
Michael Niedermayer | ec3fe67074 | |
Michael Niedermayer | 9d42826580 | |
Michael Niedermayer | 185f441ba2 | |
Michael Niedermayer | 2c2ee67fd3 | |
Michael Niedermayer | c94cb8d9b2 | |
Michael Niedermayer | f064c7c449 | |
Marton Balint | 1f8e43938b | |
Marton Balint | 4cd2cee7ed | |
Marton Balint | 6498522bef | |
Marton Balint | 28b5dc6199 | |
Carl Eugen Hoyos | 8b5ef2dcff | |
Carl Eugen Hoyos | 193143486e | |
Paul B Mahol | 24424a6516 | |
Michael Niedermayer | bbe27890ff | |
Guo, Yejun | e52070e89c | |
Guo, Yejun | ed9fc2e3c5 | |
James Almer | 54d09eb8d0 | |
Paul B Mahol | 824324db41 | |
Martin Storsjö | 06ec9c4746 | |
Martin Storsjö | f58bda642d | |
Michael Niedermayer | 9d1f7870a9 | |
Michael Niedermayer | ab3044368f | |
Linjie Fu | 7aef2f59b5 | |
Linjie Fu | bffb9326b6 | |
Andreas Rheinhardt | 82d61a9ce3 | |
Andreas Rheinhardt | c1e439d7e9 | |
Andreas Rheinhardt | cb88cdf773 | |
Andreas Rheinhardt | c1d300f83a | |
Andreas Rheinhardt | 2e328a8a38 | |
Andreas Rheinhardt | 56ce2ad2cc | |
Zhong Li | a87b5d5e8c | |
Linjie Fu | 8446318502 | |
Yuki Tsuchiya | 0ceed513d5 | |
James Almer | 6467a15997 | |
Yuki Tsuchiya | 632b8298b7 | |
Yuki Tsuchiya | 30047b6a57 | |
Yuki Tsuchiya | 610473b967 | |
James Almer | aedffc0b22 | |
Martin Storsjö | 29f8d4e947 | |
Martin Storsjö | e9bb1410e4 | |
Martin Storsjö | b85dcd8586 | |
James Almer | 13f2b6dc72 | |
James Almer | a23dd33606 | |
Martin Storsjö | c27a85b983 | |
Martin Storsjö | 8f70e261fa | |
Martin Storsjö | 6569e9505c | |
Fei Wang | 5fc3099caf | |
Andreas Rheinhardt | 86e9747c63 | |
Andreas Rheinhardt | a6d292b954 | |
Andreas Rheinhardt | 91f775e0c5 | |
Gyan Doshi | 78676ee8f8 |
|
@ -5,8 +5,8 @@ version <next>:
|
||||||
- v360 filter
|
- v360 filter
|
||||||
- Intel QSV-accelerated MJPEG decoding
|
- Intel QSV-accelerated MJPEG decoding
|
||||||
- Intel QSV-accelerated VP9 decoding
|
- Intel QSV-accelerated VP9 decoding
|
||||||
- support for TrueHD in mp4
|
- Support for TrueHD in mp4
|
||||||
- Supoort AMD AMF encoder on Linux (via Vulkan)
|
- Support AMD AMF encoder on Linux (via Vulkan)
|
||||||
- IMM5 video decoder
|
- IMM5 video decoder
|
||||||
- ZeroMQ protocol
|
- ZeroMQ protocol
|
||||||
- support Sipro ACELP.KELVIN decoding
|
- support Sipro ACELP.KELVIN decoding
|
||||||
|
@ -27,6 +27,9 @@ version <next>:
|
||||||
- axcorrelate filter
|
- axcorrelate filter
|
||||||
- mvdv decoder
|
- mvdv decoder
|
||||||
- mvha decoder
|
- mvha decoder
|
||||||
|
- MPEG-H 3D Audio support in mp4
|
||||||
|
- thistogram filter
|
||||||
|
- freezeframes filter
|
||||||
|
|
||||||
|
|
||||||
version 4.2:
|
version 4.2:
|
||||||
|
|
|
@ -35,7 +35,6 @@ Specifically, the GPL parts of FFmpeg are:
|
||||||
- `vf_eq.c`
|
- `vf_eq.c`
|
||||||
- `vf_find_rect.c`
|
- `vf_find_rect.c`
|
||||||
- `vf_fspp.c`
|
- `vf_fspp.c`
|
||||||
- `vf_geq.c`
|
|
||||||
- `vf_histeq.c`
|
- `vf_histeq.c`
|
||||||
- `vf_hqdn3d.c`
|
- `vf_hqdn3d.c`
|
||||||
- `vf_kerndeint.c`
|
- `vf_kerndeint.c`
|
||||||
|
|
|
@ -1096,7 +1096,7 @@ AVSC_INLINE AVS_Library * avs_load_library() {
|
||||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||||
if (library == NULL)
|
if (library == NULL)
|
||||||
return NULL;
|
return NULL;
|
||||||
library->handle = LoadLibrary("avisynth");
|
library->handle = LoadLibraryA("avisynth");
|
||||||
if (library->handle == NULL)
|
if (library->handle == NULL)
|
||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
|
|
|
@ -482,6 +482,7 @@ Developer options (useful when working on FFmpeg itself):
|
||||||
--ignore-tests=TESTS comma-separated list (without "fate-" prefix
|
--ignore-tests=TESTS comma-separated list (without "fate-" prefix
|
||||||
in the name) of tests whose result is ignored
|
in the name) of tests whose result is ignored
|
||||||
--enable-linux-perf enable Linux Performance Monitor API
|
--enable-linux-perf enable Linux Performance Monitor API
|
||||||
|
--disable-large-tests disable tests that use a large amount of memory
|
||||||
|
|
||||||
NOTE: Object files are built at the place where configure is launched.
|
NOTE: Object files are built at the place where configure is launched.
|
||||||
EOF
|
EOF
|
||||||
|
@ -1931,6 +1932,7 @@ CONFIG_LIST="
|
||||||
$SUBSYSTEM_LIST
|
$SUBSYSTEM_LIST
|
||||||
autodetect
|
autodetect
|
||||||
fontconfig
|
fontconfig
|
||||||
|
large_tests
|
||||||
linux_perf
|
linux_perf
|
||||||
memory_poisoning
|
memory_poisoning
|
||||||
neon_clobber_test
|
neon_clobber_test
|
||||||
|
@ -2194,6 +2196,7 @@ SYSTEM_FUNCS="
|
||||||
getaddrinfo
|
getaddrinfo
|
||||||
gethrtime
|
gethrtime
|
||||||
getopt
|
getopt
|
||||||
|
GetModuleHandle
|
||||||
GetProcessAffinityMask
|
GetProcessAffinityMask
|
||||||
GetProcessMemoryInfo
|
GetProcessMemoryInfo
|
||||||
GetProcessTimes
|
GetProcessTimes
|
||||||
|
@ -2223,6 +2226,7 @@ SYSTEM_FUNCS="
|
||||||
SecItemImport
|
SecItemImport
|
||||||
SetConsoleTextAttribute
|
SetConsoleTextAttribute
|
||||||
SetConsoleCtrlHandler
|
SetConsoleCtrlHandler
|
||||||
|
SetDllDirectory
|
||||||
setmode
|
setmode
|
||||||
setrlimit
|
setrlimit
|
||||||
Sleep
|
Sleep
|
||||||
|
@ -3499,7 +3503,6 @@ freezedetect_filter_select="scene_sad"
|
||||||
frei0r_filter_deps="frei0r libdl"
|
frei0r_filter_deps="frei0r libdl"
|
||||||
frei0r_src_filter_deps="frei0r libdl"
|
frei0r_src_filter_deps="frei0r libdl"
|
||||||
fspp_filter_deps="gpl"
|
fspp_filter_deps="gpl"
|
||||||
geq_filter_deps="gpl"
|
|
||||||
headphone_filter_select="fft"
|
headphone_filter_select="fft"
|
||||||
histeq_filter_deps="gpl"
|
histeq_filter_deps="gpl"
|
||||||
hqdn3d_filter_deps="gpl"
|
hqdn3d_filter_deps="gpl"
|
||||||
|
@ -3576,6 +3579,7 @@ tinterlace_filter_deps="gpl"
|
||||||
tinterlace_merge_test_deps="tinterlace_filter"
|
tinterlace_merge_test_deps="tinterlace_filter"
|
||||||
tinterlace_pad_test_deps="tinterlace_filter"
|
tinterlace_pad_test_deps="tinterlace_filter"
|
||||||
tonemap_filter_deps="const_nan"
|
tonemap_filter_deps="const_nan"
|
||||||
|
tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
|
||||||
tonemap_opencl_filter_deps="opencl const_nan"
|
tonemap_opencl_filter_deps="opencl const_nan"
|
||||||
transpose_opencl_filter_deps="opencl"
|
transpose_opencl_filter_deps="opencl"
|
||||||
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
|
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
|
||||||
|
@ -3724,6 +3728,7 @@ enable asm
|
||||||
enable debug
|
enable debug
|
||||||
enable doc
|
enable doc
|
||||||
enable faan faandct faanidct
|
enable faan faandct faanidct
|
||||||
|
enable large_tests
|
||||||
enable optimizations
|
enable optimizations
|
||||||
enable runtime_cpudetect
|
enable runtime_cpudetect
|
||||||
enable safe_bitstream_reader
|
enable safe_bitstream_reader
|
||||||
|
@ -6032,6 +6037,7 @@ check_func_headers mach/mach_time.h mach_absolute_time
|
||||||
check_func_headers stdlib.h getenv
|
check_func_headers stdlib.h getenv
|
||||||
check_func_headers sys/stat.h lstat
|
check_func_headers sys/stat.h lstat
|
||||||
|
|
||||||
|
check_func_headers windows.h GetModuleHandle
|
||||||
check_func_headers windows.h GetProcessAffinityMask
|
check_func_headers windows.h GetProcessAffinityMask
|
||||||
check_func_headers windows.h GetProcessTimes
|
check_func_headers windows.h GetProcessTimes
|
||||||
check_func_headers windows.h GetSystemTimeAsFileTime
|
check_func_headers windows.h GetSystemTimeAsFileTime
|
||||||
|
@ -6040,6 +6046,7 @@ check_func_headers windows.h MapViewOfFile
|
||||||
check_func_headers windows.h PeekNamedPipe
|
check_func_headers windows.h PeekNamedPipe
|
||||||
check_func_headers windows.h SetConsoleTextAttribute
|
check_func_headers windows.h SetConsoleTextAttribute
|
||||||
check_func_headers windows.h SetConsoleCtrlHandler
|
check_func_headers windows.h SetConsoleCtrlHandler
|
||||||
|
check_func_headers windows.h SetDllDirectory
|
||||||
check_func_headers windows.h Sleep
|
check_func_headers windows.h Sleep
|
||||||
check_func_headers windows.h VirtualAlloc
|
check_func_headers windows.h VirtualAlloc
|
||||||
check_func_headers glob.h glob
|
check_func_headers glob.h glob
|
||||||
|
@ -6577,6 +6584,7 @@ if enabled vaapi; then
|
||||||
|
|
||||||
check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"
|
check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"
|
||||||
check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
|
check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
|
||||||
|
check_type "va/va.h va/va_vpp.h" "VAProcFilterParameterBufferHDRToneMapping"
|
||||||
check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" rotation_flags
|
check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" rotation_flags
|
||||||
check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
|
check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
|
||||||
check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
|
check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
|
||||||
|
@ -7448,7 +7456,7 @@ cat > $TMPH <<EOF
|
||||||
#define FFMPEG_CONFIG_H
|
#define FFMPEG_CONFIG_H
|
||||||
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
|
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
|
||||||
#define FFMPEG_LICENSE "$(c_escape $license)"
|
#define FFMPEG_LICENSE "$(c_escape $license)"
|
||||||
#define CONFIG_THIS_YEAR 2019
|
#define CONFIG_THIS_YEAR 2020
|
||||||
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
|
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
|
||||||
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
|
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
|
||||||
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"
|
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"
|
||||||
|
|
|
@ -15,6 +15,15 @@ libavutil: 2017-10-21
|
||||||
|
|
||||||
API changes, most recent first:
|
API changes, most recent first:
|
||||||
|
|
||||||
|
2020-01-15 - xxxxxxxxxx - lavc 58.66.100 - avcodec.h
|
||||||
|
Add AV_PKT_DATA_PRFT and AVProducerReferenceTime.
|
||||||
|
|
||||||
|
2019-12-27 - xxxxxxxxxx - lavu 56.38.100 - eval.h
|
||||||
|
Add av_expr_count_func().
|
||||||
|
|
||||||
|
2019-12-xx - xxxxxxxxxx - lavu 56.37.100 - buffer.h
|
||||||
|
Add av_buffer_pool_buffer_get_opaque().
|
||||||
|
|
||||||
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
|
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
|
||||||
Add av_expr_count_vars().
|
Add av_expr_count_vars().
|
||||||
|
|
||||||
|
|
|
@ -456,6 +456,17 @@ nanosecond precision.
|
||||||
@item video_size
|
@item video_size
|
||||||
Set the video size of the images to read. If not specified the video
|
Set the video size of the images to read. If not specified the video
|
||||||
size is guessed from the first image file in the sequence.
|
size is guessed from the first image file in the sequence.
|
||||||
|
@item export_path_metadata
|
||||||
|
If set to 1, will add two extra fields to the metadata found in input, making them
|
||||||
|
also available for other filters (see @var{drawtext} filter for examples). Default
|
||||||
|
value is 0. The extra fields are described below:
|
||||||
|
@table @option
|
||||||
|
@item lavf.image2dec.source_path
|
||||||
|
Corresponds to the full path to the input file being read.
|
||||||
|
@item lavf.image2dec.source_basename
|
||||||
|
Corresponds to the name of the file being read.
|
||||||
|
@end table
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
|
|
@ -1893,7 +1893,7 @@ key=value pairs. For example, to specify temporal scalability parameters
|
||||||
with @code{ffmpeg}:
|
with @code{ffmpeg}:
|
||||||
@example
|
@example
|
||||||
ffmpeg -i INPUT -c:v libvpx -ts-parameters ts_number_layers=3:\
|
ffmpeg -i INPUT -c:v libvpx -ts-parameters ts_number_layers=3:\
|
||||||
ts_target_bitrate=250000,500000,1000000:ts_rate_decimator=4,2,1:\
|
ts_target_bitrate=250,500,1000:ts_rate_decimator=4,2,1:\
|
||||||
ts_periodicity=4:ts_layer_id=0,2,1,2 OUTPUT
|
ts_periodicity=4:ts_layer_id=0,2,1,2 OUTPUT
|
||||||
@end example
|
@end example
|
||||||
Below is a brief explanation of each of the parameters, please
|
Below is a brief explanation of each of the parameters, please
|
||||||
|
@ -1903,7 +1903,8 @@ details.
|
||||||
@item ts_number_layers
|
@item ts_number_layers
|
||||||
Number of temporal coding layers.
|
Number of temporal coding layers.
|
||||||
@item ts_target_bitrate
|
@item ts_target_bitrate
|
||||||
Target bitrate for each temporal layer.
|
Target bitrate for each temporal layer (in kbps).
|
||||||
|
(bitrate should be inclusive of the lower temporal layer).
|
||||||
@item ts_rate_decimator
|
@item ts_rate_decimator
|
||||||
Frame rate decimation factor for each temporal layer.
|
Frame rate decimation factor for each temporal layer.
|
||||||
@item ts_periodicity
|
@item ts_periodicity
|
||||||
|
@ -2414,6 +2415,20 @@ during configuration. You need to explicitly configure the build with
|
||||||
@subsection Options
|
@subsection Options
|
||||||
|
|
||||||
@table @option
|
@table @option
|
||||||
|
@item b
|
||||||
|
Sets target video bitrate.
|
||||||
|
|
||||||
|
@item bf
|
||||||
|
|
||||||
|
@item g
|
||||||
|
Set the GOP size.
|
||||||
|
|
||||||
|
@item keyint_min
|
||||||
|
Minimum GOP size.
|
||||||
|
|
||||||
|
@item refs
|
||||||
|
Number of reference frames each P-frame can use. The range is from @var{1-16}.
|
||||||
|
|
||||||
@item preset
|
@item preset
|
||||||
Set the x265 preset.
|
Set the x265 preset.
|
||||||
|
|
||||||
|
@ -2426,6 +2441,28 @@ Set profile restrictions.
|
||||||
@item crf
|
@item crf
|
||||||
Set the quality for constant quality mode.
|
Set the quality for constant quality mode.
|
||||||
|
|
||||||
|
@item qp
|
||||||
|
Set constant quantization rate control method parameter.
|
||||||
|
|
||||||
|
@item qmin
|
||||||
|
Minimum quantizer scale.
|
||||||
|
|
||||||
|
@item qmax
|
||||||
|
Maximum quantizer scale.
|
||||||
|
|
||||||
|
@item qdiff
|
||||||
|
Maximum difference between quantizer scales.
|
||||||
|
|
||||||
|
@item qblur
|
||||||
|
Quantizer curve blur
|
||||||
|
|
||||||
|
@item qcomp
|
||||||
|
Quantizer curve compression factor
|
||||||
|
|
||||||
|
@item i_qfactor
|
||||||
|
|
||||||
|
@item b_qfactor
|
||||||
|
|
||||||
@item forced-idr
|
@item forced-idr
|
||||||
Normally, when forcing a I-frame type, the encoder can select any type
|
Normally, when forcing a I-frame type, the encoder can select any type
|
||||||
of I-frame. This option forces it to choose an IDR-frame.
|
of I-frame. This option forces it to choose an IDR-frame.
|
||||||
|
|
|
@ -879,12 +879,19 @@ Deprecated see -bsf
|
||||||
|
|
||||||
@item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream})
|
@item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream})
|
||||||
@item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream})
|
@item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream})
|
||||||
Force key frames at the specified timestamps, more precisely at the first
|
@item -force_key_frames[:@var{stream_specifier}] source (@emph{output,per-stream})
|
||||||
frames after each specified time.
|
|
||||||
|
|
||||||
If the argument is prefixed with @code{expr:}, the string @var{expr}
|
@var{force_key_frames} can take arguments of the following form:
|
||||||
is interpreted like an expression and is evaluated for each frame. A
|
|
||||||
key frame is forced in case the evaluation is non-zero.
|
@table @option
|
||||||
|
|
||||||
|
@item @var{time}[,@var{time}...]
|
||||||
|
If the argument consists of timestamps, ffmpeg will round the specified times to the nearest
|
||||||
|
output timestamp as per the encoder time base and force a keyframe at the first frame having
|
||||||
|
timestamp equal or greater than the computed timestamp. Note that if the encoder time base is too
|
||||||
|
coarse, then the keyframes may be forced on frames with timestamps lower than the specified time.
|
||||||
|
The default encoder time base is the inverse of the output framerate but may be set otherwise
|
||||||
|
via @code{-enc_time_base}.
|
||||||
|
|
||||||
If one of the times is "@code{chapters}[@var{delta}]", it is expanded into
|
If one of the times is "@code{chapters}[@var{delta}]", it is expanded into
|
||||||
the time of the beginning of all chapters in the file, shifted by
|
the time of the beginning of all chapters in the file, shifted by
|
||||||
|
@ -898,6 +905,11 @@ before the beginning of every chapter:
|
||||||
-force_key_frames 0:05:00,chapters-0.1
|
-force_key_frames 0:05:00,chapters-0.1
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
|
@item expr:@var{expr}
|
||||||
|
If the argument is prefixed with @code{expr:}, the string @var{expr}
|
||||||
|
is interpreted like an expression and is evaluated for each frame. A
|
||||||
|
key frame is forced in case the evaluation is non-zero.
|
||||||
|
|
||||||
The expression in @var{expr} can contain the following constants:
|
The expression in @var{expr} can contain the following constants:
|
||||||
@table @option
|
@table @option
|
||||||
@item n
|
@item n
|
||||||
|
@ -925,6 +937,12 @@ starting from second 13:
|
||||||
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
|
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
|
@item source
|
||||||
|
If the argument is @code{source}, ffmpeg will force a key frame if
|
||||||
|
the current frame being encoded is marked as a key frame in its source.
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
Note that forcing too many keyframes is very harmful for the lookahead
|
Note that forcing too many keyframes is very harmful for the lookahead
|
||||||
algorithms of certain encoders: using fixed-GOP options or similar
|
algorithms of certain encoders: using fixed-GOP options or similar
|
||||||
would be more efficient.
|
would be more efficient.
|
||||||
|
|
390
doc/filters.texi
390
doc/filters.texi
|
@ -443,6 +443,10 @@ How much to use compressed signal in output. Default is 1.
|
||||||
Range is between 0 and 1.
|
Range is between 0 and 1.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports the all above options as @ref{commands}.
|
||||||
|
|
||||||
@section acontrast
|
@section acontrast
|
||||||
Simple audio dynamic range compression/expansion filter.
|
Simple audio dynamic range compression/expansion filter.
|
||||||
|
|
||||||
|
@ -1179,7 +1183,7 @@ afftfilt="real='hypot(re,im)*cos((random(0)*2-1)*2*3.14)':imag='hypot(re,im)*sin
|
||||||
@anchor{afir}
|
@anchor{afir}
|
||||||
@section afir
|
@section afir
|
||||||
|
|
||||||
Apply an arbitrary Frequency Impulse Response filter.
|
Apply an arbitrary Finite Impulse Response filter.
|
||||||
|
|
||||||
This filter is designed for applying long FIR filters,
|
This filter is designed for applying long FIR filters,
|
||||||
up to 60 seconds long.
|
up to 60 seconds long.
|
||||||
|
@ -1188,10 +1192,10 @@ It can be used as component for digital crossover filters,
|
||||||
room equalization, cross talk cancellation, wavefield synthesis,
|
room equalization, cross talk cancellation, wavefield synthesis,
|
||||||
auralization, ambiophonics, ambisonics and spatialization.
|
auralization, ambiophonics, ambisonics and spatialization.
|
||||||
|
|
||||||
This filter uses the second stream as FIR coefficients.
|
This filter uses the streams higher than first one as FIR coefficients.
|
||||||
If the second stream holds a single channel, it will be used
|
If the non-first stream holds a single channel, it will be used
|
||||||
for all input channels in the first stream, otherwise
|
for all input channels in the first stream, otherwise
|
||||||
the number of channels in the second stream must be same as
|
the number of channels in the non-first stream must be same as
|
||||||
the number of channels in the first stream.
|
the number of channels in the first stream.
|
||||||
|
|
||||||
It accepts the following parameters:
|
It accepts the following parameters:
|
||||||
|
@ -1253,13 +1257,22 @@ Set video stream frame rate. This option is used only when @var{response} is ena
|
||||||
|
|
||||||
@item minp
|
@item minp
|
||||||
Set minimal partition size used for convolution. Default is @var{8192}.
|
Set minimal partition size used for convolution. Default is @var{8192}.
|
||||||
Allowed range is from @var{8} to @var{32768}.
|
Allowed range is from @var{1} to @var{32768}.
|
||||||
Lower values decreases latency at cost of higher CPU usage.
|
Lower values decreases latency at cost of higher CPU usage.
|
||||||
|
|
||||||
@item maxp
|
@item maxp
|
||||||
Set maximal partition size used for convolution. Default is @var{8192}.
|
Set maximal partition size used for convolution. Default is @var{8192}.
|
||||||
Allowed range is from @var{8} to @var{32768}.
|
Allowed range is from @var{8} to @var{32768}.
|
||||||
Lower values may increase CPU usage.
|
Lower values may increase CPU usage.
|
||||||
|
|
||||||
|
@item nbirs
|
||||||
|
Set number of input impulse responses streams which will be switchable at runtime.
|
||||||
|
Allowed range is from @var{1} to @var{32}. Default is @var{1}.
|
||||||
|
|
||||||
|
@item ir
|
||||||
|
Set IR stream which will be used for convolution, starting from @var{0}, should always be
|
||||||
|
lower than supplied value by @code{nbirs} option. Default is @var{0}.
|
||||||
|
This option can be changed at runtime via @ref{commands}.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
@ -1281,13 +1294,13 @@ negotiate the most appropriate format to minimize conversions.
|
||||||
It accepts the following parameters:
|
It accepts the following parameters:
|
||||||
@table @option
|
@table @option
|
||||||
|
|
||||||
@item sample_fmts
|
@item sample_fmts, f
|
||||||
A '|'-separated list of requested sample formats.
|
A '|'-separated list of requested sample formats.
|
||||||
|
|
||||||
@item sample_rates
|
@item sample_rates, r
|
||||||
A '|'-separated list of requested sample rates.
|
A '|'-separated list of requested sample rates.
|
||||||
|
|
||||||
@item channel_layouts
|
@item channel_layouts, cl
|
||||||
A '|'-separated list of requested channel layouts.
|
A '|'-separated list of requested channel layouts.
|
||||||
|
|
||||||
See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||||
|
@ -3213,6 +3226,10 @@ Sets the intensity of effect (default: 2.0). Must be in range between 0.0
|
||||||
Enable clipping. By default is enabled.
|
Enable clipping. By default is enabled.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section dcshift
|
@section dcshift
|
||||||
Apply a DC shift to the audio.
|
Apply a DC shift to the audio.
|
||||||
|
|
||||||
|
@ -3438,8 +3455,20 @@ value. Instead, the threshold value will be adjusted for each individual
|
||||||
frame.
|
frame.
|
||||||
In general, smaller parameters result in stronger compression, and vice versa.
|
In general, smaller parameters result in stronger compression, and vice versa.
|
||||||
Values below 3.0 are not recommended, because audible distortion may appear.
|
Values below 3.0 are not recommended, because audible distortion may appear.
|
||||||
|
|
||||||
|
@item threshold, t
|
||||||
|
Set the target threshold value. This specifies the lowest permissible
|
||||||
|
magnitude level for the audio input which will be normalized.
|
||||||
|
If input frame volume is above this value frame will be normalized.
|
||||||
|
Otherwise frame may not be normalized at all. The default value is set
|
||||||
|
to 0, which means all input frames will be normalized.
|
||||||
|
This option is mostly useful if digital noise is not wanted to be amplified.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section earwax
|
@section earwax
|
||||||
|
|
||||||
Make audio easier to listen to on headphones.
|
Make audio easier to listen to on headphones.
|
||||||
|
@ -3558,6 +3587,10 @@ Sets the difference coefficient (default: 2.5). 0.0 means mono sound
|
||||||
Enable clipping. By default is enabled.
|
Enable clipping. By default is enabled.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section firequalizer
|
@section firequalizer
|
||||||
Apply FIR Equalization using arbitrary frequency response.
|
Apply FIR Equalization using arbitrary frequency response.
|
||||||
|
|
||||||
|
@ -4660,6 +4693,10 @@ How much to use compressed signal in output. Default is 1.
|
||||||
Range is between 0 and 1.
|
Range is between 0 and 1.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
|
||||||
@itemize
|
@itemize
|
||||||
|
@ -5167,6 +5204,10 @@ channels. Default is 0.3.
|
||||||
Set level of input signal of original channel. Default is 0.8.
|
Set level of input signal of original channel. Default is 0.8.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options except @code{delay} as @ref{commands}.
|
||||||
|
|
||||||
@section superequalizer
|
@section superequalizer
|
||||||
Apply 18 band equalizer.
|
Apply 18 band equalizer.
|
||||||
|
|
||||||
|
@ -5522,6 +5563,11 @@ Pre-amplification gain in dB to apply to the selected replaygain gain.
|
||||||
|
|
||||||
Default value for @var{replaygain_preamp} is 0.0.
|
Default value for @var{replaygain_preamp} is 0.0.
|
||||||
|
|
||||||
|
@item replaygain_noclip
|
||||||
|
Prevent clipping by limiting the gain applied.
|
||||||
|
|
||||||
|
Default value for @var{replaygain_noclip} is 1.
|
||||||
|
|
||||||
@item eval
|
@item eval
|
||||||
Set when the volume expression is evaluated.
|
Set when the volume expression is evaluated.
|
||||||
|
|
||||||
|
@ -5581,11 +5627,6 @@ The command accepts the same syntax of the corresponding option.
|
||||||
|
|
||||||
If the specified expression is not valid, it is kept at its current
|
If the specified expression is not valid, it is kept at its current
|
||||||
value.
|
value.
|
||||||
@item replaygain_noclip
|
|
||||||
Prevent clipping by limiting the gain applied.
|
|
||||||
|
|
||||||
Default value for @var{replaygain_noclip} is 1.
|
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
@ -8354,6 +8395,9 @@ Draw rows and columns numbers on left and top of video.
|
||||||
|
|
||||||
@item opacity
|
@item opacity
|
||||||
Set background opacity.
|
Set background opacity.
|
||||||
|
|
||||||
|
@item format
|
||||||
|
Set display number format. Can be @code{hex}, or @code{dec}. Default is @code{hex}.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@section dctdnoiz
|
@section dctdnoiz
|
||||||
|
@ -8629,6 +8673,10 @@ Limit the maximum change for each plane, default is 65535.
|
||||||
If 0, plane will remain unchanged.
|
If 0, plane will remain unchanged.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section deflicker
|
@section deflicker
|
||||||
|
|
||||||
Remove temporal frame luminance variations.
|
Remove temporal frame luminance variations.
|
||||||
|
@ -8970,6 +9018,10 @@ Flags to local 3x3 coordinates maps like this:
|
||||||
6 7 8
|
6 7 8
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section displace
|
@section displace
|
||||||
|
|
||||||
Displace pixels as indicated by second and third input stream.
|
Displace pixels as indicated by second and third input stream.
|
||||||
|
@ -9027,8 +9079,8 @@ ffmpeg -i INPUT -f lavfi -i nullsrc=hd720,geq='r=128+80*(sin(sqrt((X-W/2)*(X-W/2
|
||||||
|
|
||||||
@section dnn_processing
|
@section dnn_processing
|
||||||
|
|
||||||
Do image processing with deep neural networks. Currently only AVFrame with RGB24
|
Do image processing with deep neural networks. It works together with another filter
|
||||||
and BGR24 are supported, more formats will be added later.
|
which converts the pixel format of the Frame to what the dnn network requires.
|
||||||
|
|
||||||
The filter accepts the following options:
|
The filter accepts the following options:
|
||||||
|
|
||||||
|
@ -9063,12 +9115,23 @@ Set the input name of the dnn network.
|
||||||
@item output
|
@item output
|
||||||
Set the output name of the dnn network.
|
Set the output name of the dnn network.
|
||||||
|
|
||||||
@item fmt
|
|
||||||
Set the pixel format for the Frame. Allowed values are @code{AV_PIX_FMT_RGB24}, and @code{AV_PIX_FMT_BGR24}.
|
|
||||||
Default value is @code{AV_PIX_FMT_RGB24}.
|
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@itemize
|
||||||
|
@item
|
||||||
|
Halve the red channel of the frame with format rgb24:
|
||||||
|
@example
|
||||||
|
ffmpeg -i input.jpg -vf format=rgb24,dnn_processing=model=halve_first_channel.model:input=dnn_in:output=dnn_out:dnn_backend=native out.native.png
|
||||||
|
@end example
|
||||||
|
|
||||||
|
@item
|
||||||
|
Halve the pixel value of the frame with format grayf32:
|
||||||
|
@example
|
||||||
|
ffmpeg -i input.jpg -vf format=grayf32,dnn_processing=model=halve_gray_float.model:input=dnn_in:output=dnn_out:dnn_backend=native -y out.native.png
|
||||||
|
@end example
|
||||||
|
|
||||||
|
@end itemize
|
||||||
|
|
||||||
@section drawbox
|
@section drawbox
|
||||||
|
|
||||||
Draw a colored box on the input image.
|
Draw a colored box on the input image.
|
||||||
|
@ -9258,6 +9321,9 @@ Set size of graph video. For the syntax of this option, check the
|
||||||
@ref{video size syntax,,"Video size" section in the ffmpeg-utils manual,ffmpeg-utils}.
|
@ref{video size syntax,,"Video size" section in the ffmpeg-utils manual,ffmpeg-utils}.
|
||||||
The default value is @code{900x256}.
|
The default value is @code{900x256}.
|
||||||
|
|
||||||
|
@item rate, r
|
||||||
|
Set the output frame rate. Default value is @code{25}.
|
||||||
|
|
||||||
The foreground color expressions can use the following variables:
|
The foreground color expressions can use the following variables:
|
||||||
@table @option
|
@table @option
|
||||||
@item MIN
|
@item MIN
|
||||||
|
@ -9869,6 +9935,15 @@ drawtext=fontfile=FreeSans.ttf:text=DOG:fontsize=24:x=10:y=20+24-max_glyph_a,
|
||||||
drawtext=fontfile=FreeSans.ttf:text=cow:fontsize=24:x=80:y=20+24-max_glyph_a
|
drawtext=fontfile=FreeSans.ttf:text=cow:fontsize=24:x=80:y=20+24-max_glyph_a
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
|
@item
|
||||||
|
Plot special @var{lavf.image2dec.source_basename} metadata onto each frame if
|
||||||
|
such metadata exists. Otherwise, plot the string "NA". Note that image2 demuxer
|
||||||
|
must have option @option{-export_path_metadata 1} for the special metadata fields
|
||||||
|
to be available for filters.
|
||||||
|
@example
|
||||||
|
drawtext="fontsize=20:fontcolor=white:fontfile=FreeSans.ttf:text='%@{metadata\:lavf.image2dec.source_basename\:NA@}':x=10:y=10"
|
||||||
|
@end example
|
||||||
|
|
||||||
@end itemize
|
@end itemize
|
||||||
|
|
||||||
For more information about libfreetype, check:
|
For more information about libfreetype, check:
|
||||||
|
@ -10118,6 +10193,10 @@ Flags to local 3x3 coordinates maps like this:
|
||||||
6 7 8
|
6 7 8
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section extractplanes
|
@section extractplanes
|
||||||
|
|
||||||
Extract color channel components from input video stream into
|
Extract color channel components from input video stream into
|
||||||
|
@ -11143,6 +11222,25 @@ specified value) or as a difference ratio between 0 and 1. Default is -60dB, or
|
||||||
Set freeze duration until notification (default is 2 seconds).
|
Set freeze duration until notification (default is 2 seconds).
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@section freezeframes
|
||||||
|
|
||||||
|
Freeze video frames.
|
||||||
|
|
||||||
|
This filter freezes video frames using frame from 2nd input.
|
||||||
|
|
||||||
|
The filter accepts the following options:
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item first
|
||||||
|
Set number of first frame from which to start freeze.
|
||||||
|
|
||||||
|
@item last
|
||||||
|
Set number of last frame from which to end freeze.
|
||||||
|
|
||||||
|
@item replace
|
||||||
|
Set number of frame from 2nd input which will be used instead of replaced frames.
|
||||||
|
@end table
|
||||||
|
|
||||||
@anchor{frei0r}
|
@anchor{frei0r}
|
||||||
@section frei0r
|
@section frei0r
|
||||||
|
|
||||||
|
@ -11671,6 +11769,7 @@ the histogram. Possible values are @code{none}, @code{weak} or
|
||||||
@code{strong}. It defaults to @code{none}.
|
@code{strong}. It defaults to @code{none}.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@anchor{histogram}
|
||||||
@section histogram
|
@section histogram
|
||||||
|
|
||||||
Compute and draw a color distribution histogram for the input video.
|
Compute and draw a color distribution histogram for the input video.
|
||||||
|
@ -12165,6 +12264,10 @@ Default value is @code{none}.
|
||||||
Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is @code{0}.
|
Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is @code{0}.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section inflate
|
@section inflate
|
||||||
|
|
||||||
Apply inflate effect to the video.
|
Apply inflate effect to the video.
|
||||||
|
@ -12183,6 +12286,10 @@ Limit the maximum change for each plane, default is 65535.
|
||||||
If 0, plane will remain unchanged.
|
If 0, plane will remain unchanged.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@section interlace
|
@section interlace
|
||||||
|
|
||||||
Simple interlacing filter from progressive contents. This interleaves upper (or
|
Simple interlacing filter from progressive contents. This interleaves upper (or
|
||||||
|
@ -15345,42 +15452,16 @@ Set the line to start scanning for EIA-608 data. Default is @code{0}.
|
||||||
@item scan_max
|
@item scan_max
|
||||||
Set the line to end scanning for EIA-608 data. Default is @code{29}.
|
Set the line to end scanning for EIA-608 data. Default is @code{29}.
|
||||||
|
|
||||||
@item mac
|
|
||||||
Set minimal acceptable amplitude change for sync codes detection.
|
|
||||||
Default is @code{0.2}. Allowed range is @code{[0.001 - 1]}.
|
|
||||||
|
|
||||||
@item spw
|
@item spw
|
||||||
Set the ratio of width reserved for sync code detection.
|
Set the ratio of width reserved for sync code detection.
|
||||||
Default is @code{0.27}. Allowed range is @code{[0.01 - 0.7]}.
|
Default is @code{0.27}. Allowed range is @code{[0.1 - 0.7]}.
|
||||||
|
|
||||||
@item mhd
|
|
||||||
Set the max peaks height difference for sync code detection.
|
|
||||||
Default is @code{0.1}. Allowed range is @code{[0.0 - 0.5]}.
|
|
||||||
|
|
||||||
@item mpd
|
|
||||||
Set max peaks period difference for sync code detection.
|
|
||||||
Default is @code{0.1}. Allowed range is @code{[0.0 - 0.5]}.
|
|
||||||
|
|
||||||
@item msd
|
|
||||||
Set the first two max start code bits differences.
|
|
||||||
Default is @code{0.02}. Allowed range is @code{[0.0 - 0.5]}.
|
|
||||||
|
|
||||||
@item bhd
|
|
||||||
Set the minimum ratio of bits height compared to 3rd start code bit.
|
|
||||||
Default is @code{0.75}. Allowed range is @code{[0.01 - 1]}.
|
|
||||||
|
|
||||||
@item th_w
|
|
||||||
Set the white color threshold. Default is @code{0.35}. Allowed range is @code{[0.1 - 1]}.
|
|
||||||
|
|
||||||
@item th_b
|
|
||||||
Set the black color threshold. Default is @code{0.15}. Allowed range is @code{[0.0 - 0.5]}.
|
|
||||||
|
|
||||||
@item chp
|
@item chp
|
||||||
Enable checking the parity bit. In the event of a parity error, the filter will output
|
Enable checking the parity bit. In the event of a parity error, the filter will output
|
||||||
@code{0x00} for that character. Default is false.
|
@code{0x00} for that character. Default is false.
|
||||||
|
|
||||||
@item lp
|
@item lp
|
||||||
Lowpass lines prior to further processing. Default is disabled.
|
Lowpass lines prior to further processing. Default is enabled.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
@ -16058,6 +16139,19 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
|
||||||
@item ovsub
|
@item ovsub
|
||||||
horizontal and vertical output chroma subsample values. For example for the
|
horizontal and vertical output chroma subsample values. For example for the
|
||||||
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
|
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
|
||||||
|
|
||||||
|
@item n
|
||||||
|
The (sequential) number of the input frame, starting from 0.
|
||||||
|
Only available with @code{eval=frame}.
|
||||||
|
|
||||||
|
@item t
|
||||||
|
The presentation timestamp of the input frame, expressed as a number of
|
||||||
|
seconds. Only available with @code{eval=frame}.
|
||||||
|
|
||||||
|
@item pos
|
||||||
|
The position (byte offset) of the frame in the input stream, or NaN if
|
||||||
|
this information is unavailable and/or meaningless (for example in case of synthetic video).
|
||||||
|
Only available with @code{eval=frame}.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
@ -16281,6 +16375,19 @@ The main input video's display aspect ratio. Calculated from
|
||||||
The main input video's horizontal and vertical chroma subsample values.
|
The main input video's horizontal and vertical chroma subsample values.
|
||||||
For example for the pixel format "yuv422p" @var{hsub} is 2 and @var{vsub}
|
For example for the pixel format "yuv422p" @var{hsub} is 2 and @var{vsub}
|
||||||
is 1.
|
is 1.
|
||||||
|
|
||||||
|
@item main_n
|
||||||
|
The (sequential) number of the main input frame, starting from 0.
|
||||||
|
Only available with @code{eval=frame}.
|
||||||
|
|
||||||
|
@item main_t
|
||||||
|
The presentation timestamp of the main input frame, expressed as a number of
|
||||||
|
seconds. Only available with @code{eval=frame}.
|
||||||
|
|
||||||
|
@item main_pos
|
||||||
|
The position (byte offset) of the frame in the main input stream, or NaN if
|
||||||
|
this information is unavailable and/or meaningless (for example in case of synthetic video).
|
||||||
|
Only available with @code{eval=frame}.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
@ -16299,6 +16406,19 @@ Scale a logo to 1/10th the height of a video, while preserving its display aspec
|
||||||
@end example
|
@end example
|
||||||
@end itemize
|
@end itemize
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports the following commands:
|
||||||
|
@table @option
|
||||||
|
@item width, w
|
||||||
|
@item height, h
|
||||||
|
Set the output video dimension expression.
|
||||||
|
The command accepts the same syntax of the corresponding option.
|
||||||
|
|
||||||
|
If the specified expression is not valid, it is kept at its current
|
||||||
|
value.
|
||||||
|
@end table
|
||||||
|
|
||||||
@section scroll
|
@section scroll
|
||||||
Scroll input video horizontally and/or vertically by constant speed.
|
Scroll input video horizontally and/or vertically by constant speed.
|
||||||
|
|
||||||
|
@ -16723,6 +16843,15 @@ The Adler-32 checksum (printed in hexadecimal) of all the planes of the input fr
|
||||||
@item plane_checksum
|
@item plane_checksum
|
||||||
The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
|
The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
|
||||||
expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3}]".
|
expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3}]".
|
||||||
|
|
||||||
|
@item mean
|
||||||
|
The mean value of pixels in each plane of the input frame, expressed in the form
|
||||||
|
"[@var{mean0} @var{mean1} @var{mean2} @var{mean3}]".
|
||||||
|
|
||||||
|
@item stdev
|
||||||
|
The standard deviation of pixel values in each plane of the input frame, expressed
|
||||||
|
in the form "[@var{stdev0} @var{stdev1} @var{stdev2} @var{stdev3}]".
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@section showpalette
|
@section showpalette
|
||||||
|
@ -17190,6 +17319,15 @@ option may cause flicker since the B-Frames have often larger QP. Default is
|
||||||
@code{0} (not enabled).
|
@code{0} (not enabled).
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports the following commands:
|
||||||
|
@table @option
|
||||||
|
@item level
|
||||||
|
@item quality
|
||||||
|
Same as quality option. And the command accepts the @code{max} same as the @code{6}.
|
||||||
|
@end table
|
||||||
|
|
||||||
@section sr
|
@section sr
|
||||||
|
|
||||||
Scale the input by applying one of the super-resolution methods based on
|
Scale the input by applying one of the super-resolution methods based on
|
||||||
|
@ -17740,6 +17878,61 @@ PAL output (25i):
|
||||||
16p: 33333334
|
16p: 33333334
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
|
@section thistogram
|
||||||
|
|
||||||
|
Compute and draw a color distribution histogram for the input video across time.
|
||||||
|
|
||||||
|
Unlike @ref{histogram} video filter which only shows histogram of single input frame
|
||||||
|
at certain time, this filter shows also past histograms of number of frames defined
|
||||||
|
by @code{width} option.
|
||||||
|
|
||||||
|
The computed histogram is a representation of the color component
|
||||||
|
distribution in an image.
|
||||||
|
|
||||||
|
The filter accepts the following options:
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item width, w
|
||||||
|
Set width of single color component output. Default value is @code{0}.
|
||||||
|
Value of @code{0} means width will be picked from input video.
|
||||||
|
This also sets the number of past histograms to keep.
|
||||||
|
Allowed range is [0, 8192].
|
||||||
|
|
||||||
|
@item display_mode, d
|
||||||
|
Set display mode.
|
||||||
|
It accepts the following values:
|
||||||
|
@table @samp
|
||||||
|
@item stack
|
||||||
|
Per color component graphs are placed below each other.
|
||||||
|
|
||||||
|
@item parade
|
||||||
|
Per color component graphs are placed side by side.
|
||||||
|
|
||||||
|
@item overlay
|
||||||
|
Presents information identical to that in the @code{parade}, except
|
||||||
|
that the graphs representing color components are superimposed directly
|
||||||
|
over one another.
|
||||||
|
@end table
|
||||||
|
Default is @code{stack}.
|
||||||
|
|
||||||
|
@item levels_mode, m
|
||||||
|
Set mode. Can be either @code{linear}, or @code{logarithmic}.
|
||||||
|
Default is @code{linear}.
|
||||||
|
|
||||||
|
@item components, c
|
||||||
|
Set what color components to display.
|
||||||
|
Default is @code{7}.
|
||||||
|
|
||||||
|
@item bgopacity, b
|
||||||
|
Set background opacity. Default is @code{0.9}.
|
||||||
|
|
||||||
|
@item envelope, e
|
||||||
|
Show envelope. Default is disabled.
|
||||||
|
|
||||||
|
@item ecolor, ec
|
||||||
|
Set envelope color. Default is @code{gold}.
|
||||||
|
@end table
|
||||||
|
|
||||||
@section threshold
|
@section threshold
|
||||||
|
|
||||||
Apply threshold effect to video stream.
|
Apply threshold effect to video stream.
|
||||||
|
@ -18119,10 +18312,12 @@ Enable complex vertical low-pass filtering.
|
||||||
This will slightly less reduce interlace 'twitter' and Moire
|
This will slightly less reduce interlace 'twitter' and Moire
|
||||||
patterning but better retain detail and subjective sharpness impression.
|
patterning but better retain detail and subjective sharpness impression.
|
||||||
|
|
||||||
|
@item bypass_il
|
||||||
|
Bypass already interlaced frames, only adjust the frame rate.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
Vertical low-pass filtering can only be enabled for @option{mode}
|
Vertical low-pass filtering and bypassing already interlaced frames can only be
|
||||||
@var{interleave_top} and @var{interleave_bottom}.
|
enabled for @option{mode} @var{interleave_top} and @var{interleave_bottom}.
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@ -18938,6 +19133,7 @@ Set vectorscope mode.
|
||||||
It accepts the following values:
|
It accepts the following values:
|
||||||
@table @samp
|
@table @samp
|
||||||
@item gray
|
@item gray
|
||||||
|
@item tint
|
||||||
Gray values are displayed on graph, higher brightness means more pixels have
|
Gray values are displayed on graph, higher brightness means more pixels have
|
||||||
same component color value on location in graph. This is the default mode.
|
same component color value on location in graph. This is the default mode.
|
||||||
|
|
||||||
|
@ -18996,6 +19192,7 @@ Set what kind of graticule to draw.
|
||||||
@item none
|
@item none
|
||||||
@item green
|
@item green
|
||||||
@item color
|
@item color
|
||||||
|
@item invert
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@item opacity, o
|
@item opacity, o
|
||||||
|
@ -19040,6 +19237,11 @@ Set what kind of colorspace to use when drawing graticule.
|
||||||
@item 709
|
@item 709
|
||||||
@end table
|
@end table
|
||||||
Default is auto.
|
Default is auto.
|
||||||
|
|
||||||
|
@item tint0, t0
|
||||||
|
@item tint1, t1
|
||||||
|
Set color tint for gray/tint vectorscope mode. By default both options are zero.
|
||||||
|
This means no tint, and output will remain gray.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@anchor{vidstabdetect}
|
@anchor{vidstabdetect}
|
||||||
|
@ -19328,6 +19530,10 @@ If @code{intensity} is negative and this is set to 1, colors will change,
|
||||||
otherwise colors will be less saturated, more towards gray.
|
otherwise colors will be less saturated, more towards gray.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports all the above options as @ref{commands}.
|
||||||
|
|
||||||
@anchor{vignette}
|
@anchor{vignette}
|
||||||
@section vignette
|
@section vignette
|
||||||
|
|
||||||
|
@ -19671,6 +19877,12 @@ Default is digital.
|
||||||
|
|
||||||
@item bgopacity, b
|
@item bgopacity, b
|
||||||
Set background opacity.
|
Set background opacity.
|
||||||
|
|
||||||
|
@item tint0, t0
|
||||||
|
@item tint1, t1
|
||||||
|
Set tint for output.
|
||||||
|
Only used with lowpass filter and when display is not overlay and input
|
||||||
|
pixel formats are not RGB.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@section weave, doubleweave
|
@section weave, doubleweave
|
||||||
|
@ -20344,7 +20556,17 @@ horizontal and vertical output chroma subsample values. For example for the
|
||||||
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
|
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
|
@subsection Commands
|
||||||
|
|
||||||
|
This filter supports the following commands:
|
||||||
@table @option
|
@table @option
|
||||||
|
@item width, w
|
||||||
|
@item height, h
|
||||||
|
Set the output video dimension expression.
|
||||||
|
The command accepts the same syntax of the corresponding option.
|
||||||
|
|
||||||
|
If the specified expression is not valid, it is kept at its current
|
||||||
|
value.
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@c man end VIDEO FILTERS
|
@c man end VIDEO FILTERS
|
||||||
|
@ -21034,6 +21256,65 @@ Apply a strong blur of both luma and chroma parameters:
|
||||||
|
|
||||||
@c man end OPENCL VIDEO FILTERS
|
@c man end OPENCL VIDEO FILTERS
|
||||||
|
|
||||||
|
@chapter VAAPI Video Filters
|
||||||
|
@c man begin VAAPI VIDEO FILTERS
|
||||||
|
|
||||||
|
VAAPI Video filters are usually used with VAAPI decoder and VAAPI encoder. Below is a description of VAAPI video filters.
|
||||||
|
|
||||||
|
To enable compilation of these filters you need to configure FFmpeg with
|
||||||
|
@code{--enable-vaapi}.
|
||||||
|
|
||||||
|
To use vaapi filters, you need to setup the vaapi device correctly. For more information, please read @url{https://trac.ffmpeg.org/wiki/Hardware/VAAPI}
|
||||||
|
|
||||||
|
@section tonemap_vaapi
|
||||||
|
|
||||||
|
Perform HDR(High Dynamic Range) to SDR(Standard Dynamic Range) conversion with tone-mapping.
|
||||||
|
It maps the dynamic range of HDR10 content to the SDR content.
|
||||||
|
It currently only accepts HDR10 as input.
|
||||||
|
|
||||||
|
It accepts the following parameters:
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item format
|
||||||
|
Specify the output pixel format.
|
||||||
|
|
||||||
|
Currently supported formats are:
|
||||||
|
@table @var
|
||||||
|
@item p010
|
||||||
|
@item nv12
|
||||||
|
@end table
|
||||||
|
|
||||||
|
Default is nv12.
|
||||||
|
|
||||||
|
@item primaries, p
|
||||||
|
Set the output color primaries.
|
||||||
|
|
||||||
|
Default is same as input.
|
||||||
|
|
||||||
|
@item transfer, t
|
||||||
|
Set the output transfer characteristics.
|
||||||
|
|
||||||
|
Default is bt709.
|
||||||
|
|
||||||
|
@item matrix, m
|
||||||
|
Set the output colorspace matrix.
|
||||||
|
|
||||||
|
Default is same as input.
|
||||||
|
|
||||||
|
@end table
|
||||||
|
|
||||||
|
@subsection Example
|
||||||
|
|
||||||
|
@itemize
|
||||||
|
@item
|
||||||
|
Convert HDR(HDR10) video to bt2020-transfer-characteristic p010 format
|
||||||
|
@example
|
||||||
|
tonemap_vaapi=format=p010:t=bt2020-10
|
||||||
|
@end example
|
||||||
|
@end itemize
|
||||||
|
|
||||||
|
@c man end VAAPI VIDEO FILTERS
|
||||||
|
|
||||||
@chapter Video Sources
|
@chapter Video Sources
|
||||||
@c man begin VIDEO SOURCES
|
@c man begin VIDEO SOURCES
|
||||||
|
|
||||||
|
@ -21076,9 +21357,9 @@ Specify the frame rate expected for the video stream.
|
||||||
The sample (pixel) aspect ratio of the input video.
|
The sample (pixel) aspect ratio of the input video.
|
||||||
|
|
||||||
@item sws_param
|
@item sws_param
|
||||||
Specify the optional parameters to be used for the scale filter which
|
This option is deprecated and ignored. Prepend @code{sws_flags=@var{flags};}
|
||||||
is automatically inserted when an input change is detected in the
|
to the filtergraph description to specify swscale flags for automatically
|
||||||
input size or format.
|
inserted scalers. See @ref{Filtergraph syntax}.
|
||||||
|
|
||||||
@item hw_frames_ctx
|
@item hw_frames_ctx
|
||||||
When using a hardware pixel format, this should be a reference to an
|
When using a hardware pixel format, this should be a reference to an
|
||||||
|
@ -21103,7 +21384,7 @@ buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
|
||||||
Alternatively, the options can be specified as a flat string, but this
|
Alternatively, the options can be specified as a flat string, but this
|
||||||
syntax is deprecated:
|
syntax is deprecated:
|
||||||
|
|
||||||
@var{width}:@var{height}:@var{pix_fmt}:@var{time_base.num}:@var{time_base.den}:@var{pixel_aspect.num}:@var{pixel_aspect.den}[:@var{sws_param}]
|
@var{width}:@var{height}:@var{pix_fmt}:@var{time_base.num}:@var{time_base.den}:@var{pixel_aspect.num}:@var{pixel_aspect.den}
|
||||||
|
|
||||||
@section cellauto
|
@section cellauto
|
||||||
|
|
||||||
|
@ -22516,6 +22797,9 @@ plain filename any writable url can be specified. Filename ``-'' is a shorthand
|
||||||
for standard output. If @code{file} option is not set, output is written to the log
|
for standard output. If @code{file} option is not set, output is written to the log
|
||||||
with AV_LOG_INFO loglevel.
|
with AV_LOG_INFO loglevel.
|
||||||
|
|
||||||
|
@item direct
|
||||||
|
Reduces buffering in print mode when output is written to a URL set using @var{file}.
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
|
|
@ -814,11 +814,13 @@ following image formats are supported:
|
||||||
@item Autodesk RLE @tab @tab X
|
@item Autodesk RLE @tab @tab X
|
||||||
@tab fourcc: AASC
|
@tab fourcc: AASC
|
||||||
@item AV1 @tab E @tab E
|
@item AV1 @tab E @tab E
|
||||||
@tab Supported through external libraries libaom and libdav1d
|
@tab Supported through external libraries libaom, libdav1d and librav1e
|
||||||
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
|
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
|
||||||
@tab fourcc: AVrp
|
@tab fourcc: AVrp
|
||||||
@item AVS (Audio Video Standard) video @tab @tab X
|
@item AVS (Audio Video Standard) video @tab @tab X
|
||||||
@tab Video encoding used by the Creature Shock game.
|
@tab Video encoding used by the Creature Shock game.
|
||||||
|
@item AVS2-P2/IEEE1857.4 @tab E @tab E
|
||||||
|
@tab Supported through external libraries libxavs2 and libdavs2
|
||||||
@item AYUV @tab X @tab X
|
@item AYUV @tab X @tab X
|
||||||
@tab Microsoft uncompressed packed 4:4:4:4
|
@tab Microsoft uncompressed packed 4:4:4:4
|
||||||
@item Beam Software VB @tab @tab X
|
@item Beam Software VB @tab @tab X
|
||||||
|
|
|
@ -277,8 +277,8 @@ audio track.
|
||||||
|
|
||||||
@item list_devices
|
@item list_devices
|
||||||
If set to @option{true}, print a list of devices and exit.
|
If set to @option{true}, print a list of devices and exit.
|
||||||
Defaults to @option{false}. Alternatively you can use the @code{-sources}
|
Defaults to @option{false}. This option is deprecated, please use the
|
||||||
option of ffmpeg to list the available input devices.
|
@code{-sources} option of ffmpeg to list the available input devices.
|
||||||
|
|
||||||
@item list_formats
|
@item list_formats
|
||||||
If set to @option{true}, print a list of supported formats and exit.
|
If set to @option{true}, print a list of supported formats and exit.
|
||||||
|
@ -292,11 +292,6 @@ as @option{pal} (3 letters).
|
||||||
Default behavior is autodetection of the input video format, if the hardware
|
Default behavior is autodetection of the input video format, if the hardware
|
||||||
supports it.
|
supports it.
|
||||||
|
|
||||||
@item bm_v210
|
|
||||||
This is a deprecated option, you can use @option{raw_format} instead.
|
|
||||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
|
||||||
of uyvy422. Not all Blackmagic devices support this option.
|
|
||||||
|
|
||||||
@item raw_format
|
@item raw_format
|
||||||
Set the pixel format of the captured video.
|
Set the pixel format of the captured video.
|
||||||
Available values are:
|
Available values are:
|
||||||
|
@ -412,7 +407,7 @@ Defaults to @option{false}.
|
||||||
@item
|
@item
|
||||||
List input devices:
|
List input devices:
|
||||||
@example
|
@example
|
||||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
ffmpeg -sources decklink
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
@item
|
@item
|
||||||
|
@ -430,7 +425,7 @@ ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy outp
|
||||||
@item
|
@item
|
||||||
Capture video clip at 1080i50 10 bit:
|
Capture video clip at 1080i50 10 bit:
|
||||||
@example
|
@example
|
||||||
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
ffmpeg -raw_format yuv422p10 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
@item
|
@item
|
||||||
|
@ -1532,7 +1527,7 @@ ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_siz
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
@item video_size
|
@item video_size
|
||||||
Set the video frame size. Default value is @code{vga}.
|
Set the video frame size. Default is the full desktop.
|
||||||
|
|
||||||
@item grab_x
|
@item grab_x
|
||||||
@item grab_y
|
@item grab_y
|
||||||
|
|
|
@ -236,8 +236,10 @@ This is a deprecated option to set the segment length in microseconds, use @var{
|
||||||
@item seg_duration @var{duration}
|
@item seg_duration @var{duration}
|
||||||
Set the segment length in seconds (fractional value can be set). The value is
|
Set the segment length in seconds (fractional value can be set). The value is
|
||||||
treated as average segment duration when @var{use_template} is enabled and
|
treated as average segment duration when @var{use_template} is enabled and
|
||||||
@var{use_timeline} is disabled and as minimum segment duration for all the other
|
@item frag_duration @var{duration}
|
||||||
use cases.
|
Set the length in seconds of fragments within segments (fractional value can be set).
|
||||||
|
@item frag_type @var{type}
|
||||||
|
Set the type of interval for fragmentation.
|
||||||
@item window_size @var{size}
|
@item window_size @var{size}
|
||||||
Set the maximum number of segments kept in the manifest.
|
Set the maximum number of segments kept in the manifest.
|
||||||
@item extra_window_size @var{size}
|
@item extra_window_size @var{size}
|
||||||
|
@ -278,9 +280,12 @@ To map all video (or audio) streams to an AdaptationSet, "v" (or "a") can be use
|
||||||
|
|
||||||
When no assignment is defined, this defaults to an AdaptationSet for each stream.
|
When no assignment is defined, this defaults to an AdaptationSet for each stream.
|
||||||
|
|
||||||
Optional syntax is "id=x,descriptor=descriptor_string,streams=a,b,c id=y,streams=d,e" and so on, descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015.
|
Optional syntax is "id=x,seg_duration=x,frag_duration=x,frag_type=type,descriptor=descriptor_string,streams=a,b,c id=y,seg_duration=y,frag_type=type,streams=d,e" and so on,
|
||||||
|
descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015.
|
||||||
For example, -adaptation_sets "id=0,descriptor=<SupplementalProperty schemeIdUri=\"urn:mpeg:dash:srd:2014\" value=\"0,0,0,1,1,2,2\"/>,streams=v".
|
For example, -adaptation_sets "id=0,descriptor=<SupplementalProperty schemeIdUri=\"urn:mpeg:dash:srd:2014\" value=\"0,0,0,1,1,2,2\"/>,streams=v".
|
||||||
Please note that descriptor string should be a self-closing xml tag.
|
Please note that descriptor string should be a self-closing xml tag.
|
||||||
|
seg_duration, frag_duration and frag_type override the global option values for each adaptation set.
|
||||||
|
For example, -adaptation_sets "id=0,seg_duration=2,frag_duration=1,frag_type=duration,streams=v id=1,seg_duration=2,frag_type=none,streams=a"
|
||||||
@item timeout @var{timeout}
|
@item timeout @var{timeout}
|
||||||
Set timeout for socket I/O operations. Applicable only for HTTP output.
|
Set timeout for socket I/O operations. Applicable only for HTTP output.
|
||||||
@item index_correction @var{index_correction}
|
@item index_correction @var{index_correction}
|
||||||
|
@ -326,9 +331,26 @@ This option will also try to comply with the above open spec, till Apple's spec
|
||||||
Applicable only when @var{streaming} and @var{hls_playlist} options are enabled.
|
Applicable only when @var{streaming} and @var{hls_playlist} options are enabled.
|
||||||
This is an experimental feature.
|
This is an experimental feature.
|
||||||
|
|
||||||
|
@item ldash @var{ldash}
|
||||||
|
Enable Low-latency Dash by constraining the presence and values of some elements.
|
||||||
|
|
||||||
@item master_m3u8_publish_rate @var{master_m3u8_publish_rate}
|
@item master_m3u8_publish_rate @var{master_m3u8_publish_rate}
|
||||||
Publish master playlist repeatedly every after specified number of segment intervals.
|
Publish master playlist repeatedly every after specified number of segment intervals.
|
||||||
|
|
||||||
|
@item -write_prft @var{write_prft}
|
||||||
|
Write Producer Reference Time elements on supported streams. This also enables writing
|
||||||
|
prft boxes in the underlying muxer. Applicable only when the @var{utc_url} option is enabled.
|
||||||
|
|
||||||
|
@item -mpd_profile @var{mpd_profile}
|
||||||
|
Set one or more manifest profiles.
|
||||||
|
|
||||||
|
@item -http_opts @var{http_opts}
|
||||||
|
List of options to pass to the underlying HTTP protocol. Applicable only for HTTP output.
|
||||||
|
|
||||||
|
@item -target_latency @var{target_latency}
|
||||||
|
Set an intended target latency in seconds (fractional value can be set) for serving. Applicable only when @var{streaming} and @var{write_prft} options are enabled.
|
||||||
|
This is an informative fields clients can use to measure the latency of the service.
|
||||||
|
|
||||||
@end table
|
@end table
|
||||||
|
|
||||||
@anchor{framecrc}
|
@anchor{framecrc}
|
||||||
|
@ -1169,6 +1191,32 @@ The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
|
||||||
form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg},
|
form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg},
|
||||||
etc.
|
etc.
|
||||||
|
|
||||||
|
The image muxer supports the .Y.U.V image file format. This format is
|
||||||
|
special in that that each image frame consists of three files, for
|
||||||
|
each of the YUV420P components. To read or write this image file format,
|
||||||
|
specify the name of the '.Y' file. The muxer will automatically open the
|
||||||
|
'.U' and '.V' files as required.
|
||||||
|
|
||||||
|
@subsection Options
|
||||||
|
|
||||||
|
@table @option
|
||||||
|
@item frame_pts
|
||||||
|
If set to 1, expand the filename with pts from pkt->pts.
|
||||||
|
Default value is 0.
|
||||||
|
|
||||||
|
@item start_number
|
||||||
|
Start the sequence from the specified number. Default value is 1.
|
||||||
|
|
||||||
|
@item update
|
||||||
|
If set to 1, the filename will always be interpreted as just a
|
||||||
|
filename, not a pattern, and the corresponding file will be continuously
|
||||||
|
overwritten with new images. Default value is 0.
|
||||||
|
|
||||||
|
@item strftime
|
||||||
|
If set to 1, expand the filename with date and time information from
|
||||||
|
@code{strftime()}. Default value is 0.
|
||||||
|
@end table
|
||||||
|
|
||||||
@subsection Examples
|
@subsection Examples
|
||||||
|
|
||||||
The following example shows how to use @command{ffmpeg} for creating a
|
The following example shows how to use @command{ffmpeg} for creating a
|
||||||
|
@ -1209,32 +1257,6 @@ You can set the file name with current frame's PTS:
|
||||||
ffmpeg -f v4l2 -r 1 -i /dev/video0 -copyts -f image2 -frame_pts true %d.jpg"
|
ffmpeg -f v4l2 -r 1 -i /dev/video0 -copyts -f image2 -frame_pts true %d.jpg"
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
@subsection Options
|
|
||||||
|
|
||||||
@table @option
|
|
||||||
@item frame_pts
|
|
||||||
If set to 1, expand the filename with pts from pkt->pts.
|
|
||||||
Default value is 0.
|
|
||||||
|
|
||||||
@item start_number
|
|
||||||
Start the sequence from the specified number. Default value is 1.
|
|
||||||
|
|
||||||
@item update
|
|
||||||
If set to 1, the filename will always be interpreted as just a
|
|
||||||
filename, not a pattern, and the corresponding file will be continuously
|
|
||||||
overwritten with new images. Default value is 0.
|
|
||||||
|
|
||||||
@item strftime
|
|
||||||
If set to 1, expand the filename with date and time information from
|
|
||||||
@code{strftime()}. Default value is 0.
|
|
||||||
@end table
|
|
||||||
|
|
||||||
The image muxer supports the .Y.U.V image file format. This format is
|
|
||||||
special in that that each image frame consists of three files, for
|
|
||||||
each of the YUV420P components. To read or write this image file format,
|
|
||||||
specify the name of the '.Y' file. The muxer will automatically open the
|
|
||||||
'.U' and '.V' files as required.
|
|
||||||
|
|
||||||
@section matroska
|
@section matroska
|
||||||
|
|
||||||
Matroska container muxer.
|
Matroska container muxer.
|
||||||
|
|
|
@ -140,8 +140,8 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.
|
||||||
|
|
||||||
@item list_devices
|
@item list_devices
|
||||||
If set to @option{true}, print a list of devices and exit.
|
If set to @option{true}, print a list of devices and exit.
|
||||||
Defaults to @option{false}. Alternatively you can use the @code{-sinks}
|
Defaults to @option{false}. This option is deprecated, please use the
|
||||||
option of ffmpeg to list the available output devices.
|
@code{-sinks} option of ffmpeg to list the available output devices.
|
||||||
|
|
||||||
@item list_formats
|
@item list_formats
|
||||||
If set to @option{true}, print a list of supported formats and exit.
|
If set to @option{true}, print a list of supported formats and exit.
|
||||||
|
@ -168,7 +168,7 @@ Defaults to @samp{unset}.
|
||||||
@item
|
@item
|
||||||
List output devices:
|
List output devices:
|
||||||
@example
|
@example
|
||||||
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
|
ffmpeg -sinks decklink
|
||||||
@end example
|
@end example
|
||||||
|
|
||||||
@item
|
@item
|
||||||
|
|
|
@ -119,7 +119,7 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
|
||||||
|
|
||||||
void init_dynload(void)
|
void init_dynload(void)
|
||||||
{
|
{
|
||||||
#ifdef _WIN32
|
#if HAVE_SETDLLDIRECTORY && defined(_WIN32)
|
||||||
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
|
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
|
||||||
* current working directory from the DLL search path as a security pre-caution. */
|
* current working directory from the DLL search path as a security pre-caution. */
|
||||||
SetDllDirectory("");
|
SetDllDirectory("");
|
||||||
|
@ -182,7 +182,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||||
|
|
||||||
first = 1;
|
first = 1;
|
||||||
for (po = options; po->name; po++) {
|
for (po = options; po->name; po++) {
|
||||||
char buf[64];
|
char buf[128];
|
||||||
|
|
||||||
if (((po->flags & req_flags) != req_flags) ||
|
if (((po->flags & req_flags) != req_flags) ||
|
||||||
(alt_flags && !(po->flags & alt_flags)) ||
|
(alt_flags && !(po->flags & alt_flags)) ||
|
||||||
|
@ -2039,7 +2039,7 @@ FILE *get_preset_file(char *filename, size_t filename_size,
|
||||||
av_strlcpy(filename, preset_name, filename_size);
|
av_strlcpy(filename, preset_name, filename_size);
|
||||||
f = fopen(filename, "r");
|
f = fopen(filename, "r");
|
||||||
} else {
|
} else {
|
||||||
#ifdef _WIN32
|
#if HAVE_GETMODULEHANDLE && defined(_WIN32)
|
||||||
char datadir[MAX_PATH], *ls;
|
char datadir[MAX_PATH], *ls;
|
||||||
base[2] = NULL;
|
base[2] = NULL;
|
||||||
|
|
||||||
|
|
|
@ -1268,7 +1268,8 @@ static void do_video_out(OutputFile *of,
|
||||||
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
|
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
|
||||||
} else if ( ost->forced_keyframes
|
} else if ( ost->forced_keyframes
|
||||||
&& !strncmp(ost->forced_keyframes, "source", 6)
|
&& !strncmp(ost->forced_keyframes, "source", 6)
|
||||||
&& in_picture->key_frame==1) {
|
&& in_picture->key_frame==1
|
||||||
|
&& !i) {
|
||||||
forced_keyframe = 1;
|
forced_keyframe = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3404,10 +3405,6 @@ static int init_output_stream_encode(OutputStream *ost)
|
||||||
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
|
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
|
||||||
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
|
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
|
||||||
}
|
}
|
||||||
for (j = 0; j < ost->forced_kf_count; j++)
|
|
||||||
ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
|
|
||||||
AV_TIME_BASE_Q,
|
|
||||||
enc_ctx->time_base);
|
|
||||||
|
|
||||||
enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
|
enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
|
||||||
enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
|
enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
|
||||||
|
@ -3599,12 +3596,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
|
||||||
int i;
|
int i;
|
||||||
for (i = 0; i < ist->st->nb_side_data; i++) {
|
for (i = 0; i < ist->st->nb_side_data; i++) {
|
||||||
AVPacketSideData *sd = &ist->st->side_data[i];
|
AVPacketSideData *sd = &ist->st->side_data[i];
|
||||||
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
|
if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
|
||||||
if (!dst)
|
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
|
||||||
return AVERROR(ENOMEM);
|
if (!dst)
|
||||||
memcpy(dst, sd->data, sd->size);
|
return AVERROR(ENOMEM);
|
||||||
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
|
memcpy(dst, sd->data, sd->size);
|
||||||
av_display_rotation_set((uint32_t *)dst, 0);
|
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
|
||||||
|
av_display_rotation_set((uint32_t *)dst, 0);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -786,10 +786,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
|
||||||
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||||
av_bprintf(&args,
|
av_bprintf(&args,
|
||||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
|
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
|
||||||
"pixel_aspect=%d/%d:sws_param=flags=%d",
|
"pixel_aspect=%d/%d",
|
||||||
ifilter->width, ifilter->height, ifilter->format,
|
ifilter->width, ifilter->height, ifilter->format,
|
||||||
tb.num, tb.den, sar.num, sar.den,
|
tb.num, tb.den, sar.num, sar.den);
|
||||||
SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
|
|
||||||
if (fr.num && fr.den)
|
if (fr.num && fr.den)
|
||||||
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
|
||||||
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
|
||||||
|
|
|
@ -3202,7 +3202,7 @@ void show_help_default(const char *opt, const char *arg)
|
||||||
OPT_EXIT, 0, 0);
|
OPT_EXIT, 0, 0);
|
||||||
|
|
||||||
show_help_options(options, "Global options (affect whole program "
|
show_help_options(options, "Global options (affect whole program "
|
||||||
"instead of just one file:",
|
"instead of just one file):",
|
||||||
0, per_file | OPT_EXIT | OPT_EXPERT, 0);
|
0, per_file | OPT_EXIT | OPT_EXPERT, 0);
|
||||||
if (show_advanced)
|
if (show_advanced)
|
||||||
show_help_options(options, "Advanced global options:", OPT_EXPERT,
|
show_help_options(options, "Advanced global options:", OPT_EXPERT,
|
||||||
|
|
|
@ -254,6 +254,7 @@ static const OptionDef *options;
|
||||||
|
|
||||||
/* FFprobe context */
|
/* FFprobe context */
|
||||||
static const char *input_filename;
|
static const char *input_filename;
|
||||||
|
static const char *print_input_filename;
|
||||||
static AVInputFormat *iformat = NULL;
|
static AVInputFormat *iformat = NULL;
|
||||||
|
|
||||||
static struct AVHashContext *hash;
|
static struct AVHashContext *hash;
|
||||||
|
@ -2836,7 +2837,8 @@ static void show_error(WriterContext *w, int err)
|
||||||
writer_print_section_footer(w);
|
writer_print_section_footer(w);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int open_input_file(InputFile *ifile, const char *filename)
|
static int open_input_file(InputFile *ifile, const char *filename,
|
||||||
|
const char *print_filename)
|
||||||
{
|
{
|
||||||
int err, i;
|
int err, i;
|
||||||
AVFormatContext *fmt_ctx = NULL;
|
AVFormatContext *fmt_ctx = NULL;
|
||||||
|
@ -2858,6 +2860,10 @@ static int open_input_file(InputFile *ifile, const char *filename)
|
||||||
print_error(filename, err);
|
print_error(filename, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
if (print_filename) {
|
||||||
|
av_freep(&fmt_ctx->url);
|
||||||
|
fmt_ctx->url = av_strdup(print_filename);
|
||||||
|
}
|
||||||
ifile->fmt_ctx = fmt_ctx;
|
ifile->fmt_ctx = fmt_ctx;
|
||||||
if (scan_all_pmts_set)
|
if (scan_all_pmts_set)
|
||||||
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
|
||||||
|
@ -2971,7 +2977,8 @@ static void close_input_file(InputFile *ifile)
|
||||||
avformat_close_input(&ifile->fmt_ctx);
|
avformat_close_input(&ifile->fmt_ctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int probe_file(WriterContext *wctx, const char *filename)
|
static int probe_file(WriterContext *wctx, const char *filename,
|
||||||
|
const char *print_filename)
|
||||||
{
|
{
|
||||||
InputFile ifile = { 0 };
|
InputFile ifile = { 0 };
|
||||||
int ret, i;
|
int ret, i;
|
||||||
|
@ -2980,7 +2987,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
|
||||||
do_read_frames = do_show_frames || do_count_frames;
|
do_read_frames = do_show_frames || do_count_frames;
|
||||||
do_read_packets = do_show_packets || do_count_packets;
|
do_read_packets = do_show_packets || do_count_packets;
|
||||||
|
|
||||||
ret = open_input_file(&ifile, filename);
|
ret = open_input_file(&ifile, filename, print_filename);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto end;
|
goto end;
|
||||||
|
|
||||||
|
@ -3286,6 +3293,12 @@ static int opt_input_file_i(void *optctx, const char *opt, const char *arg)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int opt_print_filename(void *optctx, const char *opt, const char *arg)
|
||||||
|
{
|
||||||
|
print_input_filename = arg;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
void show_help_default(const char *opt, const char *arg)
|
void show_help_default(const char *opt, const char *arg)
|
||||||
{
|
{
|
||||||
av_log_set_callback(log_callback_help);
|
av_log_set_callback(log_callback_help);
|
||||||
|
@ -3544,6 +3557,7 @@ static const OptionDef real_options[] = {
|
||||||
{ "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" },
|
{ "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" },
|
||||||
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" },
|
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" },
|
||||||
{ "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"},
|
{ "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"},
|
||||||
|
{ "print_filename", HAS_ARG, {.func_arg = opt_print_filename}, "override the printed input filename", "print_file"},
|
||||||
{ "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
|
{ "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
|
||||||
"read and decode the streams to fill missing information with heuristics" },
|
"read and decode the streams to fill missing information with heuristics" },
|
||||||
{ NULL, },
|
{ NULL, },
|
||||||
|
@ -3692,7 +3706,7 @@ int main(int argc, char **argv)
|
||||||
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
|
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
|
||||||
ret = AVERROR(EINVAL);
|
ret = AVERROR(EINVAL);
|
||||||
} else if (input_filename) {
|
} else if (input_filename) {
|
||||||
ret = probe_file(wctx, input_filename);
|
ret = probe_file(wctx, input_filename, print_input_filename);
|
||||||
if (ret < 0 && do_show_error)
|
if (ret < 0 && do_show_error)
|
||||||
show_error(wctx, ret);
|
show_error(wctx, ret);
|
||||||
}
|
}
|
||||||
|
|
|
@ -140,8 +140,8 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
|
||||||
break;
|
break;
|
||||||
case AV_CODEC_ID_ADPCM_IMA_APC:
|
case AV_CODEC_ID_ADPCM_IMA_APC:
|
||||||
if (avctx->extradata && avctx->extradata_size >= 8) {
|
if (avctx->extradata && avctx->extradata_size >= 8) {
|
||||||
c->status[0].predictor = AV_RL32(avctx->extradata);
|
c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
|
||||||
c->status[1].predictor = AV_RL32(avctx->extradata + 4);
|
c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case AV_CODEC_ID_ADPCM_IMA_WS:
|
case AV_CODEC_ID_ADPCM_IMA_WS:
|
||||||
|
@ -441,7 +441,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||||
d = in[16+i+j*4];
|
d = in[16+i+j*4];
|
||||||
|
|
||||||
t = sign_extend(d, 4);
|
t = sign_extend(d, 4);
|
||||||
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
|
s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
|
||||||
s_2 = s_1;
|
s_2 = s_1;
|
||||||
s_1 = av_clip_int16(s);
|
s_1 = av_clip_int16(s);
|
||||||
out0[j] = s_1;
|
out0[j] = s_1;
|
||||||
|
@ -468,7 +468,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
|
||||||
d = in[16+i+j*4];
|
d = in[16+i+j*4];
|
||||||
|
|
||||||
t = sign_extend(d >> 4, 4);
|
t = sign_extend(d >> 4, 4);
|
||||||
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
|
s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
|
||||||
s_2 = s_1;
|
s_2 = s_1;
|
||||||
s_1 = av_clip_int16(s);
|
s_1 = av_clip_int16(s);
|
||||||
out1[j] = s_1;
|
out1[j] = s_1;
|
||||||
|
@ -1233,7 +1233,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
|
||||||
}
|
}
|
||||||
for (i=0; i<=st; i++) {
|
for (i=0; i<=st; i++) {
|
||||||
c->status[i].predictor = bytestream2_get_le32u(&gb);
|
c->status[i].predictor = bytestream2_get_le32u(&gb);
|
||||||
if (FFABS(c->status[i].predictor) > (1<<16))
|
if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -423,8 +423,8 @@ static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size,
|
||||||
int map = s->map[x];
|
int map = s->map[x];
|
||||||
|
|
||||||
if (orig_mv_x >= -32) {
|
if (orig_mv_x >= -32) {
|
||||||
if (y * 8 + mv_y < 0 || y * 8 + mv_y >= h ||
|
if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 >= h ||
|
||||||
x * 8 + mv_x < 0 || x * 8 + mv_x >= w)
|
x * 8 + mv_x < 0 || x * 8 + mv_x + 8 >= w)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
|
copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
|
||||||
|
|
|
@ -228,7 +228,7 @@ static void lpc_prediction(int32_t *error_buffer, uint32_t *buffer_out,
|
||||||
sign = sign_only(val) * error_sign;
|
sign = sign_only(val) * error_sign;
|
||||||
lpc_coefs[j] -= sign;
|
lpc_coefs[j] -= sign;
|
||||||
val *= (unsigned)sign;
|
val *= (unsigned)sign;
|
||||||
error_val -= (val >> lpc_quant) * (j + 1);
|
error_val -= (val >> lpc_quant) * (j + 1U);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -776,6 +776,7 @@ extern AVCodec ff_mpeg2_qsv_encoder;
|
||||||
extern AVCodec ff_mpeg2_vaapi_encoder;
|
extern AVCodec ff_mpeg2_vaapi_encoder;
|
||||||
extern AVCodec ff_mpeg4_cuvid_decoder;
|
extern AVCodec ff_mpeg4_cuvid_decoder;
|
||||||
extern AVCodec ff_mpeg4_mediacodec_decoder;
|
extern AVCodec ff_mpeg4_mediacodec_decoder;
|
||||||
|
extern AVCodec ff_mpeg4_omx_encoder;
|
||||||
extern AVCodec ff_mpeg4_v4l2m2m_encoder;
|
extern AVCodec ff_mpeg4_v4l2m2m_encoder;
|
||||||
extern AVCodec ff_vc1_cuvid_decoder;
|
extern AVCodec ff_vc1_cuvid_decoder;
|
||||||
extern AVCodec ff_vp8_cuvid_decoder;
|
extern AVCodec ff_vp8_cuvid_decoder;
|
||||||
|
|
|
@ -496,6 +496,7 @@ static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
|
||||||
x = (overflow << rice->k) + get_bits(gb, rice->k);
|
x = (overflow << rice->k) + get_bits(gb, rice->k);
|
||||||
} else {
|
} else {
|
||||||
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
|
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
|
||||||
|
ctx->error = 1;
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
rice->ksum += x - (rice->ksum + 8 >> 4);
|
rice->ksum += x - (rice->ksum + 8 >> 4);
|
||||||
|
|
|
@ -105,7 +105,7 @@ int ff_ass_add_rect(AVSubtitle *sub, const char *dialog,
|
||||||
char *ass_str;
|
char *ass_str;
|
||||||
AVSubtitleRect **rects;
|
AVSubtitleRect **rects;
|
||||||
|
|
||||||
rects = av_realloc_array(sub->rects, (sub->num_rects+1), sizeof(*sub->rects));
|
rects = av_realloc_array(sub->rects, sub->num_rects+1, sizeof(*sub->rects));
|
||||||
if (!rects)
|
if (!rects)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
sub->rects = rects;
|
sub->rects = rects;
|
||||||
|
|
|
@ -223,8 +223,18 @@ static inline int parse_band_ext(ATRAC9Context *s, ATRAC9BlockData *b,
|
||||||
b->channel[0].band_ext = get_bits(gb, 2);
|
b->channel[0].band_ext = get_bits(gb, 2);
|
||||||
b->channel[0].band_ext = ext_band > 2 ? b->channel[0].band_ext : 4;
|
b->channel[0].band_ext = ext_band > 2 ? b->channel[0].band_ext : 4;
|
||||||
|
|
||||||
if (!get_bits(gb, 5))
|
if (!get_bits(gb, 5)) {
|
||||||
|
for (int i = 0; i <= stereo; i++) {
|
||||||
|
ATRAC9ChannelData *c = &b->channel[i];
|
||||||
|
const int count = at9_tab_band_ext_cnt[c->band_ext][ext_band];
|
||||||
|
for (int j = 0; j < count; j++) {
|
||||||
|
int len = at9_tab_band_ext_lengths[c->band_ext][ext_band][j];
|
||||||
|
c->band_ext_data[j] = av_clip_uintp2_c(c->band_ext_data[j], len);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
for (int i = 0; i <= stereo; i++) {
|
for (int i = 0; i <= stereo; i++) {
|
||||||
ATRAC9ChannelData *c = &b->channel[i];
|
ATRAC9ChannelData *c = &b->channel[i];
|
||||||
|
|
|
@ -656,6 +656,7 @@ enum AVCodecID {
|
||||||
AV_CODEC_ID_ATRAC9,
|
AV_CODEC_ID_ATRAC9,
|
||||||
AV_CODEC_ID_HCOM,
|
AV_CODEC_ID_HCOM,
|
||||||
AV_CODEC_ID_ACELP_KELVIN,
|
AV_CODEC_ID_ACELP_KELVIN,
|
||||||
|
AV_CODEC_ID_MPEGH_3D_AUDIO,
|
||||||
|
|
||||||
/* subtitle codecs */
|
/* subtitle codecs */
|
||||||
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
|
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
|
||||||
|
@ -1175,6 +1176,11 @@ typedef struct AVCPBProperties {
|
||||||
uint64_t vbv_delay;
|
uint64_t vbv_delay;
|
||||||
} AVCPBProperties;
|
} AVCPBProperties;
|
||||||
|
|
||||||
|
typedef struct AVProducerReferenceTime {
|
||||||
|
int64_t wallclock;
|
||||||
|
int flags;
|
||||||
|
} AVProducerReferenceTime;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The decoder will keep a reference to the frame and may reuse it later.
|
* The decoder will keep a reference to the frame and may reuse it later.
|
||||||
*/
|
*/
|
||||||
|
@ -1409,6 +1415,11 @@ enum AVPacketSideDataType {
|
||||||
*/
|
*/
|
||||||
AV_PKT_DATA_AFD,
|
AV_PKT_DATA_AFD,
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Producer Reference Time data corresponding to the AVProducerReferenceTime struct.
|
||||||
|
*/
|
||||||
|
AV_PKT_DATA_PRFT,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The number of side data types.
|
* The number of side data types.
|
||||||
* This is not part of the public API/ABI in the sense that it may
|
* This is not part of the public API/ABI in the sense that it may
|
||||||
|
|
|
@ -741,3 +741,25 @@ int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, i
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp)
|
||||||
|
{
|
||||||
|
AVProducerReferenceTime *prft;
|
||||||
|
uint8_t *side_data;
|
||||||
|
int side_data_size;
|
||||||
|
|
||||||
|
side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size);
|
||||||
|
if (!side_data) {
|
||||||
|
side_data_size = sizeof(AVProducerReferenceTime);
|
||||||
|
side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_PRFT, side_data_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!side_data || side_data_size < sizeof(AVProducerReferenceTime))
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
|
||||||
|
prft = (AVProducerReferenceTime *)side_data;
|
||||||
|
prft->wallclock = timestamp;
|
||||||
|
prft->flags = 0;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
|
@ -493,7 +493,7 @@ int ff_bgmc_decode_init(GetBitContext *gb, unsigned int *h,
|
||||||
|
|
||||||
*h = TOP_VALUE;
|
*h = TOP_VALUE;
|
||||||
*l = 0;
|
*l = 0;
|
||||||
*v = get_bits_long(gb, VALUE_BITS);
|
*v = get_bits(gb, VALUE_BITS);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -153,7 +153,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
|
||||||
static float get_float(GetBitContext *gb)
|
static float get_float(GetBitContext *gb)
|
||||||
{
|
{
|
||||||
int power = get_bits(gb, 5);
|
int power = get_bits(gb, 5);
|
||||||
float f = ldexpf(get_bits_long(gb, 23), power - 23);
|
float f = ldexpf(get_bits(gb, 23), power - 23);
|
||||||
if (get_bits1(gb))
|
if (get_bits1(gb))
|
||||||
f = -f;
|
f = -f;
|
||||||
return f;
|
return f;
|
||||||
|
|
|
@ -82,6 +82,7 @@ const AVClass *av_bsf_get_class(void)
|
||||||
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
|
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
|
||||||
{
|
{
|
||||||
AVBSFContext *ctx;
|
AVBSFContext *ctx;
|
||||||
|
AVBSFInternal *bsfi;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ctx = av_mallocz(sizeof(*ctx));
|
ctx = av_mallocz(sizeof(*ctx));
|
||||||
|
@ -98,14 +99,15 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->internal = av_mallocz(sizeof(*ctx->internal));
|
bsfi = av_mallocz(sizeof(*bsfi));
|
||||||
if (!ctx->internal) {
|
if (!bsfi) {
|
||||||
ret = AVERROR(ENOMEM);
|
ret = AVERROR(ENOMEM);
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
ctx->internal = bsfi;
|
||||||
|
|
||||||
ctx->internal->buffer_pkt = av_packet_alloc();
|
bsfi->buffer_pkt = av_packet_alloc();
|
||||||
if (!ctx->internal->buffer_pkt) {
|
if (!bsfi->buffer_pkt) {
|
||||||
ret = AVERROR(ENOMEM);
|
ret = AVERROR(ENOMEM);
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
@ -175,9 +177,11 @@ int av_bsf_init(AVBSFContext *ctx)
|
||||||
|
|
||||||
void av_bsf_flush(AVBSFContext *ctx)
|
void av_bsf_flush(AVBSFContext *ctx)
|
||||||
{
|
{
|
||||||
ctx->internal->eof = 0;
|
AVBSFInternal *bsfi = ctx->internal;
|
||||||
|
|
||||||
av_packet_unref(ctx->internal->buffer_pkt);
|
bsfi->eof = 0;
|
||||||
|
|
||||||
|
av_packet_unref(bsfi->buffer_pkt);
|
||||||
|
|
||||||
if (ctx->filter->flush)
|
if (ctx->filter->flush)
|
||||||
ctx->filter->flush(ctx);
|
ctx->filter->flush(ctx);
|
||||||
|
@ -185,26 +189,27 @@ void av_bsf_flush(AVBSFContext *ctx)
|
||||||
|
|
||||||
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
|
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
|
||||||
{
|
{
|
||||||
|
AVBSFInternal *bsfi = ctx->internal;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
|
if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
|
||||||
ctx->internal->eof = 1;
|
bsfi->eof = 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->internal->eof) {
|
if (bsfi->eof) {
|
||||||
av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
|
av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
|
||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->internal->buffer_pkt->data ||
|
if (bsfi->buffer_pkt->data ||
|
||||||
ctx->internal->buffer_pkt->side_data_elems)
|
bsfi->buffer_pkt->side_data_elems)
|
||||||
return AVERROR(EAGAIN);
|
return AVERROR(EAGAIN);
|
||||||
|
|
||||||
ret = av_packet_make_refcounted(pkt);
|
ret = av_packet_make_refcounted(pkt);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
av_packet_move_ref(ctx->internal->buffer_pkt, pkt);
|
av_packet_move_ref(bsfi->buffer_pkt, pkt);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -216,38 +221,38 @@ int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
|
||||||
|
|
||||||
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
|
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
|
||||||
{
|
{
|
||||||
AVBSFInternal *in = ctx->internal;
|
AVBSFInternal *bsfi = ctx->internal;
|
||||||
AVPacket *tmp_pkt;
|
AVPacket *tmp_pkt;
|
||||||
|
|
||||||
if (in->eof)
|
if (bsfi->eof)
|
||||||
return AVERROR_EOF;
|
return AVERROR_EOF;
|
||||||
|
|
||||||
if (!ctx->internal->buffer_pkt->data &&
|
if (!bsfi->buffer_pkt->data &&
|
||||||
!ctx->internal->buffer_pkt->side_data_elems)
|
!bsfi->buffer_pkt->side_data_elems)
|
||||||
return AVERROR(EAGAIN);
|
return AVERROR(EAGAIN);
|
||||||
|
|
||||||
tmp_pkt = av_packet_alloc();
|
tmp_pkt = av_packet_alloc();
|
||||||
if (!tmp_pkt)
|
if (!tmp_pkt)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
|
|
||||||
*pkt = ctx->internal->buffer_pkt;
|
*pkt = bsfi->buffer_pkt;
|
||||||
ctx->internal->buffer_pkt = tmp_pkt;
|
bsfi->buffer_pkt = tmp_pkt;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
|
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
|
||||||
{
|
{
|
||||||
AVBSFInternal *in = ctx->internal;
|
AVBSFInternal *bsfi = ctx->internal;
|
||||||
|
|
||||||
if (in->eof)
|
if (bsfi->eof)
|
||||||
return AVERROR_EOF;
|
return AVERROR_EOF;
|
||||||
|
|
||||||
if (!ctx->internal->buffer_pkt->data &&
|
if (!bsfi->buffer_pkt->data &&
|
||||||
!ctx->internal->buffer_pkt->side_data_elems)
|
!bsfi->buffer_pkt->side_data_elems)
|
||||||
return AVERROR(EAGAIN);
|
return AVERROR(EAGAIN);
|
||||||
|
|
||||||
av_packet_move_ref(pkt, ctx->internal->buffer_pkt);
|
av_packet_move_ref(pkt, bsfi->buffer_pkt);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -517,8 +522,8 @@ static int bsf_parse_single(const char *str, AVBSFList *bsf_lst)
|
||||||
|
|
||||||
ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options);
|
ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options);
|
||||||
|
|
||||||
av_dict_free(&bsf_options);
|
|
||||||
end:
|
end:
|
||||||
|
av_dict_free(&bsf_options);
|
||||||
av_free(buf);
|
av_free(buf);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
@ -105,7 +105,7 @@ typedef struct AV1RawSequenceHeader {
|
||||||
uint8_t use_128x128_superblock;
|
uint8_t use_128x128_superblock;
|
||||||
uint8_t enable_filter_intra;
|
uint8_t enable_filter_intra;
|
||||||
uint8_t enable_intra_edge_filter;
|
uint8_t enable_intra_edge_filter;
|
||||||
uint8_t enable_intraintra_compound;
|
uint8_t enable_interintra_compound;
|
||||||
uint8_t enable_masked_compound;
|
uint8_t enable_masked_compound;
|
||||||
uint8_t enable_warped_motion;
|
uint8_t enable_warped_motion;
|
||||||
uint8_t enable_dual_filter;
|
uint8_t enable_dual_filter;
|
||||||
|
@ -256,20 +256,20 @@ typedef struct AV1RawFrameHeader {
|
||||||
uint8_t update_grain;
|
uint8_t update_grain;
|
||||||
uint8_t film_grain_params_ref_idx;
|
uint8_t film_grain_params_ref_idx;
|
||||||
uint8_t num_y_points;
|
uint8_t num_y_points;
|
||||||
uint8_t point_y_value[16];
|
uint8_t point_y_value[14];
|
||||||
uint8_t point_y_scaling[16];
|
uint8_t point_y_scaling[14];
|
||||||
uint8_t chroma_scaling_from_luma;
|
uint8_t chroma_scaling_from_luma;
|
||||||
uint8_t num_cb_points;
|
uint8_t num_cb_points;
|
||||||
uint8_t point_cb_value[16];
|
uint8_t point_cb_value[10];
|
||||||
uint8_t point_cb_scaling[16];
|
uint8_t point_cb_scaling[10];
|
||||||
uint8_t num_cr_points;
|
uint8_t num_cr_points;
|
||||||
uint8_t point_cr_value[16];
|
uint8_t point_cr_value[10];
|
||||||
uint8_t point_cr_scaling[16];
|
uint8_t point_cr_scaling[10];
|
||||||
uint8_t grain_scaling_minus_8;
|
uint8_t grain_scaling_minus_8;
|
||||||
uint8_t ar_coeff_lag;
|
uint8_t ar_coeff_lag;
|
||||||
uint8_t ar_coeffs_y_plus_128[24];
|
uint8_t ar_coeffs_y_plus_128[24];
|
||||||
uint8_t ar_coeffs_cb_plus_128[24];
|
uint8_t ar_coeffs_cb_plus_128[25];
|
||||||
uint8_t ar_coeffs_cr_plus_128[24];
|
uint8_t ar_coeffs_cr_plus_128[25];
|
||||||
uint8_t ar_coeff_shift_minus_6;
|
uint8_t ar_coeff_shift_minus_6;
|
||||||
uint8_t grain_scale_shift;
|
uint8_t grain_scale_shift;
|
||||||
uint8_t cb_mult;
|
uint8_t cb_mult;
|
||||||
|
|
|
@ -268,7 +268,7 @@ static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
flag(enable_intra_edge_filter);
|
flag(enable_intra_edge_filter);
|
||||||
|
|
||||||
if (current->reduced_still_picture_header) {
|
if (current->reduced_still_picture_header) {
|
||||||
infer(enable_intraintra_compound, 0);
|
infer(enable_interintra_compound, 0);
|
||||||
infer(enable_masked_compound, 0);
|
infer(enable_masked_compound, 0);
|
||||||
infer(enable_warped_motion, 0);
|
infer(enable_warped_motion, 0);
|
||||||
infer(enable_dual_filter, 0);
|
infer(enable_dual_filter, 0);
|
||||||
|
@ -281,7 +281,7 @@ static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
infer(seq_force_integer_mv,
|
infer(seq_force_integer_mv,
|
||||||
AV1_SELECT_INTEGER_MV);
|
AV1_SELECT_INTEGER_MV);
|
||||||
} else {
|
} else {
|
||||||
flag(enable_intraintra_compound);
|
flag(enable_interintra_compound);
|
||||||
flag(enable_masked_compound);
|
flag(enable_masked_compound);
|
||||||
flag(enable_warped_motion);
|
flag(enable_warped_motion);
|
||||||
flag(enable_dual_filter);
|
flag(enable_dual_filter);
|
||||||
|
@ -1155,7 +1155,7 @@ static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
fb(4, num_y_points);
|
fc(4, num_y_points, 0, 14);
|
||||||
for (i = 0; i < current->num_y_points; i++) {
|
for (i = 0; i < current->num_y_points; i++) {
|
||||||
fbs(8, point_y_value[i], 1, i);
|
fbs(8, point_y_value[i], 1, i);
|
||||||
fbs(8, point_y_scaling[i], 1, i);
|
fbs(8, point_y_scaling[i], 1, i);
|
||||||
|
@ -1174,12 +1174,12 @@ static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
infer(num_cb_points, 0);
|
infer(num_cb_points, 0);
|
||||||
infer(num_cr_points, 0);
|
infer(num_cr_points, 0);
|
||||||
} else {
|
} else {
|
||||||
fb(4, num_cb_points);
|
fc(4, num_cb_points, 0, 10);
|
||||||
for (i = 0; i < current->num_cb_points; i++) {
|
for (i = 0; i < current->num_cb_points; i++) {
|
||||||
fbs(8, point_cb_value[i], 1, i);
|
fbs(8, point_cb_value[i], 1, i);
|
||||||
fbs(8, point_cb_scaling[i], 1, i);
|
fbs(8, point_cb_scaling[i], 1, i);
|
||||||
}
|
}
|
||||||
fb(4, num_cr_points);
|
fc(4, num_cr_points, 0, 10);
|
||||||
for (i = 0; i < current->num_cr_points; i++) {
|
for (i = 0; i < current->num_cr_points; i++) {
|
||||||
fbs(8, point_cr_value[i], 1, i);
|
fbs(8, point_cr_value[i], 1, i);
|
||||||
fbs(8, point_cr_scaling[i], 1, i);
|
fbs(8, point_cr_scaling[i], 1, i);
|
||||||
|
|
|
@ -568,7 +568,10 @@ static int cbs_h2645_fragment_add_nals(CodedBitstreamContext *ctx,
|
||||||
// Remove trailing zeroes.
|
// Remove trailing zeroes.
|
||||||
while (size > 0 && nal->data[size - 1] == 0)
|
while (size > 0 && nal->data[size - 1] == 0)
|
||||||
--size;
|
--size;
|
||||||
av_assert0(size > 0);
|
if (size == 0) {
|
||||||
|
av_log(ctx->log_ctx, AV_LOG_VERBOSE, "Discarding empty 0 NAL unit\n");
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
ref = (nal->data == nal->raw_data) ? frag->data_ref
|
ref = (nal->data == nal->raw_data) ? frag->data_ref
|
||||||
: packet->rbsp.rbsp_buffer_ref;
|
: packet->rbsp.rbsp_buffer_ref;
|
||||||
|
|
|
@ -954,6 +954,7 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
current->payload[k].payload_type = payload_type;
|
current->payload[k].payload_type = payload_type;
|
||||||
current->payload[k].payload_size = payload_size;
|
current->payload[k].payload_size = payload_size;
|
||||||
|
|
||||||
|
current->payload_count++;
|
||||||
CHECK(FUNC(sei_payload)(ctx, rw, ¤t->payload[k]));
|
CHECK(FUNC(sei_payload)(ctx, rw, ¤t->payload[k]));
|
||||||
|
|
||||||
if (!cbs_h2645_read_more_rbsp_data(rw))
|
if (!cbs_h2645_read_more_rbsp_data(rw))
|
||||||
|
@ -964,7 +965,6 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
"SEI message: found %d.\n", k);
|
"SEI message: found %d.\n", k);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
current->payload_count = k + 1;
|
|
||||||
#else
|
#else
|
||||||
for (k = 0; k < current->payload_count; k++) {
|
for (k = 0; k < current->payload_count; k++) {
|
||||||
PutBitContext start_state;
|
PutBitContext start_state;
|
||||||
|
|
|
@ -2184,6 +2184,7 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
current->payload[k].payload_type = payload_type;
|
current->payload[k].payload_type = payload_type;
|
||||||
current->payload[k].payload_size = payload_size;
|
current->payload[k].payload_size = payload_size;
|
||||||
|
|
||||||
|
current->payload_count++;
|
||||||
CHECK(FUNC(sei_payload)(ctx, rw, ¤t->payload[k], prefix));
|
CHECK(FUNC(sei_payload)(ctx, rw, ¤t->payload[k], prefix));
|
||||||
|
|
||||||
if (!cbs_h2645_read_more_rbsp_data(rw))
|
if (!cbs_h2645_read_more_rbsp_data(rw))
|
||||||
|
@ -2194,7 +2195,6 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
|
||||||
"SEI message: found %d.\n", k);
|
"SEI message: found %d.\n", k);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
current->payload_count = k + 1;
|
|
||||||
#else
|
#else
|
||||||
for (k = 0; k < current->payload_count; k++) {
|
for (k = 0; k < current->payload_count; k++) {
|
||||||
PutBitContext start_state;
|
PutBitContext start_state;
|
||||||
|
|
|
@ -416,6 +416,9 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
|
||||||
uint8_t superframe_header;
|
uint8_t superframe_header;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
if (frag->data_size == 0)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
// Last byte in the packet.
|
// Last byte in the packet.
|
||||||
superframe_header = frag->data[frag->data_size - 1];
|
superframe_header = frag->data[frag->data_size - 1];
|
||||||
|
|
||||||
|
@ -428,6 +431,9 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
|
||||||
index_size = 2 + (((superframe_header & 0x18) >> 3) + 1) *
|
index_size = 2 + (((superframe_header & 0x18) >> 3) + 1) *
|
||||||
((superframe_header & 0x07) + 1);
|
((superframe_header & 0x07) + 1);
|
||||||
|
|
||||||
|
if (index_size > frag->data_size)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
err = init_get_bits(&gbc, frag->data + frag->data_size - index_size,
|
err = init_get_bits(&gbc, frag->data + frag->data_size - index_size,
|
||||||
8 * index_size);
|
8 * index_size);
|
||||||
if (err < 0)
|
if (err < 0)
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -759,7 +759,7 @@ static int decouple_info(COOKContext *q, COOKSubpacket *p, int *decouple_tab)
|
||||||
for (i = 0; i < length; i++)
|
for (i = 0; i < length; i++)
|
||||||
decouple_tab[start + i] = get_vlc2(&q->gb,
|
decouple_tab[start + i] = get_vlc2(&q->gb,
|
||||||
p->channel_coupling.table,
|
p->channel_coupling.table,
|
||||||
p->channel_coupling.bits, 2);
|
p->channel_coupling.bits, 3);
|
||||||
else
|
else
|
||||||
for (i = 0; i < length; i++) {
|
for (i = 0; i < length; i++) {
|
||||||
int v = get_bits(&q->gb, p->js_vlc_bits);
|
int v = get_bits(&q->gb, p->js_vlc_bits);
|
||||||
|
|
|
@ -479,32 +479,32 @@ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
|
||||||
|
|
||||||
side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
|
side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
|
||||||
if(side && side_size>=10) {
|
if(side && side_size>=10) {
|
||||||
avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
|
avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
|
||||||
discard_padding = AV_RL32(side + 4);
|
discard_padding = AV_RL32(side + 4);
|
||||||
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
|
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
|
||||||
avctx->internal->skip_samples, (int)discard_padding);
|
avci->skip_samples, (int)discard_padding);
|
||||||
skip_reason = AV_RL8(side + 8);
|
skip_reason = AV_RL8(side + 8);
|
||||||
discard_reason = AV_RL8(side + 9);
|
discard_reason = AV_RL8(side + 9);
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
|
if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
|
||||||
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
|
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
|
||||||
avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
|
avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
|
||||||
got_frame = 0;
|
got_frame = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (avctx->internal->skip_samples > 0 && got_frame &&
|
if (avci->skip_samples > 0 && got_frame &&
|
||||||
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
|
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
|
||||||
if(frame->nb_samples <= avctx->internal->skip_samples){
|
if(frame->nb_samples <= avci->skip_samples){
|
||||||
got_frame = 0;
|
got_frame = 0;
|
||||||
avctx->internal->skip_samples -= frame->nb_samples;
|
avci->skip_samples -= frame->nb_samples;
|
||||||
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
|
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
|
||||||
avctx->internal->skip_samples);
|
avci->skip_samples);
|
||||||
} else {
|
} else {
|
||||||
av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
|
av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
|
||||||
frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
|
frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
|
||||||
if(avctx->pkt_timebase.num && avctx->sample_rate) {
|
if(avctx->pkt_timebase.num && avctx->sample_rate) {
|
||||||
int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
|
int64_t diff_ts = av_rescale_q(avci->skip_samples,
|
||||||
(AVRational){1, avctx->sample_rate},
|
(AVRational){1, avctx->sample_rate},
|
||||||
avctx->pkt_timebase);
|
avctx->pkt_timebase);
|
||||||
if(frame->pts!=AV_NOPTS_VALUE)
|
if(frame->pts!=AV_NOPTS_VALUE)
|
||||||
|
@ -523,9 +523,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
|
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
|
||||||
}
|
}
|
||||||
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
|
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
|
||||||
avctx->internal->skip_samples, frame->nb_samples);
|
avci->skip_samples, frame->nb_samples);
|
||||||
frame->nb_samples -= avctx->internal->skip_samples;
|
frame->nb_samples -= avci->skip_samples;
|
||||||
avctx->internal->skip_samples = 0;
|
avci->skip_samples = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -551,11 +551,11 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
|
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
|
||||||
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
|
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
|
||||||
if (fside) {
|
if (fside) {
|
||||||
AV_WL32(fside->data, avctx->internal->skip_samples);
|
AV_WL32(fside->data, avci->skip_samples);
|
||||||
AV_WL32(fside->data + 4, discard_padding);
|
AV_WL32(fside->data + 4, discard_padding);
|
||||||
AV_WL8(fside->data + 8, skip_reason);
|
AV_WL8(fside->data + 8, skip_reason);
|
||||||
AV_WL8(fside->data + 9, discard_reason);
|
AV_WL8(fside->data + 9, discard_reason);
|
||||||
avctx->internal->skip_samples = 0;
|
avci->skip_samples = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -580,7 +580,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
|
|
||||||
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
|
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
|
||||||
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
|
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
|
||||||
if (avctx->internal->draining && !actual_got_frame) {
|
if (avci->draining && !actual_got_frame) {
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
/* prevent infinite loop if a decoder wrongly always return error on draining */
|
/* prevent infinite loop if a decoder wrongly always return error on draining */
|
||||||
/* reasonable nb_errors_max = maximum b frames + thread count */
|
/* reasonable nb_errors_max = maximum b frames + thread count */
|
||||||
|
@ -1925,7 +1925,7 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
|
||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
}
|
}
|
||||||
} else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
} else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||||
if (frame->nb_samples * avctx->channels > avctx->max_samples) {
|
if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
|
av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
|
||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
}
|
}
|
||||||
|
@ -2030,15 +2030,17 @@ static void bsfs_flush(AVCodecContext *avctx)
|
||||||
|
|
||||||
void avcodec_flush_buffers(AVCodecContext *avctx)
|
void avcodec_flush_buffers(AVCodecContext *avctx)
|
||||||
{
|
{
|
||||||
avctx->internal->draining = 0;
|
AVCodecInternal *avci = avctx->internal;
|
||||||
avctx->internal->draining_done = 0;
|
|
||||||
avctx->internal->nb_draining_errors = 0;
|
|
||||||
av_frame_unref(avctx->internal->buffer_frame);
|
|
||||||
av_frame_unref(avctx->internal->compat_decode_frame);
|
|
||||||
av_packet_unref(avctx->internal->buffer_pkt);
|
|
||||||
avctx->internal->buffer_pkt_valid = 0;
|
|
||||||
|
|
||||||
av_packet_unref(avctx->internal->ds.in_pkt);
|
avci->draining = 0;
|
||||||
|
avci->draining_done = 0;
|
||||||
|
avci->nb_draining_errors = 0;
|
||||||
|
av_frame_unref(avci->buffer_frame);
|
||||||
|
av_frame_unref(avci->compat_decode_frame);
|
||||||
|
av_packet_unref(avci->buffer_pkt);
|
||||||
|
avci->buffer_pkt_valid = 0;
|
||||||
|
|
||||||
|
av_packet_unref(avci->ds.in_pkt);
|
||||||
|
|
||||||
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
|
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
|
||||||
ff_thread_flush(avctx);
|
ff_thread_flush(avctx);
|
||||||
|
@ -2051,7 +2053,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
|
||||||
bsfs_flush(avctx);
|
bsfs_flush(avctx);
|
||||||
|
|
||||||
if (!avctx->refcounted_frames)
|
if (!avctx->refcounted_frames)
|
||||||
av_frame_unref(avctx->internal->to_free);
|
av_frame_unref(avci->to_free);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
|
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
|
||||||
|
|
|
@ -121,7 +121,7 @@ static int read_map(GetBitContext *gb, Table *t, unsigned int map[DST_MAX_CHANNE
|
||||||
|
|
||||||
static av_always_inline int get_sr_golomb_dst(GetBitContext *gb, unsigned int k)
|
static av_always_inline int get_sr_golomb_dst(GetBitContext *gb, unsigned int k)
|
||||||
{
|
{
|
||||||
int v = get_ur_golomb(gb, k, get_bits_left(gb), 0);
|
int v = get_ur_golomb_jpegls(gb, k, get_bits_left(gb), 0);
|
||||||
if (v && get_bits1(gb))
|
if (v && get_bits1(gb))
|
||||||
v = -v;
|
v = -v;
|
||||||
return v;
|
return v;
|
||||||
|
|
|
@ -252,7 +252,7 @@ static int escape124_decode_frame(AVCodecContext *avctx,
|
||||||
if (i == 2) {
|
if (i == 2) {
|
||||||
// This codebook can be cut off at places other than
|
// This codebook can be cut off at places other than
|
||||||
// powers of 2, leaving some of the entries undefined.
|
// powers of 2, leaving some of the entries undefined.
|
||||||
cb_size = get_bits_long(&gb, 20);
|
cb_size = get_bits(&gb, 20);
|
||||||
if (!cb_size) {
|
if (!cb_size) {
|
||||||
av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n");
|
av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n");
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
|
@ -37,7 +37,7 @@ static inline int RENAME(get_context)(PlaneContext *p, TYPE *src,
|
||||||
const int RT = last[1];
|
const int RT = last[1];
|
||||||
const int L = src[-1];
|
const int L = src[-1];
|
||||||
|
|
||||||
if (p->quant_table[3][127]) {
|
if (p->quant_table[3][127] || p->quant_table[4][127]) {
|
||||||
const int TT = last2[0];
|
const int TT = last2[0];
|
||||||
const int LL = src[-2];
|
const int LL = src[-2];
|
||||||
return p->quant_table[0][(L - LT) & 0xFF] +
|
return p->quant_table[0][(L - LT) & 0xFF] +
|
||||||
|
|
|
@ -350,7 +350,8 @@ fail:
|
||||||
static void wavesynth_synth_sample(struct wavesynth_context *ws, int64_t ts,
|
static void wavesynth_synth_sample(struct wavesynth_context *ws, int64_t ts,
|
||||||
int32_t *channels)
|
int32_t *channels)
|
||||||
{
|
{
|
||||||
int32_t amp, val, *cv;
|
int32_t amp, *cv;
|
||||||
|
unsigned val;
|
||||||
struct ws_interval *in;
|
struct ws_interval *in;
|
||||||
int i, *last, pink;
|
int i, *last, pink;
|
||||||
uint32_t c, all_ch = 0;
|
uint32_t c, all_ch = 0;
|
||||||
|
|
|
@ -217,9 +217,9 @@ int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
|
||||||
}
|
}
|
||||||
|
|
||||||
skip_bits(&gb, 24); /* skip min frame size */
|
skip_bits(&gb, 24); /* skip min frame size */
|
||||||
s->max_framesize = get_bits_long(&gb, 24);
|
s->max_framesize = get_bits(&gb, 24);
|
||||||
|
|
||||||
s->samplerate = get_bits_long(&gb, 20);
|
s->samplerate = get_bits(&gb, 20);
|
||||||
s->channels = get_bits(&gb, 3) + 1;
|
s->channels = get_bits(&gb, 3) + 1;
|
||||||
s->bps = get_bits(&gb, 5) + 1;
|
s->bps = get_bits(&gb, 5) + 1;
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@ int ff_flv_decode_picture_header(MpegEncContext *s)
|
||||||
int format, width, height;
|
int format, width, height;
|
||||||
|
|
||||||
/* picture header */
|
/* picture header */
|
||||||
if (get_bits_long(&s->gb, 17) != 1) {
|
if (get_bits(&s->gb, 17) != 1) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
|
@ -486,14 +486,14 @@ static int16_t apply_tilt_comp(int16_t* out, int16_t* res_pst, int refl_coeff,
|
||||||
|
|
||||||
if (refl_coeff > 0) {
|
if (refl_coeff > 0) {
|
||||||
gt = (refl_coeff * G729_TILT_FACTOR_PLUS + 0x4000) >> 15;
|
gt = (refl_coeff * G729_TILT_FACTOR_PLUS + 0x4000) >> 15;
|
||||||
fact = 0x4000; // 0.5 in (0.15)
|
fact = 0x2000; // 0.5 in (0.15)
|
||||||
sh_fact = 15;
|
sh_fact = 14;
|
||||||
} else {
|
} else {
|
||||||
gt = (refl_coeff * G729_TILT_FACTOR_MINUS + 0x4000) >> 15;
|
gt = (refl_coeff * G729_TILT_FACTOR_MINUS + 0x4000) >> 15;
|
||||||
fact = 0x800; // 0.5 in (3.12)
|
fact = 0x400; // 0.5 in (3.12)
|
||||||
sh_fact = 12;
|
sh_fact = 11;
|
||||||
}
|
}
|
||||||
ga = (fact << 15) / av_clip_int16(32768 - FFABS(gt));
|
ga = (fact << 16) / av_clip_int16(32768 - FFABS(gt));
|
||||||
gt >>= 1;
|
gt >>= 1;
|
||||||
|
|
||||||
/* Apply tilt compensation filter to signal. */
|
/* Apply tilt compensation filter to signal. */
|
||||||
|
@ -503,12 +503,12 @@ static int16_t apply_tilt_comp(int16_t* out, int16_t* res_pst, int refl_coeff,
|
||||||
tmp2 = (gt * res_pst[i-1]) * 2 + 0x4000;
|
tmp2 = (gt * res_pst[i-1]) * 2 + 0x4000;
|
||||||
tmp2 = res_pst[i] + (tmp2 >> 15);
|
tmp2 = res_pst[i] + (tmp2 >> 15);
|
||||||
|
|
||||||
tmp2 = (tmp2 * ga * 2 + fact) >> sh_fact;
|
tmp2 = (tmp2 * ga + fact) >> sh_fact;
|
||||||
out[i] = tmp2;
|
out[i] = tmp2;
|
||||||
}
|
}
|
||||||
tmp2 = (gt * ht_prev_data) * 2 + 0x4000;
|
tmp2 = (gt * ht_prev_data) * 2 + 0x4000;
|
||||||
tmp2 = res_pst[0] + (tmp2 >> 15);
|
tmp2 = res_pst[0] + (tmp2 >> 15);
|
||||||
tmp2 = (tmp2 * ga * 2 + fact) >> sh_fact;
|
tmp2 = (tmp2 * ga + fact) >> sh_fact;
|
||||||
out[0] = tmp2;
|
out[0] = tmp2;
|
||||||
|
|
||||||
return tmp;
|
return tmp;
|
||||||
|
|
|
@ -313,7 +313,7 @@ static inline int get_interleaved_se_golomb(GetBitContext *gb)
|
||||||
} else {
|
} else {
|
||||||
int log;
|
int log;
|
||||||
skip_bits(gb, 8);
|
skip_bits(gb, 8);
|
||||||
buf |= 1 | show_bits_long(gb, 24);
|
buf |= 1 | show_bits(gb, 24);
|
||||||
|
|
||||||
if ((buf & 0xAAAAAAAA) == 0)
|
if ((buf & 0xAAAAAAAA) == 0)
|
||||||
return INVALID_VLC;
|
return INVALID_VLC;
|
||||||
|
|
|
@ -381,7 +381,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
|
||||||
} else {
|
} else {
|
||||||
goto invalid_user_data;
|
goto invalid_user_data;
|
||||||
}
|
}
|
||||||
if (i & 1)
|
if (j & 1)
|
||||||
udu->uuid_iso_iec_11578[j / 2] |= v;
|
udu->uuid_iso_iec_11578[j / 2] |= v;
|
||||||
else
|
else
|
||||||
udu->uuid_iso_iec_11578[j / 2] = v << 4;
|
udu->uuid_iso_iec_11578[j / 2] = v << 4;
|
||||||
|
|
|
@ -21,6 +21,7 @@
|
||||||
|
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
|
|
||||||
|
#include "libavutil/avassert.h"
|
||||||
#include "libavutil/intreadwrite.h"
|
#include "libavutil/intreadwrite.h"
|
||||||
#include "libavutil/mem.h"
|
#include "libavutil/mem.h"
|
||||||
|
|
||||||
|
@ -68,7 +69,7 @@ static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding)
|
||||||
{
|
{
|
||||||
H264BSFContext *s = ctx->priv_data;
|
H264BSFContext *s = ctx->priv_data;
|
||||||
uint16_t unit_size;
|
uint16_t unit_size;
|
||||||
uint64_t total_size = 0;
|
uint32_t total_size = 0;
|
||||||
uint8_t *out = NULL, unit_nb, sps_done = 0,
|
uint8_t *out = NULL, unit_nb, sps_done = 0,
|
||||||
sps_seen = 0, pps_seen = 0;
|
sps_seen = 0, pps_seen = 0;
|
||||||
const uint8_t *extradata = ctx->par_in->extradata + 4;
|
const uint8_t *extradata = ctx->par_in->extradata + 4;
|
||||||
|
@ -91,12 +92,7 @@ static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding)
|
||||||
|
|
||||||
unit_size = AV_RB16(extradata);
|
unit_size = AV_RB16(extradata);
|
||||||
total_size += unit_size + 4;
|
total_size += unit_size + 4;
|
||||||
if (total_size > INT_MAX - padding) {
|
av_assert1(total_size <= INT_MAX - padding);
|
||||||
av_log(ctx, AV_LOG_ERROR,
|
|
||||||
"Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
|
|
||||||
av_free(out);
|
|
||||||
return AVERROR(EINVAL);
|
|
||||||
}
|
|
||||||
if (extradata + 2 + unit_size > ctx->par_in->extradata + ctx->par_in->extradata_size) {
|
if (extradata + 2 + unit_size > ctx->par_in->extradata + ctx->par_in->extradata_size) {
|
||||||
av_log(ctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
|
av_log(ctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
|
||||||
"corrupted stream or invalid MP4/AVCC bitstream\n");
|
"corrupted stream or invalid MP4/AVCC bitstream\n");
|
||||||
|
|
|
@ -186,7 +186,7 @@ static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx
|
||||||
}
|
}
|
||||||
|
|
||||||
if (show_bits1(gb) && get_bits_left(gb) < 10) {
|
if (show_bits1(gb) && get_bits_left(gb) < 10) {
|
||||||
av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n");
|
av_log(avctx, AV_LOG_WARNING, "Truncated VUI (%d)\n", get_bits_left(gb));
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -247,14 +247,14 @@ static int decode_unregistered_user_data(H264SEIUnregistered *h, GetBitContext *
|
||||||
uint8_t *user_data;
|
uint8_t *user_data;
|
||||||
int e, build, i;
|
int e, build, i;
|
||||||
|
|
||||||
if (size < 16 || size >= INT_MAX - 16)
|
if (size < 16 || size >= INT_MAX - 1)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
user_data = av_malloc(16 + size + 1);
|
user_data = av_malloc(size + 1);
|
||||||
if (!user_data)
|
if (!user_data)
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(ENOMEM);
|
||||||
|
|
||||||
for (i = 0; i < size + 16; i++)
|
for (i = 0; i < size; i++)
|
||||||
user_data[i] = get_bits(gb, 8);
|
user_data[i] = get_bits(gb, 8);
|
||||||
|
|
||||||
user_data[i] = 0;
|
user_data[i] = 0;
|
||||||
|
|
|
@ -832,8 +832,6 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
|
||||||
|
|
||||||
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
|
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
|
||||||
|
|
||||||
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
|
|
||||||
const H2645NAL *nal);
|
|
||||||
/**
|
/**
|
||||||
* Submit a slice for decoding.
|
* Submit a slice for decoding.
|
||||||
*
|
*
|
||||||
|
|
|
@ -66,7 +66,7 @@ static const int8_t num_bins_in_se[] = {
|
||||||
1, // no_residual_data_flag
|
1, // no_residual_data_flag
|
||||||
3, // split_transform_flag
|
3, // split_transform_flag
|
||||||
2, // cbf_luma
|
2, // cbf_luma
|
||||||
4, // cbf_cb, cbf_cr
|
5, // cbf_cb, cbf_cr
|
||||||
2, // transform_skip_flag[][]
|
2, // transform_skip_flag[][]
|
||||||
2, // explicit_rdpcm_flag[][]
|
2, // explicit_rdpcm_flag[][]
|
||||||
2, // explicit_rdpcm_dir_flag[][]
|
2, // explicit_rdpcm_dir_flag[][]
|
||||||
|
@ -122,23 +122,23 @@ static const int elem_offset[sizeof(num_bins_in_se)] = {
|
||||||
37, // split_transform_flag
|
37, // split_transform_flag
|
||||||
40, // cbf_luma
|
40, // cbf_luma
|
||||||
42, // cbf_cb, cbf_cr
|
42, // cbf_cb, cbf_cr
|
||||||
46, // transform_skip_flag[][]
|
47, // transform_skip_flag[][]
|
||||||
48, // explicit_rdpcm_flag[][]
|
49, // explicit_rdpcm_flag[][]
|
||||||
50, // explicit_rdpcm_dir_flag[][]
|
51, // explicit_rdpcm_dir_flag[][]
|
||||||
52, // last_significant_coeff_x_prefix
|
53, // last_significant_coeff_x_prefix
|
||||||
70, // last_significant_coeff_y_prefix
|
71, // last_significant_coeff_y_prefix
|
||||||
88, // last_significant_coeff_x_suffix
|
89, // last_significant_coeff_x_suffix
|
||||||
88, // last_significant_coeff_y_suffix
|
89, // last_significant_coeff_y_suffix
|
||||||
88, // significant_coeff_group_flag
|
89, // significant_coeff_group_flag
|
||||||
92, // significant_coeff_flag
|
93, // significant_coeff_flag
|
||||||
136, // coeff_abs_level_greater1_flag
|
137, // coeff_abs_level_greater1_flag
|
||||||
160, // coeff_abs_level_greater2_flag
|
161, // coeff_abs_level_greater2_flag
|
||||||
166, // coeff_abs_level_remaining
|
167, // coeff_abs_level_remaining
|
||||||
166, // coeff_sign_flag
|
167, // coeff_sign_flag
|
||||||
166, // log2_res_scale_abs
|
167, // log2_res_scale_abs
|
||||||
174, // res_scale_sign_flag
|
175, // res_scale_sign_flag
|
||||||
176, // cu_chroma_qp_offset_flag
|
177, // cu_chroma_qp_offset_flag
|
||||||
177, // cu_chroma_qp_offset_idx
|
178, // cu_chroma_qp_offset_idx
|
||||||
};
|
};
|
||||||
|
|
||||||
#define CNU 154
|
#define CNU 154
|
||||||
|
@ -189,7 +189,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = {
|
||||||
// cbf_luma
|
// cbf_luma
|
||||||
111, 141,
|
111, 141,
|
||||||
// cbf_cb, cbf_cr
|
// cbf_cb, cbf_cr
|
||||||
94, 138, 182, 154,
|
94, 138, 182, 154, 154,
|
||||||
// transform_skip_flag
|
// transform_skip_flag
|
||||||
139, 139,
|
139, 139,
|
||||||
// explicit_rdpcm_flag
|
// explicit_rdpcm_flag
|
||||||
|
@ -266,7 +266,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = {
|
||||||
// cbf_luma
|
// cbf_luma
|
||||||
153, 111,
|
153, 111,
|
||||||
// cbf_cb, cbf_cr
|
// cbf_cb, cbf_cr
|
||||||
149, 107, 167, 154,
|
149, 107, 167, 154, 154,
|
||||||
// transform_skip_flag
|
// transform_skip_flag
|
||||||
139, 139,
|
139, 139,
|
||||||
// explicit_rdpcm_flag
|
// explicit_rdpcm_flag
|
||||||
|
@ -343,7 +343,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = {
|
||||||
// cbf_luma
|
// cbf_luma
|
||||||
153, 111,
|
153, 111,
|
||||||
// cbf_cb, cbf_cr
|
// cbf_cb, cbf_cr
|
||||||
149, 92, 167, 154,
|
149, 92, 167, 154, 154,
|
||||||
// transform_skip_flag
|
// transform_skip_flag
|
||||||
139, 139,
|
139, 139,
|
||||||
// explicit_rdpcm_flag
|
// explicit_rdpcm_flag
|
||||||
|
|
|
@ -76,8 +76,8 @@ static int decode_nal_sei_mastering_display_info(HEVCSEIMasteringDisplay *s, Get
|
||||||
static int decode_nal_sei_content_light_info(HEVCSEIContentLight *s, GetBitContext *gb)
|
static int decode_nal_sei_content_light_info(HEVCSEIContentLight *s, GetBitContext *gb)
|
||||||
{
|
{
|
||||||
// Max and average light levels
|
// Max and average light levels
|
||||||
s->max_content_light_level = get_bits_long(gb, 16);
|
s->max_content_light_level = get_bits(gb, 16);
|
||||||
s->max_pic_average_light_level = get_bits_long(gb, 16);
|
s->max_pic_average_light_level = get_bits(gb, 16);
|
||||||
// As this SEI message comes before the first frame that references it,
|
// As this SEI message comes before the first frame that references it,
|
||||||
// initialize the flag to 2 and decrement on IRAP access unit so it
|
// initialize the flag to 2 and decrement on IRAP access unit so it
|
||||||
// persists for the coded video sequence (e.g., between two IRAPs)
|
// persists for the coded video sequence (e.g., between two IRAPs)
|
||||||
|
@ -177,7 +177,8 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB
|
||||||
size -= 2;
|
size -= 2;
|
||||||
|
|
||||||
if (cc_count && size >= cc_count * 3) {
|
if (cc_count && size >= cc_count * 3) {
|
||||||
const uint64_t new_size = (s->a53_caption_size + cc_count
|
int old_size = s->buf_ref ? s->buf_ref->size : 0;
|
||||||
|
const uint64_t new_size = (old_size + cc_count
|
||||||
* UINT64_C(3));
|
* UINT64_C(3));
|
||||||
int i, ret;
|
int i, ret;
|
||||||
|
|
||||||
|
@ -185,14 +186,14 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB
|
||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
|
|
||||||
/* Allow merging of the cc data from two fields. */
|
/* Allow merging of the cc data from two fields. */
|
||||||
ret = av_reallocp(&s->a53_caption, new_size);
|
ret = av_buffer_realloc(&s->buf_ref, new_size);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
for (i = 0; i < cc_count; i++) {
|
for (i = 0; i < cc_count; i++) {
|
||||||
s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8);
|
s->buf_ref->data[old_size++] = get_bits(gb, 8);
|
||||||
s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8);
|
s->buf_ref->data[old_size++] = get_bits(gb, 8);
|
||||||
s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8);
|
s->buf_ref->data[old_size++] = get_bits(gb, 8);
|
||||||
}
|
}
|
||||||
skip_bits(gb, 8); // marker_bits
|
skip_bits(gb, 8); // marker_bits
|
||||||
}
|
}
|
||||||
|
@ -363,6 +364,5 @@ int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s,
|
||||||
|
|
||||||
void ff_hevc_reset_sei(HEVCSEI *s)
|
void ff_hevc_reset_sei(HEVCSEI *s)
|
||||||
{
|
{
|
||||||
s->a53_caption.a53_caption_size = 0;
|
av_buffer_unref(&s->a53_caption.buf_ref);
|
||||||
av_freep(&s->a53_caption.a53_caption);
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -83,8 +83,7 @@ typedef struct HEVCSEIPictureTiming {
|
||||||
} HEVCSEIPictureTiming;
|
} HEVCSEIPictureTiming;
|
||||||
|
|
||||||
typedef struct HEVCSEIA53Caption {
|
typedef struct HEVCSEIA53Caption {
|
||||||
int a53_caption_size;
|
AVBufferRef *buf_ref;
|
||||||
uint8_t *a53_caption;
|
|
||||||
} HEVCSEIA53Caption;
|
} HEVCSEIA53Caption;
|
||||||
|
|
||||||
typedef struct HEVCSEIMasteringDisplay {
|
typedef struct HEVCSEIMasteringDisplay {
|
||||||
|
|
|
@ -2778,14 +2778,14 @@ static int set_side_data(HEVCContext *s)
|
||||||
metadata->MaxCLL, metadata->MaxFALL);
|
metadata->MaxCLL, metadata->MaxFALL);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (s->sei.a53_caption.a53_caption) {
|
if (s->sei.a53_caption.buf_ref) {
|
||||||
AVFrameSideData* sd = av_frame_new_side_data(out,
|
HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
|
||||||
AV_FRAME_DATA_A53_CC,
|
|
||||||
s->sei.a53_caption.a53_caption_size);
|
AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
|
||||||
if (sd)
|
if (!sd)
|
||||||
memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size);
|
av_buffer_unref(&a53->buf_ref);
|
||||||
av_freep(&s->sei.a53_caption.a53_caption);
|
a53->buf_ref = NULL;
|
||||||
s->sei.a53_caption.a53_caption_size = 0;
|
|
||||||
s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
|
s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3463,6 +3463,13 @@ static int hevc_update_thread_context(AVCodecContext *dst,
|
||||||
s->max_ra = INT_MAX;
|
s->max_ra = INT_MAX;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
av_buffer_unref(&s->sei.a53_caption.buf_ref);
|
||||||
|
if (s0->sei.a53_caption.buf_ref) {
|
||||||
|
s->sei.a53_caption.buf_ref = av_buffer_ref(s0->sei.a53_caption.buf_ref);
|
||||||
|
if (!s->sei.a53_caption.buf_ref)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
}
|
||||||
|
|
||||||
s->sei.frame_packing = s0->sei.frame_packing;
|
s->sei.frame_packing = s0->sei.frame_packing;
|
||||||
s->sei.display_orientation = s0->sei.display_orientation;
|
s->sei.display_orientation = s0->sei.display_orientation;
|
||||||
s->sei.mastering_display = s0->sei.mastering_display;
|
s->sei.mastering_display = s0->sei.mastering_display;
|
||||||
|
|
|
@ -322,6 +322,8 @@ static int extract_header(AVCodecContext *const avctx,
|
||||||
av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
|
av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
if (s->video_size && s->planesize * s->bpp * avctx->height > s->video_size)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
av_freep(&s->ham_buf);
|
av_freep(&s->ham_buf);
|
||||||
av_freep(&s->ham_palbuf);
|
av_freep(&s->ham_palbuf);
|
||||||
|
@ -1359,6 +1361,8 @@ static void decode_delta_d(uint8_t *dst,
|
||||||
bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET);
|
bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET);
|
||||||
if (opcode >= 0) {
|
if (opcode >= 0) {
|
||||||
uint32_t x = bytestream2_get_be32(&gb);
|
uint32_t x = bytestream2_get_be32(&gb);
|
||||||
|
if (opcode && 4 + (opcode - 1LL) * pitch > bytestream2_get_bytes_left_p(&pb))
|
||||||
|
continue;
|
||||||
while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) {
|
while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) {
|
||||||
bytestream2_put_be32(&pb, x);
|
bytestream2_put_be32(&pb, x);
|
||||||
bytestream2_skip_p(&pb, pitch - 4);
|
bytestream2_skip_p(&pb, pitch - 4);
|
||||||
|
|
|
@ -264,7 +264,7 @@ static int decode_gop_header(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (get_bits1(&ctx->gb))
|
if (get_bits1(&ctx->gb))
|
||||||
skip_bits_long(&ctx->gb, 24); /* skip transparency fill color */
|
skip_bits(&ctx->gb, 24); /* skip transparency fill color */
|
||||||
}
|
}
|
||||||
|
|
||||||
align_get_bits(&ctx->gb);
|
align_get_bits(&ctx->gb);
|
||||||
|
@ -348,7 +348,7 @@ static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
|
||||||
if (ctx->frame_type != FRAMETYPE_NULL) {
|
if (ctx->frame_type != FRAMETYPE_NULL) {
|
||||||
ctx->frame_flags = get_bits(&ctx->gb, 8);
|
ctx->frame_flags = get_bits(&ctx->gb, 8);
|
||||||
|
|
||||||
ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits_long(&ctx->gb, 24) : 0;
|
ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits(&ctx->gb, 24) : 0;
|
||||||
|
|
||||||
ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0;
|
ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0;
|
||||||
|
|
||||||
|
@ -392,7 +392,7 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
band->data_size = (ctx->frame_flags & 0x80) ? get_bits_long(&ctx->gb, 24) : 0;
|
band->data_size = (ctx->frame_flags & 0x80) ? get_bits(&ctx->gb, 24) : 0;
|
||||||
|
|
||||||
band->inherit_mv = band_flags & 2;
|
band->inherit_mv = band_flags & 2;
|
||||||
band->inherit_qdelta = band_flags & 8;
|
band->inherit_qdelta = band_flags & 8;
|
||||||
|
|
|
@ -33,7 +33,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* picture header */
|
/* picture header */
|
||||||
if (get_bits_long(&s->gb, 22) != 0x20) {
|
if (get_bits(&s->gb, 22) != 0x20) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
|
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
|
@ -392,6 +392,8 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx);
|
||||||
|
|
||||||
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
|
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
|
||||||
|
|
||||||
|
int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check AVFrame for A53 side data and allocate and fill SEI message with A53 info
|
* Check AVFrame for A53 side data and allocate and fill SEI message with A53 info
|
||||||
*
|
*
|
||||||
|
|
|
@ -476,7 +476,7 @@ static int ivi_dec_tile_data_size(GetBitContext *gb)
|
||||||
if (get_bits1(gb)) {
|
if (get_bits1(gb)) {
|
||||||
len = get_bits(gb, 8);
|
len = get_bits(gb, 8);
|
||||||
if (len == 255)
|
if (len == 255)
|
||||||
len = get_bits_long(gb, 24);
|
len = get_bits(gb, 24);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* align the bitstream reader on the byte boundary */
|
/* align the bitstream reader on the byte boundary */
|
||||||
|
@ -1193,7 +1193,7 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
|
||||||
left = get_bits_count(&ctx->gb) & 0x18;
|
left = get_bits_count(&ctx->gb) & 0x18;
|
||||||
skip_bits_long(&ctx->gb, 64 - left);
|
skip_bits_long(&ctx->gb, 64 - left);
|
||||||
if (get_bits_left(&ctx->gb) > 18 &&
|
if (get_bits_left(&ctx->gb) > 18 &&
|
||||||
show_bits_long(&ctx->gb, 21) == 0xBFFF8) { // syncheader + inter type
|
show_bits(&ctx->gb, 21) == 0xBFFF8) { // syncheader + inter type
|
||||||
AVPacket pkt;
|
AVPacket pkt;
|
||||||
pkt.data = avpkt->data + (get_bits_count(&ctx->gb) >> 3);
|
pkt.data = avpkt->data + (get_bits_count(&ctx->gb) >> 3);
|
||||||
pkt.size = get_bits_left(&ctx->gb) >> 3;
|
pkt.size = get_bits_left(&ctx->gb) >> 3;
|
||||||
|
|
|
@ -110,8 +110,8 @@ static av_cold int libkvazaar_init(AVCodecContext *avctx)
|
||||||
entry->key, entry->value);
|
entry->key, entry->value);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
av_dict_free(&dict);
|
|
||||||
}
|
}
|
||||||
|
av_dict_free(&dict);
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->encoder = enc = api->encoder_open(cfg);
|
ctx->encoder = enc = api->encoder_open(cfg);
|
||||||
|
|
|
@ -42,7 +42,7 @@ typedef struct librav1eContext {
|
||||||
size_t pass_pos;
|
size_t pass_pos;
|
||||||
int pass_size;
|
int pass_size;
|
||||||
|
|
||||||
char *rav1e_opts;
|
AVDictionary *rav1e_opts;
|
||||||
int quantizer;
|
int quantizer;
|
||||||
int speed;
|
int speed;
|
||||||
int tiles;
|
int tiles;
|
||||||
|
@ -244,17 +244,12 @@ static av_cold int librav1e_encode_init(AVCodecContext *avctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ctx->rav1e_opts) {
|
{
|
||||||
AVDictionary *dict = NULL;
|
|
||||||
AVDictionaryEntry *en = NULL;
|
AVDictionaryEntry *en = NULL;
|
||||||
|
while ((en = av_dict_get(ctx->rav1e_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
||||||
if (!av_dict_parse_string(&dict, ctx->rav1e_opts, "=", ":", 0)) {
|
int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
|
||||||
while (en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX)) {
|
if (parse_ret < 0)
|
||||||
int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
|
av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
|
||||||
if (parse_ret < 0)
|
|
||||||
av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
|
|
||||||
}
|
|
||||||
av_dict_free(&dict);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -538,7 +533,7 @@ static const AVOption options[] = {
|
||||||
{ "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
|
{ "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
|
||||||
{ "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
|
{ "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
|
||||||
{ "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
|
{ "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
|
||||||
{ "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
|
||||||
{ NULL }
|
{ NULL }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -100,7 +100,7 @@ typedef struct VPxEncoderContext {
|
||||||
int rc_undershoot_pct;
|
int rc_undershoot_pct;
|
||||||
int rc_overshoot_pct;
|
int rc_overshoot_pct;
|
||||||
|
|
||||||
char *vp8_ts_parameters;
|
AVDictionary *vp8_ts_parameters;
|
||||||
|
|
||||||
// VP9-only
|
// VP9-only
|
||||||
int lossless;
|
int lossless;
|
||||||
|
@ -757,19 +757,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
|
|
||||||
enccfg.g_error_resilient = ctx->error_resilient || ctx->flags & VP8F_ERROR_RESILIENT;
|
enccfg.g_error_resilient = ctx->error_resilient || ctx->flags & VP8F_ERROR_RESILIENT;
|
||||||
|
|
||||||
if (CONFIG_LIBVPX_VP8_ENCODER && avctx->codec_id == AV_CODEC_ID_VP8 && ctx->vp8_ts_parameters) {
|
if (CONFIG_LIBVPX_VP8_ENCODER && avctx->codec_id == AV_CODEC_ID_VP8) {
|
||||||
AVDictionary *dict = NULL;
|
|
||||||
AVDictionaryEntry* en = NULL;
|
AVDictionaryEntry* en = NULL;
|
||||||
|
while ((en = av_dict_get(ctx->vp8_ts_parameters, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
||||||
if (!av_dict_parse_string(&dict, ctx->vp8_ts_parameters, "=", ":", 0)) {
|
if (vp8_ts_param_parse(&enccfg, en->key, en->value) < 0)
|
||||||
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
av_log(avctx, AV_LOG_WARNING,
|
||||||
if (vp8_ts_param_parse(&enccfg, en->key, en->value) < 0)
|
"Error parsing option '%s = %s'.\n",
|
||||||
av_log(avctx, AV_LOG_WARNING,
|
en->key, en->value);
|
||||||
"Error parsing option '%s = %s'.\n",
|
|
||||||
en->key, en->value);
|
|
||||||
}
|
|
||||||
|
|
||||||
av_dict_free(&dict);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1047,8 +1041,7 @@ static int queue_frames(AVCodecContext *avctx, AVPacket *pkt_out)
|
||||||
if (size < 0)
|
if (size < 0)
|
||||||
return size;
|
return size;
|
||||||
} else {
|
} else {
|
||||||
struct FrameListData *cx_frame =
|
struct FrameListData *cx_frame = av_malloc(sizeof(*cx_frame));
|
||||||
av_malloc(sizeof(struct FrameListData));
|
|
||||||
|
|
||||||
if (!cx_frame) {
|
if (!cx_frame) {
|
||||||
av_log(avctx, AV_LOG_ERROR,
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
|
@ -1462,7 +1455,7 @@ static const AVOption vp8_options[] = {
|
||||||
"frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 2, VE},
|
"frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 2, VE},
|
||||||
{ "cpu-used", "Quality/Speed ratio modifier", OFFSET(cpu_used), AV_OPT_TYPE_INT, {.i64 = 1}, -16, 16, VE},
|
{ "cpu-used", "Quality/Speed ratio modifier", OFFSET(cpu_used), AV_OPT_TYPE_INT, {.i64 = 1}, -16, 16, VE},
|
||||||
{ "ts-parameters", "Temporal scaling configuration using a "
|
{ "ts-parameters", "Temporal scaling configuration using a "
|
||||||
":-separated list of key=value parameters", OFFSET(vp8_ts_parameters), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE},
|
":-separated list of key=value parameters", OFFSET(vp8_ts_parameters), AV_OPT_TYPE_DICT, {.str=NULL}, 0, 0, VE},
|
||||||
LEGACY_OPTIONS
|
LEGACY_OPTIONS
|
||||||
{ NULL }
|
{ NULL }
|
||||||
};
|
};
|
||||||
|
|
|
@ -25,6 +25,7 @@
|
||||||
#include "libavutil/mem.h"
|
#include "libavutil/mem.h"
|
||||||
#include "libavutil/pixdesc.h"
|
#include "libavutil/pixdesc.h"
|
||||||
#include "libavutil/stereo3d.h"
|
#include "libavutil/stereo3d.h"
|
||||||
|
#include "libavutil/time.h"
|
||||||
#include "libavutil/intreadwrite.h"
|
#include "libavutil/intreadwrite.h"
|
||||||
#include "avcodec.h"
|
#include "avcodec.h"
|
||||||
#include "internal.h"
|
#include "internal.h"
|
||||||
|
@ -44,6 +45,11 @@
|
||||||
// blocks of pixels (with respect to the luma plane)
|
// blocks of pixels (with respect to the luma plane)
|
||||||
#define MB_SIZE 16
|
#define MB_SIZE 16
|
||||||
|
|
||||||
|
typedef struct X264Opaque {
|
||||||
|
int64_t reordered_opaque;
|
||||||
|
int64_t wallclock;
|
||||||
|
} X264Opaque;
|
||||||
|
|
||||||
typedef struct X264Context {
|
typedef struct X264Context {
|
||||||
AVClass *class;
|
AVClass *class;
|
||||||
x264_param_t params;
|
x264_param_t params;
|
||||||
|
@ -95,10 +101,10 @@ typedef struct X264Context {
|
||||||
int scenechange_threshold;
|
int scenechange_threshold;
|
||||||
int noise_reduction;
|
int noise_reduction;
|
||||||
|
|
||||||
char *x264_params;
|
AVDictionary *x264_params;
|
||||||
|
|
||||||
int nb_reordered_opaque, next_reordered_opaque;
|
int nb_reordered_opaque, next_reordered_opaque;
|
||||||
int64_t *reordered_opaque;
|
X264Opaque *reordered_opaque;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the encoder does not support ROI then warn the first time we
|
* If the encoder does not support ROI then warn the first time we
|
||||||
|
@ -292,7 +298,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
|
||||||
x264_picture_t pic_out = {0};
|
x264_picture_t pic_out = {0};
|
||||||
int pict_type;
|
int pict_type;
|
||||||
int bit_depth;
|
int bit_depth;
|
||||||
int64_t *out_opaque;
|
int64_t wallclock = 0;
|
||||||
|
X264Opaque *out_opaque;
|
||||||
AVFrameSideData *sd;
|
AVFrameSideData *sd;
|
||||||
|
|
||||||
x264_picture_init( &x4->pic );
|
x264_picture_init( &x4->pic );
|
||||||
|
@ -314,7 +321,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
|
||||||
|
|
||||||
x4->pic.i_pts = frame->pts;
|
x4->pic.i_pts = frame->pts;
|
||||||
|
|
||||||
x4->reordered_opaque[x4->next_reordered_opaque] = frame->reordered_opaque;
|
x4->reordered_opaque[x4->next_reordered_opaque].reordered_opaque = frame->reordered_opaque;
|
||||||
|
x4->reordered_opaque[x4->next_reordered_opaque].wallclock = av_gettime();
|
||||||
x4->pic.opaque = &x4->reordered_opaque[x4->next_reordered_opaque];
|
x4->pic.opaque = &x4->reordered_opaque[x4->next_reordered_opaque];
|
||||||
x4->next_reordered_opaque++;
|
x4->next_reordered_opaque++;
|
||||||
x4->next_reordered_opaque %= x4->nb_reordered_opaque;
|
x4->next_reordered_opaque %= x4->nb_reordered_opaque;
|
||||||
|
@ -443,7 +451,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
|
||||||
out_opaque = pic_out.opaque;
|
out_opaque = pic_out.opaque;
|
||||||
if (out_opaque >= x4->reordered_opaque &&
|
if (out_opaque >= x4->reordered_opaque &&
|
||||||
out_opaque < &x4->reordered_opaque[x4->nb_reordered_opaque]) {
|
out_opaque < &x4->reordered_opaque[x4->nb_reordered_opaque]) {
|
||||||
ctx->reordered_opaque = *out_opaque;
|
ctx->reordered_opaque = out_opaque->reordered_opaque;
|
||||||
|
wallclock = out_opaque->wallclock;
|
||||||
} else {
|
} else {
|
||||||
// Unexpected opaque pointer on picture output
|
// Unexpected opaque pointer on picture output
|
||||||
ctx->reordered_opaque = 0;
|
ctx->reordered_opaque = 0;
|
||||||
|
@ -473,6 +482,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
|
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
|
||||||
if (ret) {
|
if (ret) {
|
||||||
ff_side_data_set_encoder_stats(pkt, (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);
|
ff_side_data_set_encoder_stats(pkt, (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);
|
||||||
|
if (wallclock)
|
||||||
|
ff_side_data_set_prft(pkt, wallclock);
|
||||||
|
|
||||||
#if FF_API_CODED_FRAME
|
#if FF_API_CODED_FRAME
|
||||||
FF_DISABLE_DEPRECATION_WARNINGS
|
FF_DISABLE_DEPRECATION_WARNINGS
|
||||||
|
@ -892,19 +903,14 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (x4->x264_params) {
|
|
||||||
AVDictionary *dict = NULL;
|
{
|
||||||
AVDictionaryEntry *en = NULL;
|
AVDictionaryEntry *en = NULL;
|
||||||
|
while (en = av_dict_get(x4->x264_params, "", en, AV_DICT_IGNORE_SUFFIX)) {
|
||||||
if (!av_dict_parse_string(&dict, x4->x264_params, "=", ":", 0)) {
|
if (x264_param_parse(&x4->params, en->key, en->value) < 0)
|
||||||
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
av_log(avctx, AV_LOG_WARNING,
|
||||||
if (x264_param_parse(&x4->params, en->key, en->value) < 0)
|
"Error parsing option '%s = %s'.\n",
|
||||||
av_log(avctx, AV_LOG_WARNING,
|
en->key, en->value);
|
||||||
"Error parsing option '%s = %s'.\n",
|
|
||||||
en->key, en->value);
|
|
||||||
}
|
|
||||||
|
|
||||||
av_dict_free(&dict);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1116,7 +1122,7 @@ static const AVOption options[] = {
|
||||||
{ "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE },
|
{ "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE },
|
||||||
{ "noise_reduction", "Noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE },
|
{ "noise_reduction", "Noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE },
|
||||||
|
|
||||||
{ "x264-params", "Override the x264 configuration using a :-separated list of key=value parameters", OFFSET(x264_params), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "x264-params", "Override the x264 configuration using a :-separated list of key=value parameters", OFFSET(x264_params), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
|
||||||
{ NULL },
|
{ NULL },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -42,11 +42,12 @@ typedef struct libx265Context {
|
||||||
const x265_api *api;
|
const x265_api *api;
|
||||||
|
|
||||||
float crf;
|
float crf;
|
||||||
|
int cqp;
|
||||||
int forced_idr;
|
int forced_idr;
|
||||||
char *preset;
|
char *preset;
|
||||||
char *tune;
|
char *tune;
|
||||||
char *profile;
|
char *profile;
|
||||||
char *x265_opts;
|
AVDictionary *x265_opts;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the encoder does not support ROI then warn the first time we
|
* If the encoder does not support ROI then warn the first time we
|
||||||
|
@ -82,10 +83,41 @@ static av_cold int libx265_encode_close(AVCodecContext *avctx)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static av_cold int libx265_param_parse_float(AVCodecContext *avctx,
|
||||||
|
const char *key, float value)
|
||||||
|
{
|
||||||
|
libx265Context *ctx = avctx->priv_data;
|
||||||
|
char buf[256];
|
||||||
|
|
||||||
|
snprintf(buf, sizeof(buf), "%2.2f", value);
|
||||||
|
if (ctx->api->param_parse(ctx->params, key, buf) == X265_PARAM_BAD_VALUE) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Invalid value %2.2f for param \"%s\".\n", value, key);
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static av_cold int libx265_param_parse_int(AVCodecContext *avctx,
|
||||||
|
const char *key, int value)
|
||||||
|
{
|
||||||
|
libx265Context *ctx = avctx->priv_data;
|
||||||
|
char buf[256];
|
||||||
|
|
||||||
|
snprintf(buf, sizeof(buf), "%d", value);
|
||||||
|
if (ctx->api->param_parse(ctx->params, key, buf) == X265_PARAM_BAD_VALUE) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR, "Invalid value %d for param \"%s\".\n", value, key);
|
||||||
|
return AVERROR(EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static av_cold int libx265_encode_init(AVCodecContext *avctx)
|
static av_cold int libx265_encode_init(AVCodecContext *avctx)
|
||||||
{
|
{
|
||||||
libx265Context *ctx = avctx->priv_data;
|
libx265Context *ctx = avctx->priv_data;
|
||||||
AVCPBProperties *cpb_props = NULL;
|
AVCPBProperties *cpb_props = NULL;
|
||||||
|
int ret;
|
||||||
|
|
||||||
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);
|
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);
|
||||||
if (!ctx->api)
|
if (!ctx->api)
|
||||||
|
@ -159,6 +191,10 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
|
||||||
// x265 validates the parameters internally
|
// x265 validates the parameters internally
|
||||||
ctx->params->vui.colorPrimaries = avctx->color_primaries;
|
ctx->params->vui.colorPrimaries = avctx->color_primaries;
|
||||||
ctx->params->vui.transferCharacteristics = avctx->color_trc;
|
ctx->params->vui.transferCharacteristics = avctx->color_trc;
|
||||||
|
#if X265_BUILD >= 159
|
||||||
|
if (avctx->color_trc == AVCOL_TRC_ARIB_STD_B67)
|
||||||
|
ctx->params->preferredTransferCharacteristics = ctx->params->vui.transferCharacteristics;
|
||||||
|
#endif
|
||||||
ctx->params->vui.matrixCoeffs = avctx->colorspace;
|
ctx->params->vui.matrixCoeffs = avctx->colorspace;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -222,6 +258,48 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
|
||||||
} else if (avctx->bit_rate > 0) {
|
} else if (avctx->bit_rate > 0) {
|
||||||
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
|
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
|
||||||
ctx->params->rc.rateControlMode = X265_RC_ABR;
|
ctx->params->rc.rateControlMode = X265_RC_ABR;
|
||||||
|
} else if (ctx->cqp >= 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "qp", ctx->cqp);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if X265_BUILD >= 89
|
||||||
|
if (avctx->qmin >= 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "qpmin", avctx->qmin);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->qmax >= 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "qpmax", avctx->qmax);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
if (avctx->max_qdiff >= 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "qpstep", avctx->max_qdiff);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->qblur >= 0) {
|
||||||
|
ret = libx265_param_parse_float(avctx, "qblur", avctx->qblur);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->qcompress >= 0) {
|
||||||
|
ret = libx265_param_parse_float(avctx, "qcomp", avctx->qcompress);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->i_quant_factor >= 0) {
|
||||||
|
ret = libx265_param_parse_float(avctx, "ipratio", avctx->i_quant_factor);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->b_quant_factor >= 0) {
|
||||||
|
ret = libx265_param_parse_float(avctx, "pbratio", avctx->b_quant_factor);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx->params->rc.vbvBufferSize = avctx->rc_buffer_size / 1000;
|
ctx->params->rc.vbvBufferSize = avctx->rc_buffer_size / 1000;
|
||||||
|
@ -237,28 +315,44 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
|
||||||
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
|
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
|
||||||
ctx->params->bRepeatHeaders = 1;
|
ctx->params->bRepeatHeaders = 1;
|
||||||
|
|
||||||
if (ctx->x265_opts) {
|
if (avctx->gop_size >= 0) {
|
||||||
AVDictionary *dict = NULL;
|
ret = libx265_param_parse_int(avctx, "keyint", avctx->gop_size);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->keyint_min > 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "min-keyint", avctx->keyint_min);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->max_b_frames >= 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "bframes", avctx->max_b_frames);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
if (avctx->refs >= 0) {
|
||||||
|
ret = libx265_param_parse_int(avctx, "ref", avctx->refs);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
AVDictionaryEntry *en = NULL;
|
AVDictionaryEntry *en = NULL;
|
||||||
|
while ((en = av_dict_get(ctx->x265_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
||||||
|
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
|
||||||
|
|
||||||
if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
|
switch (parse_ret) {
|
||||||
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
case X265_PARAM_BAD_NAME:
|
||||||
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
|
av_log(avctx, AV_LOG_WARNING,
|
||||||
|
"Unknown option: %s.\n", en->key);
|
||||||
switch (parse_ret) {
|
break;
|
||||||
case X265_PARAM_BAD_NAME:
|
case X265_PARAM_BAD_VALUE:
|
||||||
av_log(avctx, AV_LOG_WARNING,
|
av_log(avctx, AV_LOG_WARNING,
|
||||||
"Unknown option: %s.\n", en->key);
|
"Invalid value for %s: %s.\n", en->key, en->value);
|
||||||
break;
|
break;
|
||||||
case X265_PARAM_BAD_VALUE:
|
default:
|
||||||
av_log(avctx, AV_LOG_WARNING,
|
break;
|
||||||
"Invalid value for %s: %s.\n", en->key, en->value);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
av_dict_free(&dict);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -383,6 +477,7 @@ static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||||
x265_picture x265pic_out = { 0 };
|
x265_picture x265pic_out = { 0 };
|
||||||
x265_nal *nal;
|
x265_nal *nal;
|
||||||
uint8_t *dst;
|
uint8_t *dst;
|
||||||
|
int pict_type;
|
||||||
int payload = 0;
|
int payload = 0;
|
||||||
int nnal;
|
int nnal;
|
||||||
int ret;
|
int ret;
|
||||||
|
@ -442,20 +537,23 @@ static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||||
pkt->pts = x265pic_out.pts;
|
pkt->pts = x265pic_out.pts;
|
||||||
pkt->dts = x265pic_out.dts;
|
pkt->dts = x265pic_out.dts;
|
||||||
|
|
||||||
#if FF_API_CODED_FRAME
|
|
||||||
FF_DISABLE_DEPRECATION_WARNINGS
|
|
||||||
switch (x265pic_out.sliceType) {
|
switch (x265pic_out.sliceType) {
|
||||||
case X265_TYPE_IDR:
|
case X265_TYPE_IDR:
|
||||||
case X265_TYPE_I:
|
case X265_TYPE_I:
|
||||||
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
|
pict_type = AV_PICTURE_TYPE_I;
|
||||||
break;
|
break;
|
||||||
case X265_TYPE_P:
|
case X265_TYPE_P:
|
||||||
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
|
pict_type = AV_PICTURE_TYPE_P;
|
||||||
break;
|
break;
|
||||||
case X265_TYPE_B:
|
case X265_TYPE_B:
|
||||||
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
|
case X265_TYPE_BREF:
|
||||||
|
pict_type = AV_PICTURE_TYPE_B;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if FF_API_CODED_FRAME
|
||||||
|
FF_DISABLE_DEPRECATION_WARNINGS
|
||||||
|
avctx->coded_frame->pict_type = pict_type;
|
||||||
FF_ENABLE_DEPRECATION_WARNINGS
|
FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -466,6 +564,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
#endif
|
#endif
|
||||||
pkt->flags |= AV_PKT_FLAG_DISPOSABLE;
|
pkt->flags |= AV_PKT_FLAG_DISPOSABLE;
|
||||||
|
|
||||||
|
ff_side_data_set_encoder_stats(pkt, x265pic_out.frameData.qp * FF_QP2LAMBDA, NULL, 0, pict_type);
|
||||||
|
|
||||||
*got_packet = 1;
|
*got_packet = 1;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -535,11 +635,12 @@ static av_cold void libx265_encode_init_csp(AVCodec *codec)
|
||||||
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
|
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
|
||||||
static const AVOption options[] = {
|
static const AVOption options[] = {
|
||||||
{ "crf", "set the x265 crf", OFFSET(crf), AV_OPT_TYPE_FLOAT, { .dbl = -1 }, -1, FLT_MAX, VE },
|
{ "crf", "set the x265 crf", OFFSET(crf), AV_OPT_TYPE_FLOAT, { .dbl = -1 }, -1, FLT_MAX, VE },
|
||||||
|
{ "qp", "set the x265 qp", OFFSET(cqp), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE },
|
||||||
{ "forced-idr", "if forcing keyframes, force them as IDR frames", OFFSET(forced_idr),AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
|
{ "forced-idr", "if forcing keyframes, force them as IDR frames", OFFSET(forced_idr),AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
|
||||||
{ "preset", "set the x265 preset", OFFSET(preset), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "preset", "set the x265 preset", OFFSET(preset), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
||||||
{ "tune", "set the x265 tune parameter", OFFSET(tune), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "tune", "set the x265 tune parameter", OFFSET(tune), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
||||||
{ "profile", "set the x265 profile", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "profile", "set the x265 profile", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
||||||
{ "x265-params", "set the x265 configuration using a :-separated list of key=value parameters", OFFSET(x265_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "x265-params", "set the x265 configuration using a :-separated list of key=value parameters", OFFSET(x265_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
|
||||||
{ NULL }
|
{ NULL }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -552,6 +653,17 @@ static const AVClass class = {
|
||||||
|
|
||||||
static const AVCodecDefault x265_defaults[] = {
|
static const AVCodecDefault x265_defaults[] = {
|
||||||
{ "b", "0" },
|
{ "b", "0" },
|
||||||
|
{ "bf", "-1" },
|
||||||
|
{ "g", "-1" },
|
||||||
|
{ "keyint_min", "-1" },
|
||||||
|
{ "refs", "-1" },
|
||||||
|
{ "qmin", "-1" },
|
||||||
|
{ "qmax", "-1" },
|
||||||
|
{ "qdiff", "-1" },
|
||||||
|
{ "qblur", "-1" },
|
||||||
|
{ "qcomp", "-1" },
|
||||||
|
{ "i_qfactor", "-1" },
|
||||||
|
{ "b_qfactor", "-1" },
|
||||||
{ NULL },
|
{ NULL },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -48,7 +48,7 @@ typedef struct XAVS2EContext {
|
||||||
int log_level;
|
int log_level;
|
||||||
|
|
||||||
void *encoder;
|
void *encoder;
|
||||||
char *xavs2_opts;
|
AVDictionary *xavs2_opts;
|
||||||
|
|
||||||
xavs2_outpacket_t packet;
|
xavs2_outpacket_t packet;
|
||||||
xavs2_param_t *param;
|
xavs2_param_t *param;
|
||||||
|
@ -92,16 +92,10 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
|
||||||
|
|
||||||
xavs2_opt_set2("OpenGOP", "%d", !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
|
xavs2_opt_set2("OpenGOP", "%d", !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
|
||||||
|
|
||||||
if (cae->xavs2_opts) {
|
{
|
||||||
AVDictionary *dict = NULL;
|
|
||||||
AVDictionaryEntry *en = NULL;
|
AVDictionaryEntry *en = NULL;
|
||||||
|
while ((en = av_dict_get(cae->xavs2_opts, "", en, AV_DICT_IGNORE_SUFFIX)))
|
||||||
if (!av_dict_parse_string(&dict, cae->xavs2_opts, "=", ":", 0)) {
|
xavs2_opt_set2(en->key, "%s", en->value);
|
||||||
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
|
|
||||||
xavs2_opt_set2(en->key, "%s", en->value);
|
|
||||||
}
|
|
||||||
av_dict_free(&dict);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Rate control */
|
/* Rate control */
|
||||||
|
@ -267,7 +261,7 @@ static const AVOption options[] = {
|
||||||
{ "min_qp" , "min qp for rate control" , OFFSET(min_qp) , AV_OPT_TYPE_INT, {.i64 = 20 }, 0, 63, VE },
|
{ "min_qp" , "min qp for rate control" , OFFSET(min_qp) , AV_OPT_TYPE_INT, {.i64 = 20 }, 0, 63, VE },
|
||||||
{ "speed_level" , "Speed level, higher is better but slower", OFFSET(preset_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 9, VE },
|
{ "speed_level" , "Speed level, higher is better but slower", OFFSET(preset_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 9, VE },
|
||||||
{ "log_level" , "log level: -1: none, 0: error, 1: warning, 2: info, 3: debug", OFFSET(log_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 3, VE },
|
{ "log_level" , "log level: -1: none, 0: error, 1: warning, 2: info, 3: debug", OFFSET(log_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 3, VE },
|
||||||
{ "xavs2-params" , "set the xavs2 configuration using a :-separated list of key=value parameters", OFFSET(xavs2_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
|
{ "xavs2-params" , "set the xavs2 configuration using a :-separated list of key=value parameters", OFFSET(xavs2_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
|
||||||
{ NULL },
|
{ NULL },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,7 @@ static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame)
|
||||||
if (intra_flag) {
|
if (intra_flag) {
|
||||||
nb_blocks = (avctx->width / 2) * (avctx->height / 2);
|
nb_blocks = (avctx->width / 2) * (avctx->height / 2);
|
||||||
} else {
|
} else {
|
||||||
int skip_linesize;
|
int ret, skip_linesize;
|
||||||
|
|
||||||
nb_blocks = bytestream2_get_le32(gb);
|
nb_blocks = bytestream2_get_le32(gb);
|
||||||
skip_linesize = avctx->width >> 1;
|
skip_linesize = avctx->width >> 1;
|
||||||
|
@ -73,7 +73,9 @@ static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame)
|
||||||
if (bytestream2_get_bytes_left(gb) < mask_size)
|
if (bytestream2_get_bytes_left(gb) < mask_size)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
init_get_bits8(&mask, mask_start, mask_size);
|
ret = init_get_bits8(&mask, mask_start, mask_size);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
bytestream2_skip(gb, mask_size);
|
bytestream2_skip(gb, mask_size);
|
||||||
skip = s->skip;
|
skip = s->skip;
|
||||||
|
|
||||||
|
|
|
@ -73,10 +73,7 @@ static av_cold void h264_pred_init_msa(H264PredContext *h, int codec_id,
|
||||||
|
|
||||||
switch (codec_id) {
|
switch (codec_id) {
|
||||||
case AV_CODEC_ID_SVQ3:
|
case AV_CODEC_ID_SVQ3:
|
||||||
;
|
|
||||||
break;
|
|
||||||
case AV_CODEC_ID_RV40:
|
case AV_CODEC_ID_RV40:
|
||||||
;
|
|
||||||
break;
|
break;
|
||||||
case AV_CODEC_ID_VP7:
|
case AV_CODEC_ID_VP7:
|
||||||
case AV_CODEC_ID_VP8:
|
case AV_CODEC_ID_VP8:
|
||||||
|
|
|
@ -2049,7 +2049,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
|
||||||
unsigned nummarkers;
|
unsigned nummarkers;
|
||||||
|
|
||||||
id = get_bits_long(&s->gb, 32);
|
id = get_bits_long(&s->gb, 32);
|
||||||
id2 = get_bits_long(&s->gb, 24);
|
id2 = get_bits(&s->gb, 24);
|
||||||
len -= 7;
|
len -= 7;
|
||||||
if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
|
if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
|
||||||
av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
|
av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
|
||||||
|
|
|
@ -102,7 +102,7 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (get_bits_long(gb, 24) != 0xf8726f) /* Sync words */
|
if (get_bits(gb, 24) != 0xf8726f) /* Sync words */
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
mh->stream_type = get_bits(gb, 8);
|
mh->stream_type = get_bits(gb, 8);
|
||||||
|
|
|
@ -61,6 +61,8 @@ static int mlp_parse(AVCodecParserContext *s,
|
||||||
int ret;
|
int ret;
|
||||||
int i, p = 0;
|
int i, p = 0;
|
||||||
|
|
||||||
|
s->key_frame = 0;
|
||||||
|
|
||||||
*poutbuf_size = 0;
|
*poutbuf_size = 0;
|
||||||
if (buf_size == 0)
|
if (buf_size == 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -136,6 +138,8 @@ static int mlp_parse(AVCodecParserContext *s,
|
||||||
* access unit header and all the 2- or 4-byte substream headers. */
|
* access unit header and all the 2- or 4-byte substream headers. */
|
||||||
// Only check when this isn't a sync frame - syncs have a checksum.
|
// Only check when this isn't a sync frame - syncs have a checksum.
|
||||||
|
|
||||||
|
s->key_frame = 0;
|
||||||
|
|
||||||
parity_bits = 0;
|
parity_bits = 0;
|
||||||
for (i = -1; i < mp->num_substreams; i++) {
|
for (i = -1; i < mp->num_substreams; i++) {
|
||||||
parity_bits ^= buf[p++];
|
parity_bits ^= buf[p++];
|
||||||
|
@ -159,6 +163,8 @@ static int mlp_parse(AVCodecParserContext *s,
|
||||||
if (ff_mlp_read_major_sync(avctx, &mh, &gb) < 0)
|
if (ff_mlp_read_major_sync(avctx, &mh, &gb) < 0)
|
||||||
goto lost_sync;
|
goto lost_sync;
|
||||||
|
|
||||||
|
s->key_frame = 1;
|
||||||
|
|
||||||
avctx->bits_per_raw_sample = mh.group1_bits;
|
avctx->bits_per_raw_sample = mh.group1_bits;
|
||||||
if (avctx->bits_per_raw_sample > 16)
|
if (avctx->bits_per_raw_sample > 16)
|
||||||
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
|
avctx->sample_fmt = AV_SAMPLE_FMT_S32;
|
||||||
|
|
|
@ -62,6 +62,11 @@ static int mp3_header_decompress(AVBSFContext *ctx, AVPacket *out)
|
||||||
lsf = sample_rate < (24000+32000)/2;
|
lsf = sample_rate < (24000+32000)/2;
|
||||||
mpeg25 = sample_rate < (12000+16000)/2;
|
mpeg25 = sample_rate < (12000+16000)/2;
|
||||||
sample_rate_index= (header>>10)&3;
|
sample_rate_index= (header>>10)&3;
|
||||||
|
if (sample_rate_index == 3) {
|
||||||
|
ret = AVERROR_INVALIDDATA;
|
||||||
|
goto fail;
|
||||||
|
}
|
||||||
|
|
||||||
sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off
|
sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off
|
||||||
|
|
||||||
for(bitrate_index=2; bitrate_index<30; bitrate_index++){
|
for(bitrate_index=2; bitrate_index<30; bitrate_index++){
|
||||||
|
|
|
@ -1669,8 +1669,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (s->avctx->hwaccel &&
|
if (s->avctx->hwaccel) {
|
||||||
(s->avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD)) {
|
|
||||||
if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
|
if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
|
||||||
av_log(avctx, AV_LOG_ERROR,
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
"hardware accelerator failed to decode first field\n");
|
"hardware accelerator failed to decode first field\n");
|
||||||
|
|
|
@ -118,8 +118,8 @@ int ff_mpeg4audio_get_config_gb(MPEG4AudioConfig *c, GetBitContext *gb,
|
||||||
|
|
||||||
if (c->object_type == AOT_ALS) {
|
if (c->object_type == AOT_ALS) {
|
||||||
skip_bits(gb, 5);
|
skip_bits(gb, 5);
|
||||||
if (show_bits_long(gb, 24) != MKBETAG('\0','A','L','S'))
|
if (show_bits(gb, 24) != MKBETAG('\0','A','L','S'))
|
||||||
skip_bits_long(gb, 24);
|
skip_bits(gb, 24);
|
||||||
|
|
||||||
specific_config_bitindex = get_bits_count(gb);
|
specific_config_bitindex = get_bits_count(gb);
|
||||||
|
|
||||||
|
|
|
@ -711,7 +711,7 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (show_bits_long(&s->gb, 19) == DC_MARKER)
|
if (show_bits(&s->gb, 19) == DC_MARKER)
|
||||||
return mb_num - 1;
|
return mb_num - 1;
|
||||||
|
|
||||||
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
|
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
|
||||||
|
@ -1001,7 +1001,7 @@ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
|
||||||
if (s->pict_type == AV_PICTURE_TYPE_I) {
|
if (s->pict_type == AV_PICTURE_TYPE_I) {
|
||||||
while (show_bits(&s->gb, 9) == 1)
|
while (show_bits(&s->gb, 9) == 1)
|
||||||
skip_bits(&s->gb, 9);
|
skip_bits(&s->gb, 9);
|
||||||
if (get_bits_long(&s->gb, 19) != DC_MARKER) {
|
if (get_bits(&s->gb, 19) != DC_MARKER) {
|
||||||
av_log(s->avctx, AV_LOG_ERROR,
|
av_log(s->avctx, AV_LOG_ERROR,
|
||||||
"marker missing after first I partition at %d %d\n",
|
"marker missing after first I partition at %d %d\n",
|
||||||
s->mb_x, s->mb_y);
|
s->mb_x, s->mb_y);
|
||||||
|
@ -1782,7 +1782,7 @@ static void next_start_code_studio(GetBitContext *gb)
|
||||||
{
|
{
|
||||||
align_get_bits(gb);
|
align_get_bits(gb);
|
||||||
|
|
||||||
while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) {
|
while (get_bits_left(gb) >= 24 && show_bits(gb, 24) != 0x1) {
|
||||||
get_bits(gb, 8);
|
get_bits(gb, 8);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -412,8 +412,6 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
|
||||||
|
|
||||||
ff_mpeg_er_frame_start(s);
|
ff_mpeg_er_frame_start(s);
|
||||||
|
|
||||||
v->bits = buf_size * 8;
|
|
||||||
|
|
||||||
v->end_mb_x = (w + 15) >> 4;
|
v->end_mb_x = (w + 15) >> 4;
|
||||||
s->end_mb_y = (h + 15) >> 4;
|
s->end_mb_y = (h + 15) >> 4;
|
||||||
if (v->respic & 1)
|
if (v->respic & 1)
|
||||||
|
|
|
@ -161,6 +161,9 @@ static int decode_frame(AVCodecContext *avctx,
|
||||||
type = AV_RB32(avpkt->data);
|
type = AV_RB32(avpkt->data);
|
||||||
size = AV_RL32(avpkt->data + 4);
|
size = AV_RL32(avpkt->data + 4);
|
||||||
|
|
||||||
|
if (size < 1 || size >= avpkt->size)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
|
|
|
@ -39,7 +39,7 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt)
|
||||||
{
|
{
|
||||||
NoiseContext *s = ctx->priv_data;
|
NoiseContext *s = ctx->priv_data;
|
||||||
int amount = s->amount > 0 ? s->amount : (s->state % 10001 + 1);
|
int amount = s->amount > 0 ? s->amount : (s->state % 10001 + 1);
|
||||||
int i, ret = 0;
|
int i, ret;
|
||||||
|
|
||||||
if (amount <= 0)
|
if (amount <= 0)
|
||||||
return AVERROR(EINVAL);
|
return AVERROR(EINVAL);
|
||||||
|
@ -55,19 +55,18 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt)
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = av_packet_make_writable(pkt);
|
ret = av_packet_make_writable(pkt);
|
||||||
if (ret < 0)
|
if (ret < 0) {
|
||||||
goto fail;
|
av_packet_unref(pkt);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < pkt->size; i++) {
|
for (i = 0; i < pkt->size; i++) {
|
||||||
s->state += pkt->data[i] + 1;
|
s->state += pkt->data[i] + 1;
|
||||||
if (s->state % amount == 0)
|
if (s->state % amount == 0)
|
||||||
pkt->data[i] = s->state;
|
pkt->data[i] = s->state;
|
||||||
}
|
}
|
||||||
fail:
|
|
||||||
if (ret < 0)
|
|
||||||
av_packet_unref(pkt);
|
|
||||||
|
|
||||||
return ret;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define OFFSET(x) offsetof(NoiseContext, x)
|
#define OFFSET(x) offsetof(NoiseContext, x)
|
||||||
|
|
|
@ -50,6 +50,10 @@ static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer
|
||||||
.FrameHeightInMbs = (cur_frame->height + 15) / 16,
|
.FrameHeightInMbs = (cur_frame->height + 15) / 16,
|
||||||
.CurrPicIdx = cf->idx,
|
.CurrPicIdx = cf->idx,
|
||||||
|
|
||||||
|
.field_pic_flag = s->picture_structure != PICT_FRAME,
|
||||||
|
.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD,
|
||||||
|
.second_field = s->picture_structure != PICT_FRAME && !s->first_field,
|
||||||
|
|
||||||
.intra_pic_flag = s->pict_type == AV_PICTURE_TYPE_I,
|
.intra_pic_flag = s->pict_type == AV_PICTURE_TYPE_I,
|
||||||
.ref_pic_flag = s->pict_type == AV_PICTURE_TYPE_I ||
|
.ref_pic_flag = s->pict_type == AV_PICTURE_TYPE_I ||
|
||||||
s->pict_type == AV_PICTURE_TYPE_P,
|
s->pict_type == AV_PICTURE_TYPE_P,
|
||||||
|
|
|
@ -2262,3 +2262,8 @@ int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
av_cold void ff_nvenc_encode_flush(AVCodecContext *avctx)
|
||||||
|
{
|
||||||
|
ff_nvenc_send_frame(avctx, NULL);
|
||||||
|
}
|
||||||
|
|
|
@ -214,6 +214,8 @@ int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
|
||||||
int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
|
||||||
const AVFrame *frame, int *got_packet);
|
const AVFrame *frame, int *got_packet);
|
||||||
|
|
||||||
|
void ff_nvenc_encode_flush(AVCodecContext *avctx);
|
||||||
|
|
||||||
extern const enum AVPixelFormat ff_nvenc_pix_fmts[];
|
extern const enum AVPixelFormat ff_nvenc_pix_fmts[];
|
||||||
|
|
||||||
#endif /* AVCODEC_NVENC_H */
|
#endif /* AVCODEC_NVENC_H */
|
||||||
|
|
|
@ -240,6 +240,7 @@ AVCodec ff_h264_nvenc_encoder = {
|
||||||
.receive_packet = ff_nvenc_receive_packet,
|
.receive_packet = ff_nvenc_receive_packet,
|
||||||
.encode2 = ff_nvenc_encode_frame,
|
.encode2 = ff_nvenc_encode_frame,
|
||||||
.close = ff_nvenc_encode_close,
|
.close = ff_nvenc_encode_close,
|
||||||
|
.flush = ff_nvenc_encode_flush,
|
||||||
.priv_data_size = sizeof(NvencContext),
|
.priv_data_size = sizeof(NvencContext),
|
||||||
.priv_class = &h264_nvenc_class,
|
.priv_class = &h264_nvenc_class,
|
||||||
.defaults = defaults,
|
.defaults = defaults,
|
||||||
|
|
|
@ -198,6 +198,7 @@ AVCodec ff_hevc_nvenc_encoder = {
|
||||||
.receive_packet = ff_nvenc_receive_packet,
|
.receive_packet = ff_nvenc_receive_packet,
|
||||||
.encode2 = ff_nvenc_encode_frame,
|
.encode2 = ff_nvenc_encode_frame,
|
||||||
.close = ff_nvenc_encode_close,
|
.close = ff_nvenc_encode_close,
|
||||||
|
.flush = ff_nvenc_encode_flush,
|
||||||
.priv_data_size = sizeof(NvencContext),
|
.priv_data_size = sizeof(NvencContext),
|
||||||
.priv_class = &hevc_nvenc_class,
|
.priv_class = &hevc_nvenc_class,
|
||||||
.defaults = defaults,
|
.defaults = defaults,
|
||||||
|
|
|
@ -141,8 +141,8 @@ static const AVOption avcodec_options[] = {
|
||||||
{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
||||||
{"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
{"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
||||||
{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
||||||
{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT | AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
||||||
{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE | AV_EF_COMPLIANT | AV_EF_CAREFUL}, INT_MIN, INT_MAX, A|V|D, "err_detect"},
|
||||||
{"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
|
{"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
|
||||||
{"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
|
{"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
|
||||||
#if FF_API_PRIVATE_OPT
|
#if FF_API_PRIVATE_OPT
|
||||||
|
|
|
@ -691,8 +691,11 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size,
|
||||||
ret = AVERROR_INVALIDDATA;
|
ret = AVERROR_INVALIDDATA;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
|
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
|
||||||
|
avsubtitle_free(data);
|
||||||
|
*data_size = 0;
|
||||||
return ret;
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
buf += segment_length;
|
buf += segment_length;
|
||||||
}
|
}
|
||||||
|
|
|
@ -117,6 +117,9 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
|
||||||
return AVERROR_INVALIDDATA;
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (!pnm_space(s->bytestream[-1]))
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
/* check that all tags are present */
|
/* check that all tags are present */
|
||||||
if (w <= 0 || h <= 0 || maxval <= 0 || maxval > UINT16_MAX || depth <= 0 || tuple_type[0] == '\0' ||
|
if (w <= 0 || h <= 0 || maxval <= 0 || maxval > UINT16_MAX || depth <= 0 || tuple_type[0] == '\0' ||
|
||||||
av_image_check_size(w, h, 0, avctx) || s->bytestream >= s->bytestream_end)
|
av_image_check_size(w, h, 0, avctx) || s->bytestream >= s->bytestream_end)
|
||||||
|
@ -197,6 +200,10 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
|
||||||
}
|
}
|
||||||
}else
|
}else
|
||||||
s->maxval=1;
|
s->maxval=1;
|
||||||
|
|
||||||
|
if (!pnm_space(s->bytestream[-1]))
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
/* more check if YUV420 */
|
/* more check if YUV420 */
|
||||||
if (av_pix_fmt_desc_get(avctx->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR) {
|
if (av_pix_fmt_desc_get(avctx->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR) {
|
||||||
if ((avctx->width & 1) != 0)
|
if ((avctx->width & 1) != 0)
|
||||||
|
|
|
@ -143,7 +143,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
|
||||||
v = (*s->bytestream++)&1;
|
v = (*s->bytestream++)&1;
|
||||||
} else {
|
} else {
|
||||||
/* read a sequence of digits */
|
/* read a sequence of digits */
|
||||||
for (k = 0; k < 5 && c <= 9; k += 1) {
|
for (k = 0; k < 6 && c <= 9; k += 1) {
|
||||||
v = 10*v + c;
|
v = 10*v + c;
|
||||||
c = (*s->bytestream++) - '0';
|
c = (*s->bytestream++) - '0';
|
||||||
}
|
}
|
||||||
|
|
|
@ -224,7 +224,7 @@ static void encode_codeword(PutBitContext *pb, int val, int codebook)
|
||||||
}
|
}
|
||||||
|
|
||||||
#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
|
#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
|
||||||
#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
|
#define TO_GOLOMB(val) (((val) * 2) ^ ((val) >> 31))
|
||||||
#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
|
#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
|
||||||
#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
|
#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
|
||||||
#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
|
#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
|
||||||
|
|
|
@ -72,58 +72,6 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
|
||||||
return AVERROR(ENOSYS);
|
return AVERROR(ENOSYS);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
static const struct {
|
|
||||||
enum AVCodecID codec_id;
|
|
||||||
int codec_profile;
|
|
||||||
int mfx_profile;
|
|
||||||
} qsv_profile_map[] = {
|
|
||||||
#define MAP(c, p, v) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, MFX_PROFILE_ ## v }
|
|
||||||
MAP(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2_SIMPLE ),
|
|
||||||
MAP(MPEG2VIDEO, MPEG2_MAIN, MPEG2_MAIN ),
|
|
||||||
MAP(MPEG2VIDEO, MPEG2_HIGH, MPEG2_HIGH ),
|
|
||||||
|
|
||||||
MAP(H264, H264_BASELINE, AVC_BASELINE ),
|
|
||||||
MAP(H264, H264_CONSTRAINED_BASELINE, AVC_BASELINE),
|
|
||||||
#if QSV_VERSION_ATLEAST(1, 3)
|
|
||||||
MAP(H264, H264_EXTENDED, AVC_EXTENDED ),
|
|
||||||
#endif
|
|
||||||
MAP(H264, H264_MAIN, AVC_MAIN ),
|
|
||||||
MAP(H264, H264_HIGH, AVC_HIGH ),
|
|
||||||
MAP(H264, H264_HIGH_422, AVC_HIGH_422 ),
|
|
||||||
|
|
||||||
#if QSV_VERSION_ATLEAST(1, 8)
|
|
||||||
MAP(HEVC, HEVC_MAIN, HEVC_MAIN ),
|
|
||||||
MAP(HEVC, HEVC_MAIN_10, HEVC_MAIN10 ),
|
|
||||||
MAP(HEVC, HEVC_MAIN_STILL_PICTURE, HEVC_MAINSP ),
|
|
||||||
#endif
|
|
||||||
#if QSV_VERSION_ATLEAST(1, 16)
|
|
||||||
MAP(HEVC, HEVC_REXT, HEVC_REXT ),
|
|
||||||
#endif
|
|
||||||
|
|
||||||
MAP(VC1, VC1_SIMPLE, VC1_SIMPLE ),
|
|
||||||
MAP(VC1, VC1_MAIN, VC1_MAIN ),
|
|
||||||
MAP(VC1, VC1_COMPLEX, VC1_ADVANCED ),
|
|
||||||
MAP(VC1, VC1_ADVANCED, VC1_ADVANCED ),
|
|
||||||
#undef MAP
|
|
||||||
};
|
|
||||||
|
|
||||||
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile)
|
|
||||||
{
|
|
||||||
int i;
|
|
||||||
if (profile == FF_PROFILE_UNKNOWN)
|
|
||||||
return MFX_PROFILE_UNKNOWN;
|
|
||||||
|
|
||||||
for (i = 0; i < FF_ARRAY_ELEMS(qsv_profile_map); i++) {
|
|
||||||
if (qsv_profile_map[i].codec_id != codec_id)
|
|
||||||
continue;
|
|
||||||
if (qsv_profile_map[i].codec_profile == profile)
|
|
||||||
return qsv_profile_map[i].mfx_profile;
|
|
||||||
}
|
|
||||||
|
|
||||||
return MFX_PROFILE_UNKNOWN;
|
|
||||||
}
|
|
||||||
|
|
||||||
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level)
|
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level)
|
||||||
{
|
{
|
||||||
if (level == FF_LEVEL_UNKNOWN)
|
if (level == FF_LEVEL_UNKNOWN)
|
||||||
|
|
|
@ -116,7 +116,6 @@ int ff_qsv_print_warning(void *log_ctx, mfxStatus err,
|
||||||
const char *warning_string);
|
const char *warning_string);
|
||||||
|
|
||||||
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id);
|
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id);
|
||||||
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile);
|
|
||||||
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level);
|
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level);
|
||||||
|
|
||||||
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc);
|
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc);
|
||||||
|
|
|
@ -74,7 +74,7 @@ static int ff_qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, A
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
|
av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
|
||||||
return AVERROR(ENOMEM);
|
return AVERROR(EINVAL);
|
||||||
}
|
}
|
||||||
|
|
||||||
frame->linesize[1] = frame->linesize[0];
|
frame->linesize[1] = frame->linesize[0];
|
||||||
|
@ -99,9 +99,11 @@ static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession ses
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (q->gpu_copy == MFX_GPUCOPY_ON &&
|
if (q->gpu_copy == MFX_GPUCOPY_ON &&
|
||||||
!(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY))
|
!(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
|
||||||
av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
|
av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
|
||||||
"only works in MFX_IOPATTERN_OUT_SYSTEM_MEMORY.\n");
|
"only works in system memory mode.\n");
|
||||||
|
q->gpu_copy = MFX_GPUCOPY_OFF;
|
||||||
|
}
|
||||||
if (session) {
|
if (session) {
|
||||||
q->session = session;
|
q->session = session;
|
||||||
} else if (hw_frames_ref) {
|
} else if (hw_frames_ref) {
|
||||||
|
|
|
@ -139,6 +139,9 @@ static void dump_video_param(AVCodecContext *avctx, QSVEncContext *q,
|
||||||
#if QSV_HAVE_CO3
|
#if QSV_HAVE_CO3
|
||||||
mfxExtCodingOption3 *co3 = (mfxExtCodingOption3*)coding_opts[2];
|
mfxExtCodingOption3 *co3 = (mfxExtCodingOption3*)coding_opts[2];
|
||||||
#endif
|
#endif
|
||||||
|
#if QSV_HAVE_EXT_HEVC_TILES
|
||||||
|
mfxExtHEVCTiles *exthevctiles = (mfxExtHEVCTiles *)coding_opts[3 + QSV_HAVE_CO_VPS];
|
||||||
|
#endif
|
||||||
|
|
||||||
av_log(avctx, AV_LOG_VERBOSE, "profile: %s; level: %"PRIu16"\n",
|
av_log(avctx, AV_LOG_VERBOSE, "profile: %s; level: %"PRIu16"\n",
|
||||||
print_profile(info->CodecProfile), info->CodecLevel);
|
print_profile(info->CodecProfile), info->CodecLevel);
|
||||||
|
@ -204,6 +207,12 @@ static void dump_video_param(AVCodecContext *avctx, QSVEncContext *q,
|
||||||
av_log(avctx, AV_LOG_VERBOSE, "RateDistortionOpt: %s\n",
|
av_log(avctx, AV_LOG_VERBOSE, "RateDistortionOpt: %s\n",
|
||||||
print_threestate(co->RateDistortionOpt));
|
print_threestate(co->RateDistortionOpt));
|
||||||
|
|
||||||
|
#if QSV_HAVE_EXT_HEVC_TILES
|
||||||
|
if (avctx->codec_id == AV_CODEC_ID_HEVC)
|
||||||
|
av_log(avctx, AV_LOG_VERBOSE, "NumTileColumns: %"PRIu16"; NumTileRows: %"PRIu16"\n",
|
||||||
|
exthevctiles->NumTileColumns, exthevctiles->NumTileRows);
|
||||||
|
#endif
|
||||||
|
|
||||||
#if QSV_HAVE_CO2
|
#if QSV_HAVE_CO2
|
||||||
av_log(avctx, AV_LOG_VERBOSE,
|
av_log(avctx, AV_LOG_VERBOSE,
|
||||||
"RecoveryPointSEI: %s IntRefType: %"PRIu16"; IntRefCycleSize: %"PRIu16"; IntRefQPDelta: %"PRId16"\n",
|
"RecoveryPointSEI: %s IntRefType: %"PRIu16"; IntRefCycleSize: %"PRIu16"; IntRefQPDelta: %"PRId16"\n",
|
||||||
|
@ -771,6 +780,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if QSV_HAVE_EXT_HEVC_TILES
|
||||||
|
if (avctx->codec_id == AV_CODEC_ID_HEVC) {
|
||||||
|
q->exthevctiles.Header.BufferId = MFX_EXTBUFF_HEVC_TILES;
|
||||||
|
q->exthevctiles.Header.BufferSz = sizeof(q->exthevctiles);
|
||||||
|
q->exthevctiles.NumTileColumns = q->tile_cols;
|
||||||
|
q->exthevctiles.NumTileRows = q->tile_rows;
|
||||||
|
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->exthevctiles;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
if (!check_enc_param(avctx,q)) {
|
if (!check_enc_param(avctx,q)) {
|
||||||
av_log(avctx, AV_LOG_ERROR,
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
"some encoding parameters are not supported by the QSV "
|
"some encoding parameters are not supported by the QSV "
|
||||||
|
@ -889,7 +908,14 @@ static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
mfxExtBuffer *ext_buffers[2 + QSV_HAVE_CO2 + QSV_HAVE_CO3 + QSV_HAVE_CO_VPS];
|
#if QSV_HAVE_EXT_HEVC_TILES
|
||||||
|
mfxExtHEVCTiles hevc_tile_buf = {
|
||||||
|
.Header.BufferId = MFX_EXTBUFF_HEVC_TILES,
|
||||||
|
.Header.BufferSz = sizeof(hevc_tile_buf),
|
||||||
|
};
|
||||||
|
#endif
|
||||||
|
|
||||||
|
mfxExtBuffer *ext_buffers[2 + QSV_HAVE_CO2 + QSV_HAVE_CO3 + QSV_HAVE_CO_VPS + QSV_HAVE_EXT_HEVC_TILES];
|
||||||
|
|
||||||
int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO;
|
int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO;
|
||||||
int ret, ext_buf_num = 0, extradata_offset = 0;
|
int ret, ext_buf_num = 0, extradata_offset = 0;
|
||||||
|
@ -907,6 +933,10 @@ static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
|
||||||
if (q->hevc_vps)
|
if (q->hevc_vps)
|
||||||
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&extradata_vps;
|
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&extradata_vps;
|
||||||
#endif
|
#endif
|
||||||
|
#if QSV_HAVE_EXT_HEVC_TILES
|
||||||
|
if (avctx->codec_id == AV_CODEC_ID_HEVC)
|
||||||
|
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&hevc_tile_buf;
|
||||||
|
#endif
|
||||||
|
|
||||||
q->param.ExtParam = ext_buffers;
|
q->param.ExtParam = ext_buffers;
|
||||||
q->param.NumExtParam = ext_buf_num;
|
q->param.NumExtParam = ext_buf_num;
|
||||||
|
|
|
@ -38,6 +38,7 @@
|
||||||
#define QSV_HAVE_CO3 QSV_VERSION_ATLEAST(1, 11)
|
#define QSV_HAVE_CO3 QSV_VERSION_ATLEAST(1, 11)
|
||||||
#define QSV_HAVE_CO_VPS QSV_VERSION_ATLEAST(1, 17)
|
#define QSV_HAVE_CO_VPS QSV_VERSION_ATLEAST(1, 17)
|
||||||
|
|
||||||
|
#define QSV_HAVE_EXT_HEVC_TILES QSV_VERSION_ATLEAST(1, 13)
|
||||||
#define QSV_HAVE_EXT_VP9_PARAM QSV_VERSION_ATLEAST(1, 26)
|
#define QSV_HAVE_EXT_VP9_PARAM QSV_VERSION_ATLEAST(1, 26)
|
||||||
|
|
||||||
#define QSV_HAVE_TRELLIS QSV_VERSION_ATLEAST(1, 8)
|
#define QSV_HAVE_TRELLIS QSV_VERSION_ATLEAST(1, 8)
|
||||||
|
@ -124,6 +125,9 @@ typedef struct QSVEncContext {
|
||||||
mfxExtMultiFrameParam extmfp;
|
mfxExtMultiFrameParam extmfp;
|
||||||
mfxExtMultiFrameControl extmfc;
|
mfxExtMultiFrameControl extmfc;
|
||||||
#endif
|
#endif
|
||||||
|
#if QSV_HAVE_EXT_HEVC_TILES
|
||||||
|
mfxExtHEVCTiles exthevctiles;
|
||||||
|
#endif
|
||||||
#if QSV_HAVE_EXT_VP9_PARAM
|
#if QSV_HAVE_EXT_VP9_PARAM
|
||||||
mfxExtVP9Param extvp9param;
|
mfxExtVP9Param extvp9param;
|
||||||
#endif
|
#endif
|
||||||
|
@ -161,6 +165,9 @@ typedef struct QSVEncContext {
|
||||||
int max_frame_size;
|
int max_frame_size;
|
||||||
int max_slice_size;
|
int max_slice_size;
|
||||||
|
|
||||||
|
int tile_cols;
|
||||||
|
int tile_rows;
|
||||||
|
|
||||||
int aud;
|
int aud;
|
||||||
|
|
||||||
int single_sei_nal_unit;
|
int single_sei_nal_unit;
|
||||||
|
|
|
@ -243,6 +243,9 @@ static const AVOption options[] = {
|
||||||
|
|
||||||
{ "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE},
|
{ "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE},
|
||||||
|
|
||||||
|
{ "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
|
||||||
|
{ "tile_rows", "Number of rows for tiled encoding", OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
|
||||||
|
|
||||||
{ NULL },
|
{ NULL },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -121,7 +121,7 @@ static inline void FUNC6(idctRowCondDC)(idctin *row, int extra_shift)
|
||||||
// TODO: Add DC-only support for int32_t input
|
// TODO: Add DC-only support for int32_t input
|
||||||
#if IN_IDCT_DEPTH == 16
|
#if IN_IDCT_DEPTH == 16
|
||||||
#if HAVE_FAST_64BIT
|
#if HAVE_FAST_64BIT
|
||||||
#define ROW0_MASK (0xffffLL << 48 * HAVE_BIGENDIAN)
|
#define ROW0_MASK (0xffffULL << 48 * HAVE_BIGENDIAN)
|
||||||
if (((AV_RN64A(row) & ~ROW0_MASK) | AV_RN64A(row+4)) == 0) {
|
if (((AV_RN64A(row) & ~ROW0_MASK) | AV_RN64A(row+4)) == 0) {
|
||||||
uint64_t temp;
|
uint64_t temp;
|
||||||
if (DC_SHIFT - extra_shift >= 0) {
|
if (DC_SHIFT - extra_shift >= 0) {
|
||||||
|
|
|
@ -144,6 +144,8 @@ static inline av_flatten int get_symbol(RangeCoder *c, uint8_t *state, int is_si
|
||||||
e= 0;
|
e= 0;
|
||||||
while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
|
while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
|
||||||
e++;
|
e++;
|
||||||
|
if (e > 31)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
}
|
}
|
||||||
|
|
||||||
a= 1;
|
a= 1;
|
||||||
|
|
|
@ -132,12 +132,6 @@ static int decode_frame(AVCodecContext *avctx,
|
||||||
h = bytestream2_get_le16(&s->gb);
|
h = bytestream2_get_le16(&s->gb);
|
||||||
bpp = bytestream2_get_byte(&s->gb);
|
bpp = bytestream2_get_byte(&s->gb);
|
||||||
|
|
||||||
if (bytestream2_get_bytes_left(&s->gb) <= idlen) {
|
|
||||||
av_log(avctx, AV_LOG_ERROR,
|
|
||||||
"Not enough data to read header\n");
|
|
||||||
return AVERROR_INVALIDDATA;
|
|
||||||
}
|
|
||||||
|
|
||||||
flags = bytestream2_get_byte(&s->gb);
|
flags = bytestream2_get_byte(&s->gb);
|
||||||
|
|
||||||
if (!pal && (first_clr || colors || csize)) {
|
if (!pal && (first_clr || colors || csize)) {
|
||||||
|
@ -146,6 +140,12 @@ static int decode_frame(AVCodecContext *avctx,
|
||||||
first_clr = colors = csize = 0;
|
first_clr = colors = csize = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (bytestream2_get_bytes_left(&s->gb) < idlen + 2*colors) {
|
||||||
|
av_log(avctx, AV_LOG_ERROR,
|
||||||
|
"Not enough data to read header\n");
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
}
|
||||||
|
|
||||||
// skip identifier if any
|
// skip identifier if any
|
||||||
bytestream2_skip(&s->gb, idlen);
|
bytestream2_skip(&s->gb, idlen);
|
||||||
|
|
||||||
|
|
|
@ -1218,6 +1218,8 @@ static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
|
||||||
|
|
||||||
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||||
{
|
{
|
||||||
|
AVFrameSideData *sd;
|
||||||
|
GetByteContext gb_temp;
|
||||||
unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
|
unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
|
||||||
int i, start;
|
int i, start;
|
||||||
int pos;
|
int pos;
|
||||||
|
@ -1643,6 +1645,22 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
case TIFF_ICC_PROFILE:
|
||||||
|
if (type != TIFF_UNDEFINED)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
|
gb_temp = s->gb;
|
||||||
|
bytestream2_seek(&gb_temp, SEEK_SET, off);
|
||||||
|
|
||||||
|
if (bytestream2_get_bytes_left(&gb_temp) < count)
|
||||||
|
return AVERROR_INVALIDDATA;
|
||||||
|
|
||||||
|
sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
|
||||||
|
if (!sd)
|
||||||
|
return AVERROR(ENOMEM);
|
||||||
|
|
||||||
|
bytestream2_get_bufferu(&gb_temp, sd->data, count);
|
||||||
|
break;
|
||||||
case TIFF_ARTIST:
|
case TIFF_ARTIST:
|
||||||
ADD_METADATA(count, "artist", NULL);
|
ADD_METADATA(count, "artist", NULL);
|
||||||
break;
|
break;
|
||||||
|
|
|
@ -92,6 +92,7 @@ enum TiffTags {
|
||||||
TIFF_MODEL_TIEPOINT = 0x8482,
|
TIFF_MODEL_TIEPOINT = 0x8482,
|
||||||
TIFF_MODEL_PIXEL_SCALE = 0x830E,
|
TIFF_MODEL_PIXEL_SCALE = 0x830E,
|
||||||
TIFF_MODEL_TRANSFORMATION= 0x8480,
|
TIFF_MODEL_TRANSFORMATION= 0x8480,
|
||||||
|
TIFF_ICC_PROFILE = 0x8773,
|
||||||
TIFF_GEO_KEY_DIRECTORY = 0x87AF,
|
TIFF_GEO_KEY_DIRECTORY = 0x87AF,
|
||||||
TIFF_GEO_DOUBLE_PARAMS = 0x87B0,
|
TIFF_GEO_DOUBLE_PARAMS = 0x87B0,
|
||||||
TIFF_GEO_ASCII_PARAMS = 0x87B1,
|
TIFF_GEO_ASCII_PARAMS = 0x87B1,
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue