This commit is contained in:
MrBesen 2020-01-16 13:11:09 +01:00
commit a1b65bf80a
Signed by: MrBesen
GPG Key ID: 596B2350DCD67504
424 changed files with 6590 additions and 3215 deletions

View File

@ -5,8 +5,8 @@ version <next>:
- v360 filter
- Intel QSV-accelerated MJPEG decoding
- Intel QSV-accelerated VP9 decoding
- support for TrueHD in mp4
- Supoort AMD AMF encoder on Linux (via Vulkan)
- Support for TrueHD in mp4
- Support AMD AMF encoder on Linux (via Vulkan)
- IMM5 video decoder
- ZeroMQ protocol
- support Sipro ACELP.KELVIN decoding
@ -27,6 +27,9 @@ version <next>:
- axcorrelate filter
- mvdv decoder
- mvha decoder
- MPEG-H 3D Audio support in mp4
- thistogram filter
- freezeframes filter
version 4.2:

View File

@ -35,7 +35,6 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_eq.c`
- `vf_find_rect.c`
- `vf_fspp.c`
- `vf_geq.c`
- `vf_histeq.c`
- `vf_hqdn3d.c`
- `vf_kerndeint.c`

View File

@ -1096,7 +1096,7 @@ AVSC_INLINE AVS_Library * avs_load_library() {
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
if (library == NULL)
return NULL;
library->handle = LoadLibrary("avisynth");
library->handle = LoadLibraryA("avisynth");
if (library->handle == NULL)
goto fail;

12
configure vendored
View File

@ -482,6 +482,7 @@ Developer options (useful when working on FFmpeg itself):
--ignore-tests=TESTS comma-separated list (without "fate-" prefix
in the name) of tests whose result is ignored
--enable-linux-perf enable Linux Performance Monitor API
--disable-large-tests disable tests that use a large amount of memory
NOTE: Object files are built at the place where configure is launched.
EOF
@ -1931,6 +1932,7 @@ CONFIG_LIST="
$SUBSYSTEM_LIST
autodetect
fontconfig
large_tests
linux_perf
memory_poisoning
neon_clobber_test
@ -2194,6 +2196,7 @@ SYSTEM_FUNCS="
getaddrinfo
gethrtime
getopt
GetModuleHandle
GetProcessAffinityMask
GetProcessMemoryInfo
GetProcessTimes
@ -2223,6 +2226,7 @@ SYSTEM_FUNCS="
SecItemImport
SetConsoleTextAttribute
SetConsoleCtrlHandler
SetDllDirectory
setmode
setrlimit
Sleep
@ -3499,7 +3503,6 @@ freezedetect_filter_select="scene_sad"
frei0r_filter_deps="frei0r libdl"
frei0r_src_filter_deps="frei0r libdl"
fspp_filter_deps="gpl"
geq_filter_deps="gpl"
headphone_filter_select="fft"
histeq_filter_deps="gpl"
hqdn3d_filter_deps="gpl"
@ -3576,6 +3579,7 @@ tinterlace_filter_deps="gpl"
tinterlace_merge_test_deps="tinterlace_filter"
tinterlace_pad_test_deps="tinterlace_filter"
tonemap_filter_deps="const_nan"
tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
tonemap_opencl_filter_deps="opencl const_nan"
transpose_opencl_filter_deps="opencl"
transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags"
@ -3724,6 +3728,7 @@ enable asm
enable debug
enable doc
enable faan faandct faanidct
enable large_tests
enable optimizations
enable runtime_cpudetect
enable safe_bitstream_reader
@ -6032,6 +6037,7 @@ check_func_headers mach/mach_time.h mach_absolute_time
check_func_headers stdlib.h getenv
check_func_headers sys/stat.h lstat
check_func_headers windows.h GetModuleHandle
check_func_headers windows.h GetProcessAffinityMask
check_func_headers windows.h GetProcessTimes
check_func_headers windows.h GetSystemTimeAsFileTime
@ -6040,6 +6046,7 @@ check_func_headers windows.h MapViewOfFile
check_func_headers windows.h PeekNamedPipe
check_func_headers windows.h SetConsoleTextAttribute
check_func_headers windows.h SetConsoleCtrlHandler
check_func_headers windows.h SetDllDirectory
check_func_headers windows.h Sleep
check_func_headers windows.h VirtualAlloc
check_func_headers glob.h glob
@ -6577,6 +6584,7 @@ if enabled vaapi; then
check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"
check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
check_type "va/va.h va/va_vpp.h" "VAProcFilterParameterBufferHDRToneMapping"
check_struct "va/va.h va/va_vpp.h" "VAProcPipelineCaps" rotation_flags
check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
@ -7448,7 +7456,7 @@ cat > $TMPH <<EOF
#define FFMPEG_CONFIG_H
#define FFMPEG_CONFIGURATION "$(c_escape $FFMPEG_CONFIGURATION)"
#define FFMPEG_LICENSE "$(c_escape $license)"
#define CONFIG_THIS_YEAR 2019
#define CONFIG_THIS_YEAR 2020
#define FFMPEG_DATADIR "$(eval c_escape $datadir)"
#define AVCONV_DATADIR "$(eval c_escape $datadir)"
#define CC_IDENT "$(c_escape ${cc_ident:-Unknown compiler})"

View File

@ -15,6 +15,15 @@ libavutil: 2017-10-21
API changes, most recent first:
2020-01-15 - xxxxxxxxxx - lavc 58.66.100 - avcodec.h
Add AV_PKT_DATA_PRFT and AVProducerReferenceTime.
2019-12-27 - xxxxxxxxxx - lavu 56.38.100 - eval.h
Add av_expr_count_func().
2019-12-xx - xxxxxxxxxx - lavu 56.37.100 - buffer.h
Add av_buffer_pool_buffer_get_opaque().
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
Add av_expr_count_vars().

View File

@ -456,6 +456,17 @@ nanosecond precision.
@item video_size
Set the video size of the images to read. If not specified the video
size is guessed from the first image file in the sequence.
@item export_path_metadata
If set to 1, will add two extra fields to the metadata found in input, making them
also available for other filters (see @var{drawtext} filter for examples). Default
value is 0. The extra fields are described below:
@table @option
@item lavf.image2dec.source_path
Corresponds to the full path to the input file being read.
@item lavf.image2dec.source_basename
Corresponds to the name of the file being read.
@end table
@end table
@subsection Examples

View File

@ -1893,7 +1893,7 @@ key=value pairs. For example, to specify temporal scalability parameters
with @code{ffmpeg}:
@example
ffmpeg -i INPUT -c:v libvpx -ts-parameters ts_number_layers=3:\
ts_target_bitrate=250000,500000,1000000:ts_rate_decimator=4,2,1:\
ts_target_bitrate=250,500,1000:ts_rate_decimator=4,2,1:\
ts_periodicity=4:ts_layer_id=0,2,1,2 OUTPUT
@end example
Below is a brief explanation of each of the parameters, please
@ -1903,7 +1903,8 @@ details.
@item ts_number_layers
Number of temporal coding layers.
@item ts_target_bitrate
Target bitrate for each temporal layer.
Target bitrate for each temporal layer (in kbps).
(bitrate should be inclusive of the lower temporal layer).
@item ts_rate_decimator
Frame rate decimation factor for each temporal layer.
@item ts_periodicity
@ -2414,6 +2415,20 @@ during configuration. You need to explicitly configure the build with
@subsection Options
@table @option
@item b
Sets target video bitrate.
@item bf
@item g
Set the GOP size.
@item keyint_min
Minimum GOP size.
@item refs
Number of reference frames each P-frame can use. The range is from @var{1-16}.
@item preset
Set the x265 preset.
@ -2426,6 +2441,28 @@ Set profile restrictions.
@item crf
Set the quality for constant quality mode.
@item qp
Set constant quantization rate control method parameter.
@item qmin
Minimum quantizer scale.
@item qmax
Maximum quantizer scale.
@item qdiff
Maximum difference between quantizer scales.
@item qblur
Quantizer curve blur
@item qcomp
Quantizer curve compression factor
@item i_qfactor
@item b_qfactor
@item forced-idr
Normally, when forcing a I-frame type, the encoder can select any type
of I-frame. This option forces it to choose an IDR-frame.

View File

@ -879,12 +879,19 @@ Deprecated see -bsf
@item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream})
@item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream})
Force key frames at the specified timestamps, more precisely at the first
frames after each specified time.
@item -force_key_frames[:@var{stream_specifier}] source (@emph{output,per-stream})
If the argument is prefixed with @code{expr:}, the string @var{expr}
is interpreted like an expression and is evaluated for each frame. A
key frame is forced in case the evaluation is non-zero.
@var{force_key_frames} can take arguments of the following form:
@table @option
@item @var{time}[,@var{time}...]
If the argument consists of timestamps, ffmpeg will round the specified times to the nearest
output timestamp as per the encoder time base and force a keyframe at the first frame having
timestamp equal or greater than the computed timestamp. Note that if the encoder time base is too
coarse, then the keyframes may be forced on frames with timestamps lower than the specified time.
The default encoder time base is the inverse of the output framerate but may be set otherwise
via @code{-enc_time_base}.
If one of the times is "@code{chapters}[@var{delta}]", it is expanded into
the time of the beginning of all chapters in the file, shifted by
@ -898,6 +905,11 @@ before the beginning of every chapter:
-force_key_frames 0:05:00,chapters-0.1
@end example
@item expr:@var{expr}
If the argument is prefixed with @code{expr:}, the string @var{expr}
is interpreted like an expression and is evaluated for each frame. A
key frame is forced in case the evaluation is non-zero.
The expression in @var{expr} can contain the following constants:
@table @option
@item n
@ -925,6 +937,12 @@ starting from second 13:
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
@end example
@item source
If the argument is @code{source}, ffmpeg will force a key frame if
the current frame being encoded is marked as a key frame in its source.
@end table
Note that forcing too many keyframes is very harmful for the lookahead
algorithms of certain encoders: using fixed-GOP options or similar
would be more efficient.

View File

@ -443,6 +443,10 @@ How much to use compressed signal in output. Default is 1.
Range is between 0 and 1.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section acontrast
Simple audio dynamic range compression/expansion filter.
@ -1179,7 +1183,7 @@ afftfilt="real='hypot(re,im)*cos((random(0)*2-1)*2*3.14)':imag='hypot(re,im)*sin
@anchor{afir}
@section afir
Apply an arbitrary Frequency Impulse Response filter.
Apply an arbitrary Finite Impulse Response filter.
This filter is designed for applying long FIR filters,
up to 60 seconds long.
@ -1188,10 +1192,10 @@ It can be used as component for digital crossover filters,
room equalization, cross talk cancellation, wavefield synthesis,
auralization, ambiophonics, ambisonics and spatialization.
This filter uses the second stream as FIR coefficients.
If the second stream holds a single channel, it will be used
This filter uses the streams higher than first one as FIR coefficients.
If the non-first stream holds a single channel, it will be used
for all input channels in the first stream, otherwise
the number of channels in the second stream must be same as
the number of channels in the non-first stream must be same as
the number of channels in the first stream.
It accepts the following parameters:
@ -1253,13 +1257,22 @@ Set video stream frame rate. This option is used only when @var{response} is ena
@item minp
Set minimal partition size used for convolution. Default is @var{8192}.
Allowed range is from @var{8} to @var{32768}.
Allowed range is from @var{1} to @var{32768}.
Lower values decrease latency at the cost of higher CPU usage.
@item maxp
Set maximal partition size used for convolution. Default is @var{8192}.
Allowed range is from @var{8} to @var{32768}.
Lower values may increase CPU usage.
@item nbirs
Set number of input impulse responses streams which will be switchable at runtime.
Allowed range is from @var{1} to @var{32}. Default is @var{1}.
@item ir
Set IR stream which will be used for convolution, starting from @var{0}, should always be
lower than supplied value by @code{nbirs} option. Default is @var{0}.
This option can be changed at runtime via @ref{commands}.
@end table
@subsection Examples
@ -1281,13 +1294,13 @@ negotiate the most appropriate format to minimize conversions.
It accepts the following parameters:
@table @option
@item sample_fmts
@item sample_fmts, f
A '|'-separated list of requested sample formats.
@item sample_rates
@item sample_rates, r
A '|'-separated list of requested sample rates.
@item channel_layouts
@item channel_layouts, cl
A '|'-separated list of requested channel layouts.
See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
@ -3213,6 +3226,10 @@ Sets the intensity of effect (default: 2.0). Must be in range between 0.0
Enable clipping. By default is enabled.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section dcshift
Apply a DC shift to the audio.
@ -3438,8 +3455,20 @@ value. Instead, the threshold value will be adjusted for each individual
frame.
In general, smaller parameters result in stronger compression, and vice versa.
Values below 3.0 are not recommended, because audible distortion may appear.
@item threshold, t
Set the target threshold value. This specifies the lowest permissible
magnitude level for the audio input which will be normalized.
If input frame volume is above this value frame will be normalized.
Otherwise frame may not be normalized at all. The default value is set
to 0, which means all input frames will be normalized.
This option is mostly useful if digital noise is not wanted to be amplified.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section earwax
Make audio easier to listen to on headphones.
@ -3558,6 +3587,10 @@ Sets the difference coefficient (default: 2.5). 0.0 means mono sound
Enable clipping. By default is enabled.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section firequalizer
Apply FIR Equalization using arbitrary frequency response.
@ -4660,6 +4693,10 @@ How much to use compressed signal in output. Default is 1.
Range is between 0 and 1.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@subsection Examples
@itemize
@ -5167,6 +5204,10 @@ channels. Default is 0.3.
Set level of input signal of original channel. Default is 0.8.
@end table
@subsection Commands
This filter supports all the above options except @code{delay} as @ref{commands}.
@section superequalizer
Apply 18 band equalizer.
@ -5522,6 +5563,11 @@ Pre-amplification gain in dB to apply to the selected replaygain gain.
Default value for @var{replaygain_preamp} is 0.0.
@item replaygain_noclip
Prevent clipping by limiting the gain applied.
Default value for @var{replaygain_noclip} is 1.
@item eval
Set when the volume expression is evaluated.
@ -5581,11 +5627,6 @@ The command accepts the same syntax of the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@item replaygain_noclip
Prevent clipping by limiting the gain applied.
Default value for @var{replaygain_noclip} is 1.
@end table
@subsection Examples
@ -8354,6 +8395,9 @@ Draw rows and columns numbers on left and top of video.
@item opacity
Set background opacity.
@item format
Set display number format. Can be @code{hex}, or @code{dec}. Default is @code{hex}.
@end table
@section dctdnoiz
@ -8629,6 +8673,10 @@ Limit the maximum change for each plane, default is 65535.
If 0, plane will remain unchanged.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section deflicker
Remove temporal frame luminance variations.
@ -8970,6 +9018,10 @@ Flags to local 3x3 coordinates maps like this:
6 7 8
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section displace
Displace pixels as indicated by second and third input stream.
@ -9027,8 +9079,8 @@ ffmpeg -i INPUT -f lavfi -i nullsrc=hd720,geq='r=128+80*(sin(sqrt((X-W/2)*(X-W/2
@section dnn_processing
Do image processing with deep neural networks. Currently only AVFrame with RGB24
and BGR24 are supported, more formats will be added later.
Do image processing with deep neural networks. It works together with another filter
which converts the pixel format of the Frame to what the dnn network requires.
The filter accepts the following options:
@ -9063,12 +9115,23 @@ Set the input name of the dnn network.
@item output
Set the output name of the dnn network.
@item fmt
Set the pixel format for the Frame. Allowed values are @code{AV_PIX_FMT_RGB24}, and @code{AV_PIX_FMT_BGR24}.
Default value is @code{AV_PIX_FMT_RGB24}.
@end table
@itemize
@item
Halve the red channel of the frame with format rgb24:
@example
ffmpeg -i input.jpg -vf format=rgb24,dnn_processing=model=halve_first_channel.model:input=dnn_in:output=dnn_out:dnn_backend=native out.native.png
@end example
@item
Halve the pixel value of the frame with format gray32f:
@example
ffmpeg -i input.jpg -vf format=grayf32,dnn_processing=model=halve_gray_float.model:input=dnn_in:output=dnn_out:dnn_backend=native -y out.native.png
@end example
@end itemize
@section drawbox
Draw a colored box on the input image.
@ -9258,6 +9321,9 @@ Set size of graph video. For the syntax of this option, check the
@ref{video size syntax,,"Video size" section in the ffmpeg-utils manual,ffmpeg-utils}.
The default value is @code{900x256}.
@item rate, r
Set the output frame rate. Default value is @code{25}.
The foreground color expressions can use the following variables:
@table @option
@item MIN
@ -9869,6 +9935,15 @@ drawtext=fontfile=FreeSans.ttf:text=DOG:fontsize=24:x=10:y=20+24-max_glyph_a,
drawtext=fontfile=FreeSans.ttf:text=cow:fontsize=24:x=80:y=20+24-max_glyph_a
@end example
@item
Plot special @var{lavf.image2dec.source_basename} metadata onto each frame if
such metadata exists. Otherwise, plot the string "NA". Note that image2 demuxer
must have option @option{-export_path_metadata 1} for the special metadata fields
to be available for filters.
@example
drawtext="fontsize=20:fontcolor=white:fontfile=FreeSans.ttf:text='%@{metadata\:lavf.image2dec.source_basename\:NA@}':x=10:y=10"
@end example
@end itemize
For more information about libfreetype, check:
@ -10118,6 +10193,10 @@ Flags to local 3x3 coordinates maps like this:
6 7 8
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section extractplanes
Extract color channel components from input video stream into
@ -11143,6 +11222,25 @@ specified value) or as a difference ratio between 0 and 1. Default is -60dB, or
Set freeze duration until notification (default is 2 seconds).
@end table
@section freezeframes
Freeze video frames.
This filter freezes video frames using a frame from the 2nd input.
The filter accepts the following options:
@table @option
@item first
Set number of first frame from which to start freeze.
@item last
Set number of last frame from which to end freeze.
@item replace
Set number of frame from 2nd input which will be used instead of replaced frames.
@end table
@anchor{frei0r}
@section frei0r
@ -11671,6 +11769,7 @@ the histogram. Possible values are @code{none}, @code{weak} or
@code{strong}. It defaults to @code{none}.
@end table
@anchor{histogram}
@section histogram
Compute and draw a color distribution histogram for the input video.
@ -12165,6 +12264,10 @@ Default value is @code{none}.
Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is @code{0}.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section inflate
Apply inflate effect to the video.
@ -12183,6 +12286,10 @@ Limit the maximum change for each plane, default is 65535.
If 0, plane will remain unchanged.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@section interlace
Simple interlacing filter from progressive contents. This interleaves upper (or
@ -15345,42 +15452,16 @@ Set the line to start scanning for EIA-608 data. Default is @code{0}.
@item scan_max
Set the line to end scanning for EIA-608 data. Default is @code{29}.
@item mac
Set minimal acceptable amplitude change for sync codes detection.
Default is @code{0.2}. Allowed range is @code{[0.001 - 1]}.
@item spw
Set the ratio of width reserved for sync code detection.
Default is @code{0.27}. Allowed range is @code{[0.01 - 0.7]}.
@item mhd
Set the max peaks height difference for sync code detection.
Default is @code{0.1}. Allowed range is @code{[0.0 - 0.5]}.
@item mpd
Set max peaks period difference for sync code detection.
Default is @code{0.1}. Allowed range is @code{[0.0 - 0.5]}.
@item msd
Set the first two max start code bits differences.
Default is @code{0.02}. Allowed range is @code{[0.0 - 0.5]}.
@item bhd
Set the minimum ratio of bits height compared to 3rd start code bit.
Default is @code{0.75}. Allowed range is @code{[0.01 - 1]}.
@item th_w
Set the white color threshold. Default is @code{0.35}. Allowed range is @code{[0.1 - 1]}.
@item th_b
Set the black color threshold. Default is @code{0.15}. Allowed range is @code{[0.0 - 0.5]}.
Default is @code{0.27}. Allowed range is @code{[0.1 - 0.7]}.
@item chp
Enable checking the parity bit. In the event of a parity error, the filter will output
@code{0x00} for that character. Default is false.
@item lp
Lowpass lines prior to further processing. Default is disabled.
Lowpass lines prior to further processing. Default is enabled.
@end table
@subsection Examples
@ -16058,6 +16139,19 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
@item ovsub
horizontal and vertical output chroma subsample values. For example for the
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
@item n
The (sequential) number of the input frame, starting from 0.
Only available with @code{eval=frame}.
@item t
The presentation timestamp of the input frame, expressed as a number of
seconds. Only available with @code{eval=frame}.
@item pos
The position (byte offset) of the frame in the input stream, or NaN if
this information is unavailable and/or meaningless (for example in case of synthetic video).
Only available with @code{eval=frame}.
@end table
@subsection Examples
@ -16281,6 +16375,19 @@ The main input video's display aspect ratio. Calculated from
The main input video's horizontal and vertical chroma subsample values.
For example for the pixel format "yuv422p" @var{hsub} is 2 and @var{vsub}
is 1.
@item main_n
The (sequential) number of the main input frame, starting from 0.
Only available with @code{eval=frame}.
@item main_t
The presentation timestamp of the main input frame, expressed as a number of
seconds. Only available with @code{eval=frame}.
@item main_pos
The position (byte offset) of the frame in the main input stream, or NaN if
this information is unavailable and/or meaningless (for example in case of synthetic video).
Only available with @code{eval=frame}.
@end table
@subsection Examples
@ -16299,6 +16406,19 @@ Scale a logo to 1/10th the height of a video, while preserving its display aspec
@end example
@end itemize
@subsection Commands
This filter supports the following commands:
@table @option
@item width, w
@item height, h
Set the output video dimension expression.
The command accepts the same syntax of the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@end table
@section scroll
Scroll input video horizontally and/or vertically by constant speed.
@ -16723,6 +16843,15 @@ The Adler-32 checksum (printed in hexadecimal) of all the planes of the input fr
@item plane_checksum
The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3}]".
@item mean
The mean value of pixels in each plane of the input frame, expressed in the form
"[@var{mean0} @var{mean1} @var{mean2} @var{mean3}]".
@item stdev
The standard deviation of pixel values in each plane of the input frame, expressed
in the form "[@var{stdev0} @var{stdev1} @var{stdev2} @var{stdev3}]".
@end table
@section showpalette
@ -17190,6 +17319,15 @@ option may cause flicker since the B-Frames have often larger QP. Default is
@code{0} (not enabled).
@end table
@subsection Commands
This filter supports the following commands:
@table @option
@item level
@item quality
Same as the quality option. The command also accepts @code{max}, which is equivalent to @code{6}.
@end table
@section sr
Scale the input by applying one of the super-resolution methods based on
@ -17740,6 +17878,61 @@ PAL output (25i):
16p: 33333334
@end example
@section thistogram
Compute and draw a color distribution histogram for the input video across time.
Unlike @ref{histogram} video filter which only shows histogram of single input frame
at certain time, this filter shows also past histograms of number of frames defined
by @code{width} option.
The computed histogram is a representation of the color component
distribution in an image.
The filter accepts the following options:
@table @option
@item width, w
Set width of single color component output. Default value is @code{0}.
Value of @code{0} means width will be picked from input video.
This also sets the number of past histograms to keep.
Allowed range is [0, 8192].
@item display_mode, d
Set display mode.
It accepts the following values:
@table @samp
@item stack
Per color component graphs are placed below each other.
@item parade
Per color component graphs are placed side by side.
@item overlay
Presents information identical to that in the @code{parade}, except
that the graphs representing color components are superimposed directly
over one another.
@end table
Default is @code{stack}.
@item levels_mode, m
Set mode. Can be either @code{linear}, or @code{logarithmic}.
Default is @code{linear}.
@item components, c
Set what color components to display.
Default is @code{7}.
@item bgopacity, b
Set background opacity. Default is @code{0.9}.
@item envelope, e
Show envelope. Default is disabled.
@item ecolor, ec
Set envelope color. Default is @code{gold}.
@end table
@section threshold
Apply threshold effect to video stream.
@ -18119,10 +18312,12 @@ Enable complex vertical low-pass filtering.
This will reduce interlace 'twitter' and Moire patterning slightly less,
but better retain detail and subjective sharpness impression.
@item bypass_il
Bypass already interlaced frames, only adjust the frame rate.
@end table
Vertical low-pass filtering can only be enabled for @option{mode}
@var{interleave_top} and @var{interleave_bottom}.
Vertical low-pass filtering and bypassing already interlaced frames can only be
enabled for @option{mode} @var{interleave_top} and @var{interleave_bottom}.
@end table
@ -18938,6 +19133,7 @@ Set vectorscope mode.
It accepts the following values:
@table @samp
@item gray
@item tint
Gray values are displayed on graph, higher brightness means more pixels have
same component color value on location in graph. This is the default mode.
@ -18996,6 +19192,7 @@ Set what kind of graticule to draw.
@item none
@item green
@item color
@item invert
@end table
@item opacity, o
@ -19040,6 +19237,11 @@ Set what kind of colorspace to use when drawing graticule.
@item 709
@end table
Default is auto.
@item tint0, t0
@item tint1, t1
Set color tint for gray/tint vectorscope mode. By default both options are zero.
This means no tint, and output will remain gray.
@end table
@anchor{vidstabdetect}
@ -19328,6 +19530,10 @@ If @code{intensity} is negative and this is set to 1, colors will change,
otherwise colors will be less saturated, more towards gray.
@end table
@subsection Commands
This filter supports all the above options as @ref{commands}.
@anchor{vignette}
@section vignette
@ -19671,6 +19877,12 @@ Default is digital.
@item bgopacity, b
Set background opacity.
@item tint0, t0
@item tint1, t1
Set tint for output.
Only used with lowpass filter and when display is not overlay and input
pixel formats are not RGB.
@end table
@section weave, doubleweave
@ -20344,7 +20556,17 @@ horizontal and vertical output chroma subsample values. For example for the
pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
@end table
@subsection Commands
This filter supports the following commands:
@table @option
@item width, w
@item height, h
Set the output video dimension expression.
The command accepts the same syntax of the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@end table
@c man end VIDEO FILTERS
@ -21034,6 +21256,65 @@ Apply a strong blur of both luma and chroma parameters:
@c man end OPENCL VIDEO FILTERS
@chapter VAAPI Video Filters
@c man begin VAAPI VIDEO FILTERS
VAAPI Video filters are usually used with VAAPI decoder and VAAPI encoder. Below is a description of VAAPI video filters.
To enable compilation of these filters you need to configure FFmpeg with
@code{--enable-vaapi}.
To use vaapi filters, you need to setup the vaapi device correctly. For more information, please read @url{https://trac.ffmpeg.org/wiki/Hardware/VAAPI}
@section tonemap_vaapi
Perform HDR(High Dynamic Range) to SDR(Standard Dynamic Range) conversion with tone-mapping.
It maps the dynamic range of HDR10 content to the SDR content.
It currently only accepts HDR10 as input.
It accepts the following parameters:
@table @option
@item format
Specify the output pixel format.
Currently supported formats are:
@table @var
@item p010
@item nv12
@end table
Default is nv12.
@item primaries, p
Set the output color primaries.
Default is same as input.
@item transfer, t
Set the output transfer characteristics.
Default is bt709.
@item matrix, m
Set the output colorspace matrix.
Default is same as input.
@end table
@subsection Example
@itemize
@item
Convert HDR(HDR10) video to bt2020-transfer-characteristic p010 format
@example
tonemap_vaapi=format=p010:t=bt2020-10
@end example
@end itemize
@c man end VAAPI VIDEO FILTERS
@chapter Video Sources
@c man begin VIDEO SOURCES
@ -21076,9 +21357,9 @@ Specify the frame rate expected for the video stream.
The sample (pixel) aspect ratio of the input video.
@item sws_param
Specify the optional parameters to be used for the scale filter which
is automatically inserted when an input change is detected in the
input size or format.
This option is deprecated and ignored. Prepend @code{sws_flags=@var{flags};}
to the filtergraph description to specify swscale flags for automatically
inserted scalers. See @ref{Filtergraph syntax}.
@item hw_frames_ctx
When using a hardware pixel format, this should be a reference to an
@ -21103,7 +21384,7 @@ buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
Alternatively, the options can be specified as a flat string, but this
syntax is deprecated:
@var{width}:@var{height}:@var{pix_fmt}:@var{time_base.num}:@var{time_base.den}:@var{pixel_aspect.num}:@var{pixel_aspect.den}[:@var{sws_param}]
@var{width}:@var{height}:@var{pix_fmt}:@var{time_base.num}:@var{time_base.den}:@var{pixel_aspect.num}:@var{pixel_aspect.den}
@section cellauto
@ -22516,6 +22797,9 @@ plain filename any writable url can be specified. Filename ``-'' is a shorthand
for standard output. If @code{file} option is not set, output is written to the log
with AV_LOG_INFO loglevel.
@item direct
Reduces buffering in print mode when output is written to a URL set using @var{file}.
@end table
@subsection Examples

View File

@ -814,11 +814,13 @@ following image formats are supported:
@item Autodesk RLE @tab @tab X
@tab fourcc: AASC
@item AV1 @tab E @tab E
@tab Supported through external libraries libaom and libdav1d
@tab Supported through external libraries libaom, libdav1d and librav1e
@item Avid 1:1 10-bit RGB Packer @tab X @tab X
@tab fourcc: AVrp
@item AVS (Audio Video Standard) video @tab @tab X
@tab Video encoding used by the Creature Shock game.
@item AVS2-P2/IEEE1857.4 @tab E @tab E
@tab Supported through external libraries libxavs2 and libdavs2
@item AYUV @tab X @tab X
@tab Microsoft uncompressed packed 4:4:4:4
@item Beam Software VB @tab @tab X

View File

@ -277,8 +277,8 @@ audio track.
@item list_devices
If set to @option{true}, print a list of devices and exit.
Defaults to @option{false}. Alternatively you can use the @code{-sources}
option of ffmpeg to list the available input devices.
Defaults to @option{false}. This option is deprecated, please use the
@code{-sources} option of ffmpeg to list the available input devices.
@item list_formats
If set to @option{true}, print a list of supported formats and exit.
@ -292,11 +292,6 @@ as @option{pal} (3 letters).
Default behavior is autodetection of the input video format, if the hardware
supports it.
@item bm_v210
This is a deprecated option, you can use @option{raw_format} instead.
If set to @samp{1}, video is captured in 10 bit v210 instead
of uyvy422. Not all Blackmagic devices support this option.
@item raw_format
Set the pixel format of the captured video.
Available values are:
@ -412,7 +407,7 @@ Defaults to @option{false}.
@item
List input devices:
@example
ffmpeg -f decklink -list_devices 1 -i dummy
ffmpeg -sources decklink
@end example
@item
@ -430,7 +425,7 @@ ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy outp
@item
Capture video clip at 1080i50 10 bit:
@example
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
ffmpeg -raw_format yuv422p10 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
@end example
@item
@ -1532,7 +1527,7 @@ ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_siz
@end example
@item video_size
Set the video frame size. Default value is @code{vga}.
Set the video frame size. Default is the full desktop.
@item grab_x
@item grab_y

View File

@ -236,8 +236,10 @@ This is a deprecated option to set the segment length in microseconds, use @var{
@item seg_duration @var{duration}
Set the segment length in seconds (fractional value can be set). The value is
treated as average segment duration when @var{use_template} is enabled and
@var{use_timeline} is disabled and as minimum segment duration for all the other
use cases.
@item frag_duration @var{duration}
Set the length in seconds of fragments within segments (fractional value can be set).
@item frag_type @var{type}
Set the type of interval for fragmentation.
@item window_size @var{size}
Set the maximum number of segments kept in the manifest.
@item extra_window_size @var{size}
@ -278,9 +280,12 @@ To map all video (or audio) streams to an AdaptationSet, "v" (or "a") can be use
When no assignment is defined, this defaults to an AdaptationSet for each stream.
Optional syntax is "id=x,descriptor=descriptor_string,streams=a,b,c id=y,streams=d,e" and so on, descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015.
Optional syntax is "id=x,seg_duration=x,frag_duration=x,frag_type=type,descriptor=descriptor_string,streams=a,b,c id=y,seg_duration=y,frag_type=type,streams=d,e" and so on,
descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015.
For example, -adaptation_sets "id=0,descriptor=<SupplementalProperty schemeIdUri=\"urn:mpeg:dash:srd:2014\" value=\"0,0,0,1,1,2,2\"/>,streams=v".
Please note that descriptor string should be a self-closing xml tag.
seg_duration, frag_duration and frag_type override the global option values for each adaptation set.
For example, -adaptation_sets "id=0,seg_duration=2,frag_duration=1,frag_type=duration,streams=v id=1,seg_duration=2,frag_type=none,streams=a"
@item timeout @var{timeout}
Set timeout for socket I/O operations. Applicable only for HTTP output.
@item index_correction @var{index_correction}
@ -326,9 +331,26 @@ This option will also try to comply with the above open spec, till Apple's spec
Applicable only when @var{streaming} and @var{hls_playlist} options are enabled.
This is an experimental feature.
@item ldash @var{ldash}
Enable Low-latency Dash by constraining the presence and values of some elements.
@item master_m3u8_publish_rate @var{master_m3u8_publish_rate}
Publish master playlist repeatedly every after specified number of segment intervals.
@item -write_prft @var{write_prft}
Write Producer Reference Time elements on supported streams. This also enables writing
prft boxes in the underlying muxer. Applicable only when the @var{utc_url} option is enabled.
@item -mpd_profile @var{mpd_profile}
Set one or more manifest profiles.
@item -http_opts @var{http_opts}
List of options to pass to the underlying HTTP protocol. Applicable only for HTTP output.
@item -target_latency @var{target_latency}
Set an intended target latency in seconds (fractional value can be set) for serving. Applicable only when @var{streaming} and @var{write_prft} options are enabled.
This is an informative field that clients can use to measure the latency of the service.
@end table
@anchor{framecrc}
@ -1169,6 +1191,32 @@ The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
form @file{img%-1.jpg}, @file{img%-2.jpg}, ..., @file{img%-10.jpg},
etc.
The image muxer supports the .Y.U.V image file format. This format is
special in that each image frame consists of three files, one for
each of the YUV420P components. To read or write this image file format,
specify the name of the '.Y' file. The muxer will automatically open the
'.U' and '.V' files as required.
@subsection Options
@table @option
@item frame_pts
If set to 1, expand the filename with pts from pkt->pts.
Default value is 0.
@item start_number
Start the sequence from the specified number. Default value is 1.
@item update
If set to 1, the filename will always be interpreted as just a
filename, not a pattern, and the corresponding file will be continuously
overwritten with new images. Default value is 0.
@item strftime
If set to 1, expand the filename with date and time information from
@code{strftime()}. Default value is 0.
@end table
@subsection Examples
The following example shows how to use @command{ffmpeg} for creating a
@ -1209,32 +1257,6 @@ You can set the file name with current frame's PTS:
ffmpeg -f v4l2 -r 1 -i /dev/video0 -copyts -f image2 -frame_pts true %d.jpg"
@end example
@subsection Options
@table @option
@item frame_pts
If set to 1, expand the filename with pts from pkt->pts.
Default value is 0.
@item start_number
Start the sequence from the specified number. Default value is 1.
@item update
If set to 1, the filename will always be interpreted as just a
filename, not a pattern, and the corresponding file will be continuously
overwritten with new images. Default value is 0.
@item strftime
If set to 1, expand the filename with date and time information from
@code{strftime()}. Default value is 0.
@end table
The image muxer supports the .Y.U.V image file format. This format is
special in that each image frame consists of three files, one for
each of the YUV420P components. To read or write this image file format,
specify the name of the '.Y' file. The muxer will automatically open the
'.U' and '.V' files as required.
@section matroska
Matroska container muxer.

View File

@ -140,8 +140,8 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.
@item list_devices
If set to @option{true}, print a list of devices and exit.
Defaults to @option{false}. Alternatively you can use the @code{-sinks}
option of ffmpeg to list the available output devices.
Defaults to @option{false}. This option is deprecated, please use the
@code{-sinks} option of ffmpeg to list the available output devices.
@item list_formats
If set to @option{true}, print a list of supported formats and exit.
@ -168,7 +168,7 @@ Defaults to @samp{unset}.
@item
List output devices:
@example
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
ffmpeg -sinks decklink
@end example
@item

View File

@ -119,7 +119,7 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
void init_dynload(void)
{
#ifdef _WIN32
#if HAVE_SETDLLDIRECTORY && defined(_WIN32)
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
* current working directory from the DLL search path as a security pre-caution. */
SetDllDirectory("");
@ -182,7 +182,7 @@ void show_help_options(const OptionDef *options, const char *msg, int req_flags,
first = 1;
for (po = options; po->name; po++) {
char buf[64];
char buf[128];
if (((po->flags & req_flags) != req_flags) ||
(alt_flags && !(po->flags & alt_flags)) ||
@ -2039,7 +2039,7 @@ FILE *get_preset_file(char *filename, size_t filename_size,
av_strlcpy(filename, preset_name, filename_size);
f = fopen(filename, "r");
} else {
#ifdef _WIN32
#if HAVE_GETMODULEHANDLE && defined(_WIN32)
char datadir[MAX_PATH], *ls;
base[2] = NULL;

View File

@ -1268,7 +1268,8 @@ static void do_video_out(OutputFile *of,
ost->forced_keyframes_expr_const_values[FKF_N] += 1;
} else if ( ost->forced_keyframes
&& !strncmp(ost->forced_keyframes, "source", 6)
&& in_picture->key_frame==1) {
&& in_picture->key_frame==1
&& !i) {
forced_keyframe = 1;
}
@ -3404,10 +3405,6 @@ static int init_output_stream_encode(OutputStream *ost)
av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
"Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
}
for (j = 0; j < ost->forced_kf_count; j++)
ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
AV_TIME_BASE_Q,
enc_ctx->time_base);
enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
@ -3599,12 +3596,14 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
int i;
for (i = 0; i < ist->st->nb_side_data; i++) {
AVPacketSideData *sd = &ist->st->side_data[i];
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
if (!dst)
return AVERROR(ENOMEM);
memcpy(dst, sd->data, sd->size);
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
av_display_rotation_set((uint32_t *)dst, 0);
if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
if (!dst)
return AVERROR(ENOMEM);
memcpy(dst, sd->data, sd->size);
if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
av_display_rotation_set((uint32_t *)dst, 0);
}
}
}

View File

@ -786,10 +786,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprintf(&args,
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
"pixel_aspect=%d/%d:sws_param=flags=%d",
"pixel_aspect=%d/%d",
ifilter->width, ifilter->height, ifilter->format,
tb.num, tb.den, sar.num, sar.den,
SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
tb.num, tb.den, sar.num, sar.den);
if (fr.num && fr.den)
av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,

View File

@ -3202,7 +3202,7 @@ void show_help_default(const char *opt, const char *arg)
OPT_EXIT, 0, 0);
show_help_options(options, "Global options (affect whole program "
"instead of just one file:",
"instead of just one file):",
0, per_file | OPT_EXIT | OPT_EXPERT, 0);
if (show_advanced)
show_help_options(options, "Advanced global options:", OPT_EXPERT,

View File

@ -254,6 +254,7 @@ static const OptionDef *options;
/* FFprobe context */
static const char *input_filename;
static const char *print_input_filename;
static AVInputFormat *iformat = NULL;
static struct AVHashContext *hash;
@ -2836,7 +2837,8 @@ static void show_error(WriterContext *w, int err)
writer_print_section_footer(w);
}
static int open_input_file(InputFile *ifile, const char *filename)
static int open_input_file(InputFile *ifile, const char *filename,
const char *print_filename)
{
int err, i;
AVFormatContext *fmt_ctx = NULL;
@ -2858,6 +2860,10 @@ static int open_input_file(InputFile *ifile, const char *filename)
print_error(filename, err);
return err;
}
if (print_filename) {
av_freep(&fmt_ctx->url);
fmt_ctx->url = av_strdup(print_filename);
}
ifile->fmt_ctx = fmt_ctx;
if (scan_all_pmts_set)
av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
@ -2971,7 +2977,8 @@ static void close_input_file(InputFile *ifile)
avformat_close_input(&ifile->fmt_ctx);
}
static int probe_file(WriterContext *wctx, const char *filename)
static int probe_file(WriterContext *wctx, const char *filename,
const char *print_filename)
{
InputFile ifile = { 0 };
int ret, i;
@ -2980,7 +2987,7 @@ static int probe_file(WriterContext *wctx, const char *filename)
do_read_frames = do_show_frames || do_count_frames;
do_read_packets = do_show_packets || do_count_packets;
ret = open_input_file(&ifile, filename);
ret = open_input_file(&ifile, filename, print_filename);
if (ret < 0)
goto end;
@ -3286,6 +3293,12 @@ static int opt_input_file_i(void *optctx, const char *opt, const char *arg)
return 0;
}
static int opt_print_filename(void *optctx, const char *opt, const char *arg)
{
print_input_filename = arg;
return 0;
}
void show_help_default(const char *opt, const char *arg)
{
av_log_set_callback(log_callback_help);
@ -3544,6 +3557,7 @@ static const OptionDef real_options[] = {
{ "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" },
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" },
{ "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"},
{ "print_filename", HAS_ARG, {.func_arg = opt_print_filename}, "override the printed input filename", "print_file"},
{ "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
"read and decode the streams to fill missing information with heuristics" },
{ NULL, },
@ -3692,7 +3706,7 @@ int main(int argc, char **argv)
av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
ret = AVERROR(EINVAL);
} else if (input_filename) {
ret = probe_file(wctx, input_filename);
ret = probe_file(wctx, input_filename, print_input_filename);
if (ret < 0 && do_show_error)
show_error(wctx, ret);
}

View File

@ -140,8 +140,8 @@ static av_cold int adpcm_decode_init(AVCodecContext * avctx)
break;
case AV_CODEC_ID_ADPCM_IMA_APC:
if (avctx->extradata && avctx->extradata_size >= 8) {
c->status[0].predictor = AV_RL32(avctx->extradata);
c->status[1].predictor = AV_RL32(avctx->extradata + 4);
c->status[0].predictor = av_clip_intp2(AV_RL32(avctx->extradata ), 18);
c->status[1].predictor = av_clip_intp2(AV_RL32(avctx->extradata + 4), 18);
}
break;
case AV_CODEC_ID_ADPCM_IMA_WS:
@ -441,7 +441,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
d = in[16+i+j*4];
t = sign_extend(d, 4);
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
s_2 = s_1;
s_1 = av_clip_int16(s);
out0[j] = s_1;
@ -468,7 +468,7 @@ static int xa_decode(AVCodecContext *avctx, int16_t *out0, int16_t *out1,
d = in[16+i+j*4];
t = sign_extend(d >> 4, 4);
s = ( t<<shift ) + ((s_1*f0 + s_2*f1+32)>>6);
s = t*(1<<shift) + ((s_1*f0 + s_2*f1+32)>>6);
s_2 = s_1;
s_1 = av_clip_int16(s);
out1[j] = s_1;
@ -1233,7 +1233,7 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
}
for (i=0; i<=st; i++) {
c->status[i].predictor = bytestream2_get_le32u(&gb);
if (FFABS(c->status[i].predictor) > (1<<16))
if (FFABS((int64_t)c->status[i].predictor) > (1<<16))
return AVERROR_INVALIDDATA;
}

View File

@ -423,8 +423,8 @@ static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size,
int map = s->map[x];
if (orig_mv_x >= -32) {
if (y * 8 + mv_y < 0 || y * 8 + mv_y >= h ||
x * 8 + mv_x < 0 || x * 8 + mv_x >= w)
if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 >= h ||
x * 8 + mv_x < 0 || x * 8 + mv_x + 8 >= w)
return AVERROR_INVALIDDATA;
copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,

View File

@ -228,7 +228,7 @@ static void lpc_prediction(int32_t *error_buffer, uint32_t *buffer_out,
sign = sign_only(val) * error_sign;
lpc_coefs[j] -= sign;
val *= (unsigned)sign;
error_val -= (val >> lpc_quant) * (j + 1);
error_val -= (val >> lpc_quant) * (j + 1U);
}
}
}

View File

@ -776,6 +776,7 @@ extern AVCodec ff_mpeg2_qsv_encoder;
extern AVCodec ff_mpeg2_vaapi_encoder;
extern AVCodec ff_mpeg4_cuvid_decoder;
extern AVCodec ff_mpeg4_mediacodec_decoder;
extern AVCodec ff_mpeg4_omx_encoder;
extern AVCodec ff_mpeg4_v4l2m2m_encoder;
extern AVCodec ff_vc1_cuvid_decoder;
extern AVCodec ff_vp8_cuvid_decoder;

View File

@ -496,6 +496,7 @@ static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb,
x = (overflow << rice->k) + get_bits(gb, rice->k);
} else {
av_log(ctx->avctx, AV_LOG_ERROR, "Too many bits: %"PRIu32"\n", rice->k);
ctx->error = 1;
return AVERROR_INVALIDDATA;
}
rice->ksum += x - (rice->ksum + 8 >> 4);

View File

@ -105,7 +105,7 @@ int ff_ass_add_rect(AVSubtitle *sub, const char *dialog,
char *ass_str;
AVSubtitleRect **rects;
rects = av_realloc_array(sub->rects, (sub->num_rects+1), sizeof(*sub->rects));
rects = av_realloc_array(sub->rects, sub->num_rects+1, sizeof(*sub->rects));
if (!rects)
return AVERROR(ENOMEM);
sub->rects = rects;

View File

@ -223,8 +223,18 @@ static inline int parse_band_ext(ATRAC9Context *s, ATRAC9BlockData *b,
b->channel[0].band_ext = get_bits(gb, 2);
b->channel[0].band_ext = ext_band > 2 ? b->channel[0].band_ext : 4;
if (!get_bits(gb, 5))
if (!get_bits(gb, 5)) {
for (int i = 0; i <= stereo; i++) {
ATRAC9ChannelData *c = &b->channel[i];
const int count = at9_tab_band_ext_cnt[c->band_ext][ext_band];
for (int j = 0; j < count; j++) {
int len = at9_tab_band_ext_lengths[c->band_ext][ext_band][j];
c->band_ext_data[j] = av_clip_uintp2_c(c->band_ext_data[j], len);
}
}
return 0;
}
for (int i = 0; i <= stereo; i++) {
ATRAC9ChannelData *c = &b->channel[i];

View File

@ -656,6 +656,7 @@ enum AVCodecID {
AV_CODEC_ID_ATRAC9,
AV_CODEC_ID_HCOM,
AV_CODEC_ID_ACELP_KELVIN,
AV_CODEC_ID_MPEGH_3D_AUDIO,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@ -1175,6 +1176,11 @@ typedef struct AVCPBProperties {
uint64_t vbv_delay;
} AVCPBProperties;
typedef struct AVProducerReferenceTime {
int64_t wallclock;
int flags;
} AVProducerReferenceTime;
/**
* The decoder will keep a reference to the frame and may reuse it later.
*/
@ -1409,6 +1415,11 @@ enum AVPacketSideDataType {
*/
AV_PKT_DATA_AFD,
/**
* Producer Reference Time data corresponding to the AVProducerReferenceTime struct.
*/
AV_PKT_DATA_PRFT,
/**
* The number of side data types.
* This is not part of the public API/ABI in the sense that it may

View File

@ -741,3 +741,25 @@ int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, i
return 0;
}
int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp)
{
AVProducerReferenceTime *prft;
uint8_t *side_data;
int side_data_size;
side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size);
if (!side_data) {
side_data_size = sizeof(AVProducerReferenceTime);
side_data = av_packet_new_side_data(pkt, AV_PKT_DATA_PRFT, side_data_size);
}
if (!side_data || side_data_size < sizeof(AVProducerReferenceTime))
return AVERROR(ENOMEM);
prft = (AVProducerReferenceTime *)side_data;
prft->wallclock = timestamp;
prft->flags = 0;
return 0;
}

View File

@ -493,7 +493,7 @@ int ff_bgmc_decode_init(GetBitContext *gb, unsigned int *h,
*h = TOP_VALUE;
*l = 0;
*v = get_bits_long(gb, VALUE_BITS);
*v = get_bits(gb, VALUE_BITS);
return 0;
}

View File

@ -153,7 +153,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
static float get_float(GetBitContext *gb)
{
int power = get_bits(gb, 5);
float f = ldexpf(get_bits_long(gb, 23), power - 23);
float f = ldexpf(get_bits(gb, 23), power - 23);
if (get_bits1(gb))
f = -f;
return f;

View File

@ -82,6 +82,7 @@ const AVClass *av_bsf_get_class(void)
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
{
AVBSFContext *ctx;
AVBSFInternal *bsfi;
int ret;
ctx = av_mallocz(sizeof(*ctx));
@ -98,14 +99,15 @@ int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **pctx)
goto fail;
}
ctx->internal = av_mallocz(sizeof(*ctx->internal));
if (!ctx->internal) {
bsfi = av_mallocz(sizeof(*bsfi));
if (!bsfi) {
ret = AVERROR(ENOMEM);
goto fail;
}
ctx->internal = bsfi;
ctx->internal->buffer_pkt = av_packet_alloc();
if (!ctx->internal->buffer_pkt) {
bsfi->buffer_pkt = av_packet_alloc();
if (!bsfi->buffer_pkt) {
ret = AVERROR(ENOMEM);
goto fail;
}
@ -175,9 +177,11 @@ int av_bsf_init(AVBSFContext *ctx)
void av_bsf_flush(AVBSFContext *ctx)
{
ctx->internal->eof = 0;
AVBSFInternal *bsfi = ctx->internal;
av_packet_unref(ctx->internal->buffer_pkt);
bsfi->eof = 0;
av_packet_unref(bsfi->buffer_pkt);
if (ctx->filter->flush)
ctx->filter->flush(ctx);
@ -185,26 +189,27 @@ void av_bsf_flush(AVBSFContext *ctx)
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
{
AVBSFInternal *bsfi = ctx->internal;
int ret;
if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
ctx->internal->eof = 1;
bsfi->eof = 1;
return 0;
}
if (ctx->internal->eof) {
if (bsfi->eof) {
av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
return AVERROR(EINVAL);
}
if (ctx->internal->buffer_pkt->data ||
ctx->internal->buffer_pkt->side_data_elems)
if (bsfi->buffer_pkt->data ||
bsfi->buffer_pkt->side_data_elems)
return AVERROR(EAGAIN);
ret = av_packet_make_refcounted(pkt);
if (ret < 0)
return ret;
av_packet_move_ref(ctx->internal->buffer_pkt, pkt);
av_packet_move_ref(bsfi->buffer_pkt, pkt);
return 0;
}
@ -216,38 +221,38 @@ int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
int ff_bsf_get_packet(AVBSFContext *ctx, AVPacket **pkt)
{
AVBSFInternal *in = ctx->internal;
AVBSFInternal *bsfi = ctx->internal;
AVPacket *tmp_pkt;
if (in->eof)
if (bsfi->eof)
return AVERROR_EOF;
if (!ctx->internal->buffer_pkt->data &&
!ctx->internal->buffer_pkt->side_data_elems)
if (!bsfi->buffer_pkt->data &&
!bsfi->buffer_pkt->side_data_elems)
return AVERROR(EAGAIN);
tmp_pkt = av_packet_alloc();
if (!tmp_pkt)
return AVERROR(ENOMEM);
*pkt = ctx->internal->buffer_pkt;
ctx->internal->buffer_pkt = tmp_pkt;
*pkt = bsfi->buffer_pkt;
bsfi->buffer_pkt = tmp_pkt;
return 0;
}
int ff_bsf_get_packet_ref(AVBSFContext *ctx, AVPacket *pkt)
{
AVBSFInternal *in = ctx->internal;
AVBSFInternal *bsfi = ctx->internal;
if (in->eof)
if (bsfi->eof)
return AVERROR_EOF;
if (!ctx->internal->buffer_pkt->data &&
!ctx->internal->buffer_pkt->side_data_elems)
if (!bsfi->buffer_pkt->data &&
!bsfi->buffer_pkt->side_data_elems)
return AVERROR(EAGAIN);
av_packet_move_ref(pkt, ctx->internal->buffer_pkt);
av_packet_move_ref(pkt, bsfi->buffer_pkt);
return 0;
}
@ -517,8 +522,8 @@ static int bsf_parse_single(const char *str, AVBSFList *bsf_lst)
ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options);
av_dict_free(&bsf_options);
end:
av_dict_free(&bsf_options);
av_free(buf);
return ret;
}

View File

@ -105,7 +105,7 @@ typedef struct AV1RawSequenceHeader {
uint8_t use_128x128_superblock;
uint8_t enable_filter_intra;
uint8_t enable_intra_edge_filter;
uint8_t enable_intraintra_compound;
uint8_t enable_interintra_compound;
uint8_t enable_masked_compound;
uint8_t enable_warped_motion;
uint8_t enable_dual_filter;
@ -256,20 +256,20 @@ typedef struct AV1RawFrameHeader {
uint8_t update_grain;
uint8_t film_grain_params_ref_idx;
uint8_t num_y_points;
uint8_t point_y_value[16];
uint8_t point_y_scaling[16];
uint8_t point_y_value[14];
uint8_t point_y_scaling[14];
uint8_t chroma_scaling_from_luma;
uint8_t num_cb_points;
uint8_t point_cb_value[16];
uint8_t point_cb_scaling[16];
uint8_t point_cb_value[10];
uint8_t point_cb_scaling[10];
uint8_t num_cr_points;
uint8_t point_cr_value[16];
uint8_t point_cr_scaling[16];
uint8_t point_cr_value[10];
uint8_t point_cr_scaling[10];
uint8_t grain_scaling_minus_8;
uint8_t ar_coeff_lag;
uint8_t ar_coeffs_y_plus_128[24];
uint8_t ar_coeffs_cb_plus_128[24];
uint8_t ar_coeffs_cr_plus_128[24];
uint8_t ar_coeffs_cb_plus_128[25];
uint8_t ar_coeffs_cr_plus_128[25];
uint8_t ar_coeff_shift_minus_6;
uint8_t grain_scale_shift;
uint8_t cb_mult;

View File

@ -268,7 +268,7 @@ static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
flag(enable_intra_edge_filter);
if (current->reduced_still_picture_header) {
infer(enable_intraintra_compound, 0);
infer(enable_interintra_compound, 0);
infer(enable_masked_compound, 0);
infer(enable_warped_motion, 0);
infer(enable_dual_filter, 0);
@ -281,7 +281,7 @@ static int FUNC(sequence_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
infer(seq_force_integer_mv,
AV1_SELECT_INTEGER_MV);
} else {
flag(enable_intraintra_compound);
flag(enable_interintra_compound);
flag(enable_masked_compound);
flag(enable_warped_motion);
flag(enable_dual_filter);
@ -1155,7 +1155,7 @@ static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
return 0;
}
fb(4, num_y_points);
fc(4, num_y_points, 0, 14);
for (i = 0; i < current->num_y_points; i++) {
fbs(8, point_y_value[i], 1, i);
fbs(8, point_y_scaling[i], 1, i);
@ -1174,12 +1174,12 @@ static int FUNC(film_grain_params)(CodedBitstreamContext *ctx, RWContext *rw,
infer(num_cb_points, 0);
infer(num_cr_points, 0);
} else {
fb(4, num_cb_points);
fc(4, num_cb_points, 0, 10);
for (i = 0; i < current->num_cb_points; i++) {
fbs(8, point_cb_value[i], 1, i);
fbs(8, point_cb_scaling[i], 1, i);
}
fb(4, num_cr_points);
fc(4, num_cr_points, 0, 10);
for (i = 0; i < current->num_cr_points; i++) {
fbs(8, point_cr_value[i], 1, i);
fbs(8, point_cr_scaling[i], 1, i);

View File

@ -568,7 +568,10 @@ static int cbs_h2645_fragment_add_nals(CodedBitstreamContext *ctx,
// Remove trailing zeroes.
while (size > 0 && nal->data[size - 1] == 0)
--size;
av_assert0(size > 0);
if (size == 0) {
av_log(ctx->log_ctx, AV_LOG_VERBOSE, "Discarding empty 0 NAL unit\n");
continue;
}
ref = (nal->data == nal->raw_data) ? frag->data_ref
: packet->rbsp.rbsp_buffer_ref;

View File

@ -954,6 +954,7 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
current->payload[k].payload_type = payload_type;
current->payload[k].payload_size = payload_size;
current->payload_count++;
CHECK(FUNC(sei_payload)(ctx, rw, &current->payload[k]));
if (!cbs_h2645_read_more_rbsp_data(rw))
@ -964,7 +965,6 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
"SEI message: found %d.\n", k);
return AVERROR_INVALIDDATA;
}
current->payload_count = k + 1;
#else
for (k = 0; k < current->payload_count; k++) {
PutBitContext start_state;

View File

@ -2184,6 +2184,7 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
current->payload[k].payload_type = payload_type;
current->payload[k].payload_size = payload_size;
current->payload_count++;
CHECK(FUNC(sei_payload)(ctx, rw, &current->payload[k], prefix));
if (!cbs_h2645_read_more_rbsp_data(rw))
@ -2194,7 +2195,6 @@ static int FUNC(sei)(CodedBitstreamContext *ctx, RWContext *rw,
"SEI message: found %d.\n", k);
return AVERROR_INVALIDDATA;
}
current->payload_count = k + 1;
#else
for (k = 0; k < current->payload_count; k++) {
PutBitContext start_state;

View File

@ -416,6 +416,9 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
uint8_t superframe_header;
int err;
if (frag->data_size == 0)
return AVERROR_INVALIDDATA;
// Last byte in the packet.
superframe_header = frag->data[frag->data_size - 1];
@ -428,6 +431,9 @@ static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
index_size = 2 + (((superframe_header & 0x18) >> 3) + 1) *
((superframe_header & 0x07) + 1);
if (index_size > frag->data_size)
return AVERROR_INVALIDDATA;
err = init_get_bits(&gbc, frag->data + frag->data_size - index_size,
8 * index_size);
if (err < 0)

File diff suppressed because it is too large Load Diff

View File

@ -759,7 +759,7 @@ static int decouple_info(COOKContext *q, COOKSubpacket *p, int *decouple_tab)
for (i = 0; i < length; i++)
decouple_tab[start + i] = get_vlc2(&q->gb,
p->channel_coupling.table,
p->channel_coupling.bits, 2);
p->channel_coupling.bits, 3);
else
for (i = 0; i < length; i++) {
int v = get_bits(&q->gb, p->js_vlc_bits);

View File

@ -479,32 +479,32 @@ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
if(side && side_size>=10) {
avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
discard_padding = AV_RL32(side + 4);
av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
avctx->internal->skip_samples, (int)discard_padding);
avci->skip_samples, (int)discard_padding);
skip_reason = AV_RL8(side + 8);
discard_reason = AV_RL8(side + 9);
}
if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
got_frame = 0;
}
if (avctx->internal->skip_samples > 0 && got_frame &&
if (avci->skip_samples > 0 && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if(frame->nb_samples <= avctx->internal->skip_samples){
if(frame->nb_samples <= avci->skip_samples){
got_frame = 0;
avctx->internal->skip_samples -= frame->nb_samples;
avci->skip_samples -= frame->nb_samples;
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
avctx->internal->skip_samples);
avci->skip_samples);
} else {
av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
int64_t diff_ts = av_rescale_q(avci->skip_samples,
(AVRational){1, avctx->sample_rate},
avctx->pkt_timebase);
if(frame->pts!=AV_NOPTS_VALUE)
@ -523,9 +523,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
}
av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
avctx->internal->skip_samples, frame->nb_samples);
frame->nb_samples -= avctx->internal->skip_samples;
avctx->internal->skip_samples = 0;
avci->skip_samples, frame->nb_samples);
frame->nb_samples -= avci->skip_samples;
avci->skip_samples = 0;
}
}
@ -551,11 +551,11 @@ FF_ENABLE_DEPRECATION_WARNINGS
if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
if (fside) {
AV_WL32(fside->data, avctx->internal->skip_samples);
AV_WL32(fside->data, avci->skip_samples);
AV_WL32(fside->data + 4, discard_padding);
AV_WL8(fside->data + 8, skip_reason);
AV_WL8(fside->data + 9, discard_reason);
avctx->internal->skip_samples = 0;
avci->skip_samples = 0;
}
}
}
@ -580,7 +580,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
/* do not stop draining when actual_got_frame != 0 or ret < 0 */
/* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
if (avctx->internal->draining && !actual_got_frame) {
if (avci->draining && !actual_got_frame) {
if (ret < 0) {
/* prevent infinite loop if a decoder wrongly always return error on draining */
/* reasonable nb_errors_max = maximum b frames + thread count */
@ -1925,7 +1925,7 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
return AVERROR(EINVAL);
}
} else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
if (frame->nb_samples * avctx->channels > avctx->max_samples) {
if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
return AVERROR(EINVAL);
}
@ -2030,15 +2030,17 @@ static void bsfs_flush(AVCodecContext *avctx)
void avcodec_flush_buffers(AVCodecContext *avctx)
{
avctx->internal->draining = 0;
avctx->internal->draining_done = 0;
avctx->internal->nb_draining_errors = 0;
av_frame_unref(avctx->internal->buffer_frame);
av_frame_unref(avctx->internal->compat_decode_frame);
av_packet_unref(avctx->internal->buffer_pkt);
avctx->internal->buffer_pkt_valid = 0;
AVCodecInternal *avci = avctx->internal;
av_packet_unref(avctx->internal->ds.in_pkt);
avci->draining = 0;
avci->draining_done = 0;
avci->nb_draining_errors = 0;
av_frame_unref(avci->buffer_frame);
av_frame_unref(avci->compat_decode_frame);
av_packet_unref(avci->buffer_pkt);
avci->buffer_pkt_valid = 0;
av_packet_unref(avci->ds.in_pkt);
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
ff_thread_flush(avctx);
@ -2051,7 +2053,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
bsfs_flush(avctx);
if (!avctx->refcounted_frames)
av_frame_unref(avctx->internal->to_free);
av_frame_unref(avci->to_free);
}
void ff_decode_bsfs_uninit(AVCodecContext *avctx)

View File

@ -121,7 +121,7 @@ static int read_map(GetBitContext *gb, Table *t, unsigned int map[DST_MAX_CHANNE
static av_always_inline int get_sr_golomb_dst(GetBitContext *gb, unsigned int k)
{
int v = get_ur_golomb(gb, k, get_bits_left(gb), 0);
int v = get_ur_golomb_jpegls(gb, k, get_bits_left(gb), 0);
if (v && get_bits1(gb))
v = -v;
return v;

View File

@ -252,7 +252,7 @@ static int escape124_decode_frame(AVCodecContext *avctx,
if (i == 2) {
// This codebook can be cut off at places other than
// powers of 2, leaving some of the entries undefined.
cb_size = get_bits_long(&gb, 20);
cb_size = get_bits(&gb, 20);
if (!cb_size) {
av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n");
return AVERROR_INVALIDDATA;

View File

@ -37,7 +37,7 @@ static inline int RENAME(get_context)(PlaneContext *p, TYPE *src,
const int RT = last[1];
const int L = src[-1];
if (p->quant_table[3][127]) {
if (p->quant_table[3][127] || p->quant_table[4][127]) {
const int TT = last2[0];
const int LL = src[-2];
return p->quant_table[0][(L - LT) & 0xFF] +

View File

@ -350,7 +350,8 @@ fail:
static void wavesynth_synth_sample(struct wavesynth_context *ws, int64_t ts,
int32_t *channels)
{
int32_t amp, val, *cv;
int32_t amp, *cv;
unsigned val;
struct ws_interval *in;
int i, *last, pink;
uint32_t c, all_ch = 0;

View File

@ -217,9 +217,9 @@ int ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
}
skip_bits(&gb, 24); /* skip min frame size */
s->max_framesize = get_bits_long(&gb, 24);
s->max_framesize = get_bits(&gb, 24);
s->samplerate = get_bits_long(&gb, 20);
s->samplerate = get_bits(&gb, 20);
s->channels = get_bits(&gb, 3) + 1;
s->bps = get_bits(&gb, 5) + 1;

View File

@ -30,7 +30,7 @@ int ff_flv_decode_picture_header(MpegEncContext *s)
int format, width, height;
/* picture header */
if (get_bits_long(&s->gb, 17) != 1) {
if (get_bits(&s->gb, 17) != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
return AVERROR_INVALIDDATA;
}

View File

@ -486,14 +486,14 @@ static int16_t apply_tilt_comp(int16_t* out, int16_t* res_pst, int refl_coeff,
if (refl_coeff > 0) {
gt = (refl_coeff * G729_TILT_FACTOR_PLUS + 0x4000) >> 15;
fact = 0x4000; // 0.5 in (0.15)
sh_fact = 15;
fact = 0x2000; // 0.5 in (0.15)
sh_fact = 14;
} else {
gt = (refl_coeff * G729_TILT_FACTOR_MINUS + 0x4000) >> 15;
fact = 0x800; // 0.5 in (3.12)
sh_fact = 12;
fact = 0x400; // 0.5 in (3.12)
sh_fact = 11;
}
ga = (fact << 15) / av_clip_int16(32768 - FFABS(gt));
ga = (fact << 16) / av_clip_int16(32768 - FFABS(gt));
gt >>= 1;
/* Apply tilt compensation filter to signal. */
@ -503,12 +503,12 @@ static int16_t apply_tilt_comp(int16_t* out, int16_t* res_pst, int refl_coeff,
tmp2 = (gt * res_pst[i-1]) * 2 + 0x4000;
tmp2 = res_pst[i] + (tmp2 >> 15);
tmp2 = (tmp2 * ga * 2 + fact) >> sh_fact;
tmp2 = (tmp2 * ga + fact) >> sh_fact;
out[i] = tmp2;
}
tmp2 = (gt * ht_prev_data) * 2 + 0x4000;
tmp2 = res_pst[0] + (tmp2 >> 15);
tmp2 = (tmp2 * ga * 2 + fact) >> sh_fact;
tmp2 = (tmp2 * ga + fact) >> sh_fact;
out[0] = tmp2;
return tmp;

View File

@ -313,7 +313,7 @@ static inline int get_interleaved_se_golomb(GetBitContext *gb)
} else {
int log;
skip_bits(gb, 8);
buf |= 1 | show_bits_long(gb, 24);
buf |= 1 | show_bits(gb, 24);
if ((buf & 0xAAAAAAAA) == 0)
return INVALID_VLC;

View File

@ -381,7 +381,7 @@ static int h264_metadata_filter(AVBSFContext *bsf, AVPacket *pkt)
} else {
goto invalid_user_data;
}
if (i & 1)
if (j & 1)
udu->uuid_iso_iec_11578[j / 2] |= v;
else
udu->uuid_iso_iec_11578[j / 2] = v << 4;

View File

@ -21,6 +21,7 @@
#include <string.h>
#include "libavutil/avassert.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
@ -68,7 +69,7 @@ static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding)
{
H264BSFContext *s = ctx->priv_data;
uint16_t unit_size;
uint64_t total_size = 0;
uint32_t total_size = 0;
uint8_t *out = NULL, unit_nb, sps_done = 0,
sps_seen = 0, pps_seen = 0;
const uint8_t *extradata = ctx->par_in->extradata + 4;
@ -91,12 +92,7 @@ static int h264_extradata_to_annexb(AVBSFContext *ctx, const int padding)
unit_size = AV_RB16(extradata);
total_size += unit_size + 4;
if (total_size > INT_MAX - padding) {
av_log(ctx, AV_LOG_ERROR,
"Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
av_free(out);
return AVERROR(EINVAL);
}
av_assert1(total_size <= INT_MAX - padding);
if (extradata + 2 + unit_size > ctx->par_in->extradata + ctx->par_in->extradata_size) {
av_log(ctx, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
"corrupted stream or invalid MP4/AVCC bitstream\n");

View File

@ -186,7 +186,7 @@ static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx
}
if (show_bits1(gb) && get_bits_left(gb) < 10) {
av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n");
av_log(avctx, AV_LOG_WARNING, "Truncated VUI (%d)\n", get_bits_left(gb));
return 0;
}

View File

@ -247,14 +247,14 @@ static int decode_unregistered_user_data(H264SEIUnregistered *h, GetBitContext *
uint8_t *user_data;
int e, build, i;
if (size < 16 || size >= INT_MAX - 16)
if (size < 16 || size >= INT_MAX - 1)
return AVERROR_INVALIDDATA;
user_data = av_malloc(16 + size + 1);
user_data = av_malloc(size + 1);
if (!user_data)
return AVERROR(ENOMEM);
for (i = 0; i < size + 16; i++)
for (i = 0; i < size; i++)
user_data[i] = get_bits(gb, 8);
user_data[i] = 0;

View File

@ -832,8 +832,6 @@ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
const H2645NAL *nal);
/**
* Submit a slice for decoding.
*

View File

@ -66,7 +66,7 @@ static const int8_t num_bins_in_se[] = {
1, // no_residual_data_flag
3, // split_transform_flag
2, // cbf_luma
4, // cbf_cb, cbf_cr
5, // cbf_cb, cbf_cr
2, // transform_skip_flag[][]
2, // explicit_rdpcm_flag[][]
2, // explicit_rdpcm_dir_flag[][]
@ -122,23 +122,23 @@ static const int elem_offset[sizeof(num_bins_in_se)] = {
37, // split_transform_flag
40, // cbf_luma
42, // cbf_cb, cbf_cr
46, // transform_skip_flag[][]
48, // explicit_rdpcm_flag[][]
50, // explicit_rdpcm_dir_flag[][]
52, // last_significant_coeff_x_prefix
70, // last_significant_coeff_y_prefix
88, // last_significant_coeff_x_suffix
88, // last_significant_coeff_y_suffix
88, // significant_coeff_group_flag
92, // significant_coeff_flag
136, // coeff_abs_level_greater1_flag
160, // coeff_abs_level_greater2_flag
166, // coeff_abs_level_remaining
166, // coeff_sign_flag
166, // log2_res_scale_abs
174, // res_scale_sign_flag
176, // cu_chroma_qp_offset_flag
177, // cu_chroma_qp_offset_idx
47, // transform_skip_flag[][]
49, // explicit_rdpcm_flag[][]
51, // explicit_rdpcm_dir_flag[][]
53, // last_significant_coeff_x_prefix
71, // last_significant_coeff_y_prefix
89, // last_significant_coeff_x_suffix
89, // last_significant_coeff_y_suffix
89, // significant_coeff_group_flag
93, // significant_coeff_flag
137, // coeff_abs_level_greater1_flag
161, // coeff_abs_level_greater2_flag
167, // coeff_abs_level_remaining
167, // coeff_sign_flag
167, // log2_res_scale_abs
175, // res_scale_sign_flag
177, // cu_chroma_qp_offset_flag
178, // cu_chroma_qp_offset_idx
};
#define CNU 154
@ -189,7 +189,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = {
// cbf_luma
111, 141,
// cbf_cb, cbf_cr
94, 138, 182, 154,
94, 138, 182, 154, 154,
// transform_skip_flag
139, 139,
// explicit_rdpcm_flag
@ -266,7 +266,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = {
// cbf_luma
153, 111,
// cbf_cb, cbf_cr
149, 107, 167, 154,
149, 107, 167, 154, 154,
// transform_skip_flag
139, 139,
// explicit_rdpcm_flag
@ -343,7 +343,7 @@ static const uint8_t init_values[3][HEVC_CONTEXTS] = {
// cbf_luma
153, 111,
// cbf_cb, cbf_cr
149, 92, 167, 154,
149, 92, 167, 154, 154,
// transform_skip_flag
139, 139,
// explicit_rdpcm_flag

View File

@ -76,8 +76,8 @@ static int decode_nal_sei_mastering_display_info(HEVCSEIMasteringDisplay *s, Get
static int decode_nal_sei_content_light_info(HEVCSEIContentLight *s, GetBitContext *gb)
{
// Max and average light levels
s->max_content_light_level = get_bits_long(gb, 16);
s->max_pic_average_light_level = get_bits_long(gb, 16);
s->max_content_light_level = get_bits(gb, 16);
s->max_pic_average_light_level = get_bits(gb, 16);
// As this SEI message comes before the first frame that references it,
// initialize the flag to 2 and decrement on IRAP access unit so it
// persists for the coded video sequence (e.g., between two IRAPs)
@ -177,7 +177,8 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB
size -= 2;
if (cc_count && size >= cc_count * 3) {
const uint64_t new_size = (s->a53_caption_size + cc_count
int old_size = s->buf_ref ? s->buf_ref->size : 0;
const uint64_t new_size = (old_size + cc_count
* UINT64_C(3));
int i, ret;
@ -185,14 +186,14 @@ static int decode_registered_user_data_closed_caption(HEVCSEIA53Caption *s, GetB
return AVERROR(EINVAL);
/* Allow merging of the cc data from two fields. */
ret = av_reallocp(&s->a53_caption, new_size);
ret = av_buffer_realloc(&s->buf_ref, new_size);
if (ret < 0)
return ret;
for (i = 0; i < cc_count; i++) {
s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8);
s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8);
s->a53_caption[s->a53_caption_size++] = get_bits(gb, 8);
s->buf_ref->data[old_size++] = get_bits(gb, 8);
s->buf_ref->data[old_size++] = get_bits(gb, 8);
s->buf_ref->data[old_size++] = get_bits(gb, 8);
}
skip_bits(gb, 8); // marker_bits
}
@ -363,6 +364,5 @@ int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s,
void ff_hevc_reset_sei(HEVCSEI *s)
{
s->a53_caption.a53_caption_size = 0;
av_freep(&s->a53_caption.a53_caption);
av_buffer_unref(&s->a53_caption.buf_ref);
}

View File

@ -83,8 +83,7 @@ typedef struct HEVCSEIPictureTiming {
} HEVCSEIPictureTiming;
typedef struct HEVCSEIA53Caption {
int a53_caption_size;
uint8_t *a53_caption;
AVBufferRef *buf_ref;
} HEVCSEIA53Caption;
typedef struct HEVCSEIMasteringDisplay {

View File

@ -2778,14 +2778,14 @@ static int set_side_data(HEVCContext *s)
metadata->MaxCLL, metadata->MaxFALL);
}
if (s->sei.a53_caption.a53_caption) {
AVFrameSideData* sd = av_frame_new_side_data(out,
AV_FRAME_DATA_A53_CC,
s->sei.a53_caption.a53_caption_size);
if (sd)
memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size);
av_freep(&s->sei.a53_caption.a53_caption);
s->sei.a53_caption.a53_caption_size = 0;
if (s->sei.a53_caption.buf_ref) {
HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
if (!sd)
av_buffer_unref(&a53->buf_ref);
a53->buf_ref = NULL;
s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
}
@ -3463,6 +3463,13 @@ static int hevc_update_thread_context(AVCodecContext *dst,
s->max_ra = INT_MAX;
}
av_buffer_unref(&s->sei.a53_caption.buf_ref);
if (s0->sei.a53_caption.buf_ref) {
s->sei.a53_caption.buf_ref = av_buffer_ref(s0->sei.a53_caption.buf_ref);
if (!s->sei.a53_caption.buf_ref)
return AVERROR(ENOMEM);
}
s->sei.frame_packing = s0->sei.frame_packing;
s->sei.display_orientation = s0->sei.display_orientation;
s->sei.mastering_display = s0->sei.mastering_display;

View File

@ -322,6 +322,8 @@ static int extract_header(AVCodecContext *const avctx,
av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
return AVERROR_INVALIDDATA;
}
if (s->video_size && s->planesize * s->bpp * avctx->height > s->video_size)
return AVERROR_INVALIDDATA;
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
@ -1359,6 +1361,8 @@ static void decode_delta_d(uint8_t *dst,
bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET);
if (opcode >= 0) {
uint32_t x = bytestream2_get_be32(&gb);
if (opcode && 4 + (opcode - 1LL) * pitch > bytestream2_get_bytes_left_p(&pb))
continue;
while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) {
bytestream2_put_be32(&pb, x);
bytestream2_skip_p(&pb, pitch - 4);

View File

@ -264,7 +264,7 @@ static int decode_gop_header(IVI45DecContext *ctx, AVCodecContext *avctx)
}
if (get_bits1(&ctx->gb))
skip_bits_long(&ctx->gb, 24); /* skip transparency fill color */
skip_bits(&ctx->gb, 24); /* skip transparency fill color */
}
align_get_bits(&ctx->gb);
@ -348,7 +348,7 @@ static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx)
if (ctx->frame_type != FRAMETYPE_NULL) {
ctx->frame_flags = get_bits(&ctx->gb, 8);
ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits_long(&ctx->gb, 24) : 0;
ctx->pic_hdr_size = (ctx->frame_flags & 1) ? get_bits(&ctx->gb, 24) : 0;
ctx->checksum = (ctx->frame_flags & 0x10) ? get_bits(&ctx->gb, 16) : 0;
@ -392,7 +392,7 @@ static int decode_band_hdr(IVI45DecContext *ctx, IVIBandDesc *band,
return 0;
}
band->data_size = (ctx->frame_flags & 0x80) ? get_bits_long(&ctx->gb, 24) : 0;
band->data_size = (ctx->frame_flags & 0x80) ? get_bits(&ctx->gb, 24) : 0;
band->inherit_mv = band_flags & 2;
band->inherit_qdelta = band_flags & 8;

View File

@ -33,7 +33,7 @@ int ff_intel_h263_decode_picture_header(MpegEncContext *s)
}
/* picture header */
if (get_bits_long(&s->gb, 22) != 0x20) {
if (get_bits(&s->gb, 22) != 0x20) {
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
return -1;
}

View File

@ -392,6 +392,8 @@ AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx);
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type);
int ff_side_data_set_prft(AVPacket *pkt, int64_t timestamp);
/**
* Check AVFrame for A53 side data and allocate and fill SEI message with A53 info
*

View File

@ -476,7 +476,7 @@ static int ivi_dec_tile_data_size(GetBitContext *gb)
if (get_bits1(gb)) {
len = get_bits(gb, 8);
if (len == 255)
len = get_bits_long(gb, 24);
len = get_bits(gb, 24);
}
/* align the bitstream reader on the byte boundary */
@ -1193,7 +1193,7 @@ int ff_ivi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
left = get_bits_count(&ctx->gb) & 0x18;
skip_bits_long(&ctx->gb, 64 - left);
if (get_bits_left(&ctx->gb) > 18 &&
show_bits_long(&ctx->gb, 21) == 0xBFFF8) { // syncheader + inter type
show_bits(&ctx->gb, 21) == 0xBFFF8) { // syncheader + inter type
AVPacket pkt;
pkt.data = avpkt->data + (get_bits_count(&ctx->gb) >> 3);
pkt.size = get_bits_left(&ctx->gb) >> 3;

View File

@ -110,8 +110,8 @@ static av_cold int libkvazaar_init(AVCodecContext *avctx)
entry->key, entry->value);
}
}
av_dict_free(&dict);
}
av_dict_free(&dict);
}
ctx->encoder = enc = api->encoder_open(cfg);

View File

@ -42,7 +42,7 @@ typedef struct librav1eContext {
size_t pass_pos;
int pass_size;
char *rav1e_opts;
AVDictionary *rav1e_opts;
int quantizer;
int speed;
int tiles;
@ -244,17 +244,12 @@ static av_cold int librav1e_encode_init(AVCodecContext *avctx)
}
}
if (ctx->rav1e_opts) {
AVDictionary *dict = NULL;
{
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, ctx->rav1e_opts, "=", ":", 0)) {
while (en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX)) {
int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
if (parse_ret < 0)
av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
}
av_dict_free(&dict);
while ((en = av_dict_get(ctx->rav1e_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
if (parse_ret < 0)
av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
}
}
@ -538,7 +533,7 @@ static const AVOption options[] = {
{ "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
{ "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
{ "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
{ "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
{ NULL }
};

View File

@ -100,7 +100,7 @@ typedef struct VPxEncoderContext {
int rc_undershoot_pct;
int rc_overshoot_pct;
char *vp8_ts_parameters;
AVDictionary *vp8_ts_parameters;
// VP9-only
int lossless;
@ -757,19 +757,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
enccfg.g_error_resilient = ctx->error_resilient || ctx->flags & VP8F_ERROR_RESILIENT;
if (CONFIG_LIBVPX_VP8_ENCODER && avctx->codec_id == AV_CODEC_ID_VP8 && ctx->vp8_ts_parameters) {
AVDictionary *dict = NULL;
if (CONFIG_LIBVPX_VP8_ENCODER && avctx->codec_id == AV_CODEC_ID_VP8) {
AVDictionaryEntry* en = NULL;
if (!av_dict_parse_string(&dict, ctx->vp8_ts_parameters, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
if (vp8_ts_param_parse(&enccfg, en->key, en->value) < 0)
av_log(avctx, AV_LOG_WARNING,
"Error parsing option '%s = %s'.\n",
en->key, en->value);
}
av_dict_free(&dict);
while ((en = av_dict_get(ctx->vp8_ts_parameters, "", en, AV_DICT_IGNORE_SUFFIX))) {
if (vp8_ts_param_parse(&enccfg, en->key, en->value) < 0)
av_log(avctx, AV_LOG_WARNING,
"Error parsing option '%s = %s'.\n",
en->key, en->value);
}
}
@ -1047,8 +1041,7 @@ static int queue_frames(AVCodecContext *avctx, AVPacket *pkt_out)
if (size < 0)
return size;
} else {
struct FrameListData *cx_frame =
av_malloc(sizeof(struct FrameListData));
struct FrameListData *cx_frame = av_malloc(sizeof(*cx_frame));
if (!cx_frame) {
av_log(avctx, AV_LOG_ERROR,
@ -1462,7 +1455,7 @@ static const AVOption vp8_options[] = {
"frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 2, VE},
{ "cpu-used", "Quality/Speed ratio modifier", OFFSET(cpu_used), AV_OPT_TYPE_INT, {.i64 = 1}, -16, 16, VE},
{ "ts-parameters", "Temporal scaling configuration using a "
":-separated list of key=value parameters", OFFSET(vp8_ts_parameters), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, VE},
":-separated list of key=value parameters", OFFSET(vp8_ts_parameters), AV_OPT_TYPE_DICT, {.str=NULL}, 0, 0, VE},
LEGACY_OPTIONS
{ NULL }
};

View File

@ -25,6 +25,7 @@
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"
#include "libavutil/time.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
@ -44,6 +45,11 @@
// blocks of pixels (with respect to the luma plane)
#define MB_SIZE 16
typedef struct X264Opaque {
int64_t reordered_opaque;
int64_t wallclock;
} X264Opaque;
typedef struct X264Context {
AVClass *class;
x264_param_t params;
@ -95,10 +101,10 @@ typedef struct X264Context {
int scenechange_threshold;
int noise_reduction;
char *x264_params;
AVDictionary *x264_params;
int nb_reordered_opaque, next_reordered_opaque;
int64_t *reordered_opaque;
X264Opaque *reordered_opaque;
/**
* If the encoder does not support ROI then warn the first time we
@ -292,7 +298,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
x264_picture_t pic_out = {0};
int pict_type;
int bit_depth;
int64_t *out_opaque;
int64_t wallclock = 0;
X264Opaque *out_opaque;
AVFrameSideData *sd;
x264_picture_init( &x4->pic );
@ -314,7 +321,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
x4->pic.i_pts = frame->pts;
x4->reordered_opaque[x4->next_reordered_opaque] = frame->reordered_opaque;
x4->reordered_opaque[x4->next_reordered_opaque].reordered_opaque = frame->reordered_opaque;
x4->reordered_opaque[x4->next_reordered_opaque].wallclock = av_gettime();
x4->pic.opaque = &x4->reordered_opaque[x4->next_reordered_opaque];
x4->next_reordered_opaque++;
x4->next_reordered_opaque %= x4->nb_reordered_opaque;
@ -443,7 +451,8 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
out_opaque = pic_out.opaque;
if (out_opaque >= x4->reordered_opaque &&
out_opaque < &x4->reordered_opaque[x4->nb_reordered_opaque]) {
ctx->reordered_opaque = *out_opaque;
ctx->reordered_opaque = out_opaque->reordered_opaque;
wallclock = out_opaque->wallclock;
} else {
// Unexpected opaque pointer on picture output
ctx->reordered_opaque = 0;
@ -473,6 +482,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
if (ret) {
ff_side_data_set_encoder_stats(pkt, (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);
if (wallclock)
ff_side_data_set_prft(pkt, wallclock);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
@ -892,19 +903,14 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
if (x4->x264_params) {
AVDictionary *dict = NULL;
{
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, x4->x264_params, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
if (x264_param_parse(&x4->params, en->key, en->value) < 0)
av_log(avctx, AV_LOG_WARNING,
"Error parsing option '%s = %s'.\n",
en->key, en->value);
}
av_dict_free(&dict);
while (en = av_dict_get(x4->x264_params, "", en, AV_DICT_IGNORE_SUFFIX)) {
if (x264_param_parse(&x4->params, en->key, en->value) < 0)
av_log(avctx, AV_LOG_WARNING,
"Error parsing option '%s = %s'.\n",
en->key, en->value);
}
}
@ -1116,7 +1122,7 @@ static const AVOption options[] = {
{ "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE },
{ "noise_reduction", "Noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX, VE },
{ "x264-params", "Override the x264 configuration using a :-separated list of key=value parameters", OFFSET(x264_params), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "x264-params", "Override the x264 configuration using a :-separated list of key=value parameters", OFFSET(x264_params), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
{ NULL },
};

View File

@ -42,11 +42,12 @@ typedef struct libx265Context {
const x265_api *api;
float crf;
int cqp;
int forced_idr;
char *preset;
char *tune;
char *profile;
char *x265_opts;
AVDictionary *x265_opts;
/**
* If the encoder does not support ROI then warn the first time we
@ -82,10 +83,41 @@ static av_cold int libx265_encode_close(AVCodecContext *avctx)
return 0;
}
static av_cold int libx265_param_parse_float(AVCodecContext *avctx,
const char *key, float value)
{
libx265Context *ctx = avctx->priv_data;
char buf[256];
snprintf(buf, sizeof(buf), "%2.2f", value);
if (ctx->api->param_parse(ctx->params, key, buf) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid value %2.2f for param \"%s\".\n", value, key);
return AVERROR(EINVAL);
}
return 0;
}
static av_cold int libx265_param_parse_int(AVCodecContext *avctx,
const char *key, int value)
{
libx265Context *ctx = avctx->priv_data;
char buf[256];
snprintf(buf, sizeof(buf), "%d", value);
if (ctx->api->param_parse(ctx->params, key, buf) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid value %d for param \"%s\".\n", value, key);
return AVERROR(EINVAL);
}
return 0;
}
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
libx265Context *ctx = avctx->priv_data;
AVCPBProperties *cpb_props = NULL;
int ret;
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);
if (!ctx->api)
@ -159,6 +191,10 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
// x265 validates the parameters internally
ctx->params->vui.colorPrimaries = avctx->color_primaries;
ctx->params->vui.transferCharacteristics = avctx->color_trc;
#if X265_BUILD >= 159
if (avctx->color_trc == AVCOL_TRC_ARIB_STD_B67)
ctx->params->preferredTransferCharacteristics = ctx->params->vui.transferCharacteristics;
#endif
ctx->params->vui.matrixCoeffs = avctx->colorspace;
}
@ -222,6 +258,48 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
} else if (avctx->bit_rate > 0) {
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
ctx->params->rc.rateControlMode = X265_RC_ABR;
} else if (ctx->cqp >= 0) {
ret = libx265_param_parse_int(avctx, "qp", ctx->cqp);
if (ret < 0)
return ret;
}
#if X265_BUILD >= 89
if (avctx->qmin >= 0) {
ret = libx265_param_parse_int(avctx, "qpmin", avctx->qmin);
if (ret < 0)
return ret;
}
if (avctx->qmax >= 0) {
ret = libx265_param_parse_int(avctx, "qpmax", avctx->qmax);
if (ret < 0)
return ret;
}
#endif
if (avctx->max_qdiff >= 0) {
ret = libx265_param_parse_int(avctx, "qpstep", avctx->max_qdiff);
if (ret < 0)
return ret;
}
if (avctx->qblur >= 0) {
ret = libx265_param_parse_float(avctx, "qblur", avctx->qblur);
if (ret < 0)
return ret;
}
if (avctx->qcompress >= 0) {
ret = libx265_param_parse_float(avctx, "qcomp", avctx->qcompress);
if (ret < 0)
return ret;
}
if (avctx->i_quant_factor >= 0) {
ret = libx265_param_parse_float(avctx, "ipratio", avctx->i_quant_factor);
if (ret < 0)
return ret;
}
if (avctx->b_quant_factor >= 0) {
ret = libx265_param_parse_float(avctx, "pbratio", avctx->b_quant_factor);
if (ret < 0)
return ret;
}
ctx->params->rc.vbvBufferSize = avctx->rc_buffer_size / 1000;
@ -237,28 +315,44 @@ static av_cold int libx265_encode_init(AVCodecContext *avctx)
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1;
if (ctx->x265_opts) {
AVDictionary *dict = NULL;
if (avctx->gop_size >= 0) {
ret = libx265_param_parse_int(avctx, "keyint", avctx->gop_size);
if (ret < 0)
return ret;
}
if (avctx->keyint_min > 0) {
ret = libx265_param_parse_int(avctx, "min-keyint", avctx->keyint_min);
if (ret < 0)
return ret;
}
if (avctx->max_b_frames >= 0) {
ret = libx265_param_parse_int(avctx, "bframes", avctx->max_b_frames);
if (ret < 0)
return ret;
}
if (avctx->refs >= 0) {
ret = libx265_param_parse_int(avctx, "ref", avctx->refs);
if (ret < 0)
return ret;
}
{
AVDictionaryEntry *en = NULL;
while ((en = av_dict_get(ctx->x265_opts, "", en, AV_DICT_IGNORE_SUFFIX))) {
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
switch (parse_ret) {
case X265_PARAM_BAD_NAME:
av_log(avctx, AV_LOG_WARNING,
"Unknown option: %s.\n", en->key);
break;
case X265_PARAM_BAD_VALUE:
av_log(avctx, AV_LOG_WARNING,
"Invalid value for %s: %s.\n", en->key, en->value);
break;
default:
break;
}
switch (parse_ret) {
case X265_PARAM_BAD_NAME:
av_log(avctx, AV_LOG_WARNING,
"Unknown option: %s.\n", en->key);
break;
case X265_PARAM_BAD_VALUE:
av_log(avctx, AV_LOG_WARNING,
"Invalid value for %s: %s.\n", en->key, en->value);
break;
default:
break;
}
av_dict_free(&dict);
}
}
@ -383,6 +477,7 @@ static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
x265_picture x265pic_out = { 0 };
x265_nal *nal;
uint8_t *dst;
int pict_type;
int payload = 0;
int nnal;
int ret;
@ -442,20 +537,23 @@ static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
pkt->pts = x265pic_out.pts;
pkt->dts = x265pic_out.dts;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
switch (x265pic_out.sliceType) {
case X265_TYPE_IDR:
case X265_TYPE_I:
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
pict_type = AV_PICTURE_TYPE_I;
break;
case X265_TYPE_P:
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
pict_type = AV_PICTURE_TYPE_P;
break;
case X265_TYPE_B:
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
case X265_TYPE_BREF:
pict_type = AV_PICTURE_TYPE_B;
break;
}
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
avctx->coded_frame->pict_type = pict_type;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
@ -466,6 +564,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
#endif
pkt->flags |= AV_PKT_FLAG_DISPOSABLE;
ff_side_data_set_encoder_stats(pkt, x265pic_out.frameData.qp * FF_QP2LAMBDA, NULL, 0, pict_type);
*got_packet = 1;
return 0;
}
@ -535,11 +635,12 @@ static av_cold void libx265_encode_init_csp(AVCodec *codec)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "crf", "set the x265 crf", OFFSET(crf), AV_OPT_TYPE_FLOAT, { .dbl = -1 }, -1, FLT_MAX, VE },
{ "qp", "set the x265 qp", OFFSET(cqp), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VE },
{ "forced-idr", "if forcing keyframes, force them as IDR frames", OFFSET(forced_idr),AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
{ "preset", "set the x265 preset", OFFSET(preset), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "tune", "set the x265 tune parameter", OFFSET(tune), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "profile", "set the x265 profile", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "x265-params", "set the x265 configuration using a :-separated list of key=value parameters", OFFSET(x265_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "x265-params", "set the x265 configuration using a :-separated list of key=value parameters", OFFSET(x265_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
{ NULL }
};
@ -552,6 +653,17 @@ static const AVClass class = {
static const AVCodecDefault x265_defaults[] = {
{ "b", "0" },
{ "bf", "-1" },
{ "g", "-1" },
{ "keyint_min", "-1" },
{ "refs", "-1" },
{ "qmin", "-1" },
{ "qmax", "-1" },
{ "qdiff", "-1" },
{ "qblur", "-1" },
{ "qcomp", "-1" },
{ "i_qfactor", "-1" },
{ "b_qfactor", "-1" },
{ NULL },
};

View File

@ -48,7 +48,7 @@ typedef struct XAVS2EContext {
int log_level;
void *encoder;
char *xavs2_opts;
AVDictionary *xavs2_opts;
xavs2_outpacket_t packet;
xavs2_param_t *param;
@ -92,16 +92,10 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
xavs2_opt_set2("OpenGOP", "%d", !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
if (cae->xavs2_opts) {
AVDictionary *dict = NULL;
{
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, cae->xavs2_opts, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
xavs2_opt_set2(en->key, "%s", en->value);
}
av_dict_free(&dict);
}
while ((en = av_dict_get(cae->xavs2_opts, "", en, AV_DICT_IGNORE_SUFFIX)))
xavs2_opt_set2(en->key, "%s", en->value);
}
/* Rate control */
@ -267,7 +261,7 @@ static const AVOption options[] = {
{ "min_qp" , "min qp for rate control" , OFFSET(min_qp) , AV_OPT_TYPE_INT, {.i64 = 20 }, 0, 63, VE },
{ "speed_level" , "Speed level, higher is better but slower", OFFSET(preset_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 9, VE },
{ "log_level" , "log level: -1: none, 0: error, 1: warning, 2: info, 3: debug", OFFSET(log_level) , AV_OPT_TYPE_INT, {.i64 = 0 }, -1, 3, VE },
{ "xavs2-params" , "set the xavs2 configuration using a :-separated list of key=value parameters", OFFSET(xavs2_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
{ "xavs2-params" , "set the xavs2 configuration using a :-separated list of key=value parameters", OFFSET(xavs2_opts), AV_OPT_TYPE_DICT, { 0 }, 0, 0, VE },
{ NULL },
};

View File

@ -63,7 +63,7 @@ static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame)
if (intra_flag) {
nb_blocks = (avctx->width / 2) * (avctx->height / 2);
} else {
int skip_linesize;
int ret, skip_linesize;
nb_blocks = bytestream2_get_le32(gb);
skip_linesize = avctx->width >> 1;
@ -73,7 +73,9 @@ static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame)
if (bytestream2_get_bytes_left(gb) < mask_size)
return AVERROR_INVALIDDATA;
init_get_bits8(&mask, mask_start, mask_size);
ret = init_get_bits8(&mask, mask_start, mask_size);
if (ret < 0)
return ret;
bytestream2_skip(gb, mask_size);
skip = s->skip;

View File

@ -73,10 +73,7 @@ static av_cold void h264_pred_init_msa(H264PredContext *h, int codec_id,
switch (codec_id) {
case AV_CODEC_ID_SVQ3:
;
break;
case AV_CODEC_ID_RV40:
;
break;
case AV_CODEC_ID_VP7:
case AV_CODEC_ID_VP8:

View File

@ -2049,7 +2049,7 @@ static int mjpeg_decode_app(MJpegDecodeContext *s)
unsigned nummarkers;
id = get_bits_long(&s->gb, 32);
id2 = get_bits_long(&s->gb, 24);
id2 = get_bits(&s->gb, 24);
len -= 7;
if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");

View File

@ -102,7 +102,7 @@ int ff_mlp_read_major_sync(void *log, MLPHeaderInfo *mh, GetBitContext *gb)
return AVERROR_INVALIDDATA;
}
if (get_bits_long(gb, 24) != 0xf8726f) /* Sync words */
if (get_bits(gb, 24) != 0xf8726f) /* Sync words */
return AVERROR_INVALIDDATA;
mh->stream_type = get_bits(gb, 8);

View File

@ -61,6 +61,8 @@ static int mlp_parse(AVCodecParserContext *s,
int ret;
int i, p = 0;
s->key_frame = 0;
*poutbuf_size = 0;
if (buf_size == 0)
return 0;
@ -136,6 +138,8 @@ static int mlp_parse(AVCodecParserContext *s,
* access unit header and all the 2- or 4-byte substream headers. */
// Only check when this isn't a sync frame - syncs have a checksum.
s->key_frame = 0;
parity_bits = 0;
for (i = -1; i < mp->num_substreams; i++) {
parity_bits ^= buf[p++];
@ -159,6 +163,8 @@ static int mlp_parse(AVCodecParserContext *s,
if (ff_mlp_read_major_sync(avctx, &mh, &gb) < 0)
goto lost_sync;
s->key_frame = 1;
avctx->bits_per_raw_sample = mh.group1_bits;
if (avctx->bits_per_raw_sample > 16)
avctx->sample_fmt = AV_SAMPLE_FMT_S32;

View File

@ -62,6 +62,11 @@ static int mp3_header_decompress(AVBSFContext *ctx, AVPacket *out)
lsf = sample_rate < (24000+32000)/2;
mpeg25 = sample_rate < (12000+16000)/2;
sample_rate_index= (header>>10)&3;
if (sample_rate_index == 3) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off
for(bitrate_index=2; bitrate_index<30; bitrate_index++){

View File

@ -1669,8 +1669,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
return AVERROR_INVALIDDATA;
}
if (s->avctx->hwaccel &&
(s->avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD)) {
if (s->avctx->hwaccel) {
if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
av_log(avctx, AV_LOG_ERROR,
"hardware accelerator failed to decode first field\n");

View File

@ -118,8 +118,8 @@ int ff_mpeg4audio_get_config_gb(MPEG4AudioConfig *c, GetBitContext *gb,
if (c->object_type == AOT_ALS) {
skip_bits(gb, 5);
if (show_bits_long(gb, 24) != MKBETAG('\0','A','L','S'))
skip_bits_long(gb, 24);
if (show_bits(gb, 24) != MKBETAG('\0','A','L','S'))
skip_bits(gb, 24);
specific_config_bitindex = get_bits_count(gb);

View File

@ -711,7 +711,7 @@ static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
int i;
do {
if (show_bits_long(&s->gb, 19) == DC_MARKER)
if (show_bits(&s->gb, 19) == DC_MARKER)
return mb_num - 1;
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
@ -1001,7 +1001,7 @@ int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
if (s->pict_type == AV_PICTURE_TYPE_I) {
while (show_bits(&s->gb, 9) == 1)
skip_bits(&s->gb, 9);
if (get_bits_long(&s->gb, 19) != DC_MARKER) {
if (get_bits(&s->gb, 19) != DC_MARKER) {
av_log(s->avctx, AV_LOG_ERROR,
"marker missing after first I partition at %d %d\n",
s->mb_x, s->mb_y);
@ -1782,7 +1782,7 @@ static void next_start_code_studio(GetBitContext *gb)
{
align_get_bits(gb);
while (get_bits_left(gb) >= 24 && show_bits_long(gb, 24) != 0x1) {
while (get_bits_left(gb) >= 24 && show_bits(gb, 24) != 0x1) {
get_bits(gb, 8);
}
}

View File

@ -412,8 +412,6 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
ff_mpeg_er_frame_start(s);
v->bits = buf_size * 8;
v->end_mb_x = (w + 15) >> 4;
s->end_mb_y = (h + 15) >> 4;
if (v->respic & 1)

View File

@ -161,6 +161,9 @@ static int decode_frame(AVCodecContext *avctx,
type = AV_RB32(avpkt->data);
size = AV_RL32(avpkt->data + 4);
if (size < 1 || size >= avpkt->size)
return AVERROR_INVALIDDATA;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;

View File

@ -39,7 +39,7 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt)
{
NoiseContext *s = ctx->priv_data;
int amount = s->amount > 0 ? s->amount : (s->state % 10001 + 1);
int i, ret = 0;
int i, ret;
if (amount <= 0)
return AVERROR(EINVAL);
@ -55,19 +55,18 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt)
}
ret = av_packet_make_writable(pkt);
if (ret < 0)
goto fail;
if (ret < 0) {
av_packet_unref(pkt);
return ret;
}
for (i = 0; i < pkt->size; i++) {
s->state += pkt->data[i] + 1;
if (s->state % amount == 0)
pkt->data[i] = s->state;
}
fail:
if (ret < 0)
av_packet_unref(pkt);
return ret;
return 0;
}
#define OFFSET(x) offsetof(NoiseContext, x)

View File

@ -50,6 +50,10 @@ static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer
.FrameHeightInMbs = (cur_frame->height + 15) / 16,
.CurrPicIdx = cf->idx,
.field_pic_flag = s->picture_structure != PICT_FRAME,
.bottom_field_flag = s->picture_structure == PICT_BOTTOM_FIELD,
.second_field = s->picture_structure != PICT_FRAME && !s->first_field,
.intra_pic_flag = s->pict_type == AV_PICTURE_TYPE_I,
.ref_pic_flag = s->pict_type == AV_PICTURE_TYPE_I ||
s->pict_type == AV_PICTURE_TYPE_P,

View File

@ -2262,3 +2262,8 @@ int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return 0;
}
av_cold void ff_nvenc_encode_flush(AVCodecContext *avctx)
{
ff_nvenc_send_frame(avctx, NULL);
}

View File

@ -214,6 +214,8 @@ int ff_nvenc_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *frame, int *got_packet);
void ff_nvenc_encode_flush(AVCodecContext *avctx);
extern const enum AVPixelFormat ff_nvenc_pix_fmts[];
#endif /* AVCODEC_NVENC_H */

View File

@ -240,6 +240,7 @@ AVCodec ff_h264_nvenc_encoder = {
.receive_packet = ff_nvenc_receive_packet,
.encode2 = ff_nvenc_encode_frame,
.close = ff_nvenc_encode_close,
.flush = ff_nvenc_encode_flush,
.priv_data_size = sizeof(NvencContext),
.priv_class = &h264_nvenc_class,
.defaults = defaults,

View File

@ -198,6 +198,7 @@ AVCodec ff_hevc_nvenc_encoder = {
.receive_packet = ff_nvenc_receive_packet,
.encode2 = ff_nvenc_encode_frame,
.close = ff_nvenc_encode_close,
.flush = ff_nvenc_encode_flush,
.priv_data_size = sizeof(NvencContext),
.priv_class = &hevc_nvenc_class,
.defaults = defaults,

View File

@ -141,8 +141,8 @@ static const AVOption avcodec_options[] = {
{"explode", "abort decoding on minor error detection", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_EXPLODE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"ignore_err", "ignore errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_IGNORE_ERR }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"careful", "consider things that violate the spec, are fast to check and have not been seen in the wild as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"compliant", "consider all spec non compliancies as errors", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_COMPLIANT | AV_EF_CAREFUL }, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"aggressive", "consider things that a sane encoder should not do as an error", 0, AV_OPT_TYPE_CONST, {.i64 = AV_EF_AGGRESSIVE | AV_EF_COMPLIANT | AV_EF_CAREFUL}, INT_MIN, INT_MAX, A|V|D, "err_detect"},
{"has_b_frames", NULL, OFFSET(has_b_frames), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
{"block_align", NULL, OFFSET(block_align), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX},
#if FF_API_PRIVATE_OPT

View File

@ -691,8 +691,11 @@ static int decode(AVCodecContext *avctx, void *data, int *data_size,
ret = AVERROR_INVALIDDATA;
break;
}
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
avsubtitle_free(data);
*data_size = 0;
return ret;
}
buf += segment_length;
}

View File

@ -117,6 +117,9 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
return AVERROR_INVALIDDATA;
}
}
if (!pnm_space(s->bytestream[-1]))
return AVERROR_INVALIDDATA;
/* check that all tags are present */
if (w <= 0 || h <= 0 || maxval <= 0 || maxval > UINT16_MAX || depth <= 0 || tuple_type[0] == '\0' ||
av_image_check_size(w, h, 0, avctx) || s->bytestream >= s->bytestream_end)
@ -197,6 +200,10 @@ int ff_pnm_decode_header(AVCodecContext *avctx, PNMContext * const s)
}
}else
s->maxval=1;
if (!pnm_space(s->bytestream[-1]))
return AVERROR_INVALIDDATA;
/* more check if YUV420 */
if (av_pix_fmt_desc_get(avctx->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR) {
if ((avctx->width & 1) != 0)

View File

@ -143,7 +143,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data,
v = (*s->bytestream++)&1;
} else {
/* read a sequence of digits */
for (k = 0; k < 5 && c <= 9; k += 1) {
for (k = 0; k < 6 && c <= 9; k += 1) {
v = 10*v + c;
c = (*s->bytestream++) - '0';
}

View File

@ -224,7 +224,7 @@ static void encode_codeword(PutBitContext *pb, int val, int codebook)
}
#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
#define TO_GOLOMB(val) (((val) * 2) ^ ((val) >> 31))
#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))

View File

@ -72,58 +72,6 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id)
return AVERROR(ENOSYS);
}
static const struct {
enum AVCodecID codec_id;
int codec_profile;
int mfx_profile;
} qsv_profile_map[] = {
#define MAP(c, p, v) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, MFX_PROFILE_ ## v }
MAP(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2_SIMPLE ),
MAP(MPEG2VIDEO, MPEG2_MAIN, MPEG2_MAIN ),
MAP(MPEG2VIDEO, MPEG2_HIGH, MPEG2_HIGH ),
MAP(H264, H264_BASELINE, AVC_BASELINE ),
MAP(H264, H264_CONSTRAINED_BASELINE, AVC_BASELINE),
#if QSV_VERSION_ATLEAST(1, 3)
MAP(H264, H264_EXTENDED, AVC_EXTENDED ),
#endif
MAP(H264, H264_MAIN, AVC_MAIN ),
MAP(H264, H264_HIGH, AVC_HIGH ),
MAP(H264, H264_HIGH_422, AVC_HIGH_422 ),
#if QSV_VERSION_ATLEAST(1, 8)
MAP(HEVC, HEVC_MAIN, HEVC_MAIN ),
MAP(HEVC, HEVC_MAIN_10, HEVC_MAIN10 ),
MAP(HEVC, HEVC_MAIN_STILL_PICTURE, HEVC_MAINSP ),
#endif
#if QSV_VERSION_ATLEAST(1, 16)
MAP(HEVC, HEVC_REXT, HEVC_REXT ),
#endif
MAP(VC1, VC1_SIMPLE, VC1_SIMPLE ),
MAP(VC1, VC1_MAIN, VC1_MAIN ),
MAP(VC1, VC1_COMPLEX, VC1_ADVANCED ),
MAP(VC1, VC1_ADVANCED, VC1_ADVANCED ),
#undef MAP
};
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile)
{
int i;
if (profile == FF_PROFILE_UNKNOWN)
return MFX_PROFILE_UNKNOWN;
for (i = 0; i < FF_ARRAY_ELEMS(qsv_profile_map); i++) {
if (qsv_profile_map[i].codec_id != codec_id)
continue;
if (qsv_profile_map[i].codec_profile == profile)
return qsv_profile_map[i].mfx_profile;
}
return MFX_PROFILE_UNKNOWN;
}
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level)
{
if (level == FF_LEVEL_UNKNOWN)

View File

@ -116,7 +116,6 @@ int ff_qsv_print_warning(void *log_ctx, mfxStatus err,
const char *warning_string);
int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id);
int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile);
int ff_qsv_level_to_mfx(enum AVCodecID codec_id, int level);
enum AVPixelFormat ff_qsv_map_fourcc(uint32_t fourcc);

View File

@ -74,7 +74,7 @@ static int ff_qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame, A
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
return AVERROR(ENOMEM);
return AVERROR(EINVAL);
}
frame->linesize[1] = frame->linesize[0];
@ -99,9 +99,11 @@ static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession ses
int ret;
if (q->gpu_copy == MFX_GPUCOPY_ON &&
!(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY))
!(q->iopattern & MFX_IOPATTERN_OUT_SYSTEM_MEMORY)) {
av_log(avctx, AV_LOG_WARNING, "GPU-accelerated memory copy "
"only works in MFX_IOPATTERN_OUT_SYSTEM_MEMORY.\n");
"only works in system memory mode.\n");
q->gpu_copy = MFX_GPUCOPY_OFF;
}
if (session) {
q->session = session;
} else if (hw_frames_ref) {

View File

@ -139,6 +139,9 @@ static void dump_video_param(AVCodecContext *avctx, QSVEncContext *q,
#if QSV_HAVE_CO3
mfxExtCodingOption3 *co3 = (mfxExtCodingOption3*)coding_opts[2];
#endif
#if QSV_HAVE_EXT_HEVC_TILES
mfxExtHEVCTiles *exthevctiles = (mfxExtHEVCTiles *)coding_opts[3 + QSV_HAVE_CO_VPS];
#endif
av_log(avctx, AV_LOG_VERBOSE, "profile: %s; level: %"PRIu16"\n",
print_profile(info->CodecProfile), info->CodecLevel);
@ -204,6 +207,12 @@ static void dump_video_param(AVCodecContext *avctx, QSVEncContext *q,
av_log(avctx, AV_LOG_VERBOSE, "RateDistortionOpt: %s\n",
print_threestate(co->RateDistortionOpt));
#if QSV_HAVE_EXT_HEVC_TILES
if (avctx->codec_id == AV_CODEC_ID_HEVC)
av_log(avctx, AV_LOG_VERBOSE, "NumTileColumns: %"PRIu16"; NumTileRows: %"PRIu16"\n",
exthevctiles->NumTileColumns, exthevctiles->NumTileRows);
#endif
#if QSV_HAVE_CO2
av_log(avctx, AV_LOG_VERBOSE,
"RecoveryPointSEI: %s IntRefType: %"PRIu16"; IntRefCycleSize: %"PRIu16"; IntRefQPDelta: %"PRId16"\n",
@ -771,6 +780,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
#endif
#if QSV_HAVE_EXT_HEVC_TILES
if (avctx->codec_id == AV_CODEC_ID_HEVC) {
q->exthevctiles.Header.BufferId = MFX_EXTBUFF_HEVC_TILES;
q->exthevctiles.Header.BufferSz = sizeof(q->exthevctiles);
q->exthevctiles.NumTileColumns = q->tile_cols;
q->exthevctiles.NumTileRows = q->tile_rows;
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->exthevctiles;
}
#endif
if (!check_enc_param(avctx,q)) {
av_log(avctx, AV_LOG_ERROR,
"some encoding parameters are not supported by the QSV "
@ -889,7 +908,14 @@ static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
};
#endif
mfxExtBuffer *ext_buffers[2 + QSV_HAVE_CO2 + QSV_HAVE_CO3 + QSV_HAVE_CO_VPS];
#if QSV_HAVE_EXT_HEVC_TILES
mfxExtHEVCTiles hevc_tile_buf = {
.Header.BufferId = MFX_EXTBUFF_HEVC_TILES,
.Header.BufferSz = sizeof(hevc_tile_buf),
};
#endif
mfxExtBuffer *ext_buffers[2 + QSV_HAVE_CO2 + QSV_HAVE_CO3 + QSV_HAVE_CO_VPS + QSV_HAVE_EXT_HEVC_TILES];
int need_pps = avctx->codec_id != AV_CODEC_ID_MPEG2VIDEO;
int ret, ext_buf_num = 0, extradata_offset = 0;
@ -907,6 +933,10 @@ static int qsv_retrieve_enc_params(AVCodecContext *avctx, QSVEncContext *q)
if (q->hevc_vps)
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&extradata_vps;
#endif
#if QSV_HAVE_EXT_HEVC_TILES
if (avctx->codec_id == AV_CODEC_ID_HEVC)
ext_buffers[ext_buf_num++] = (mfxExtBuffer*)&hevc_tile_buf;
#endif
q->param.ExtParam = ext_buffers;
q->param.NumExtParam = ext_buf_num;

View File

@ -38,6 +38,7 @@
#define QSV_HAVE_CO3 QSV_VERSION_ATLEAST(1, 11)
#define QSV_HAVE_CO_VPS QSV_VERSION_ATLEAST(1, 17)
#define QSV_HAVE_EXT_HEVC_TILES QSV_VERSION_ATLEAST(1, 13)
#define QSV_HAVE_EXT_VP9_PARAM QSV_VERSION_ATLEAST(1, 26)
#define QSV_HAVE_TRELLIS QSV_VERSION_ATLEAST(1, 8)
@ -124,6 +125,9 @@ typedef struct QSVEncContext {
mfxExtMultiFrameParam extmfp;
mfxExtMultiFrameControl extmfc;
#endif
#if QSV_HAVE_EXT_HEVC_TILES
mfxExtHEVCTiles exthevctiles;
#endif
#if QSV_HAVE_EXT_VP9_PARAM
mfxExtVP9Param extvp9param;
#endif
@ -161,6 +165,9 @@ typedef struct QSVEncContext {
int max_frame_size;
int max_slice_size;
int tile_cols;
int tile_rows;
int aud;
int single_sei_nal_unit;

View File

@ -243,6 +243,9 @@ static const AVOption options[] = {
{ "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE},
{ "tile_cols", "Number of columns for tiled encoding", OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
{ "tile_rows", "Number of rows for tiled encoding", OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT16_MAX, VE },
{ NULL },
};

View File

@ -121,7 +121,7 @@ static inline void FUNC6(idctRowCondDC)(idctin *row, int extra_shift)
// TODO: Add DC-only support for int32_t input
#if IN_IDCT_DEPTH == 16
#if HAVE_FAST_64BIT
#define ROW0_MASK (0xffffLL << 48 * HAVE_BIGENDIAN)
#define ROW0_MASK (0xffffULL << 48 * HAVE_BIGENDIAN)
if (((AV_RN64A(row) & ~ROW0_MASK) | AV_RN64A(row+4)) == 0) {
uint64_t temp;
if (DC_SHIFT - extra_shift >= 0) {

View File

@ -144,6 +144,8 @@ static inline av_flatten int get_symbol(RangeCoder *c, uint8_t *state, int is_si
e= 0;
while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
e++;
if (e > 31)
return AVERROR_INVALIDDATA;
}
a= 1;

View File

@ -132,12 +132,6 @@ static int decode_frame(AVCodecContext *avctx,
h = bytestream2_get_le16(&s->gb);
bpp = bytestream2_get_byte(&s->gb);
if (bytestream2_get_bytes_left(&s->gb) <= idlen) {
av_log(avctx, AV_LOG_ERROR,
"Not enough data to read header\n");
return AVERROR_INVALIDDATA;
}
flags = bytestream2_get_byte(&s->gb);
if (!pal && (first_clr || colors || csize)) {
@ -146,6 +140,12 @@ static int decode_frame(AVCodecContext *avctx,
first_clr = colors = csize = 0;
}
if (bytestream2_get_bytes_left(&s->gb) < idlen + 2*colors) {
av_log(avctx, AV_LOG_ERROR,
"Not enough data to read header\n");
return AVERROR_INVALIDDATA;
}
// skip identifier if any
bytestream2_skip(&s->gb, idlen);

View File

@ -1218,6 +1218,8 @@ static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
{
AVFrameSideData *sd;
GetByteContext gb_temp;
unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
int i, start;
int pos;
@ -1643,6 +1645,22 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
}
}
break;
case TIFF_ICC_PROFILE:
if (type != TIFF_UNDEFINED)
return AVERROR_INVALIDDATA;
gb_temp = s->gb;
bytestream2_seek(&gb_temp, SEEK_SET, off);
if (bytestream2_get_bytes_left(&gb_temp) < count)
return AVERROR_INVALIDDATA;
sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
if (!sd)
return AVERROR(ENOMEM);
bytestream2_get_bufferu(&gb_temp, sd->data, count);
break;
case TIFF_ARTIST:
ADD_METADATA(count, "artist", NULL);
break;

View File

@ -92,6 +92,7 @@ enum TiffTags {
TIFF_MODEL_TIEPOINT = 0x8482,
TIFF_MODEL_PIXEL_SCALE = 0x830E,
TIFF_MODEL_TRANSFORMATION= 0x8480,
TIFF_ICC_PROFILE = 0x8773,
TIFF_GEO_KEY_DIRECTORY = 0x87AF,
TIFF_GEO_DOUBLE_PARAMS = 0x87B0,
TIFF_GEO_ASCII_PARAMS = 0x87B1,

Some files were not shown because too many files have changed in this diff Show More