This commit is contained in:
MrBesen 2019-12-11 10:30:27 +01:00
commit 438b0eae71
Signed by: MrBesen
GPG Key ID: 596B2350DCD67504
261 changed files with 5023 additions and 1768 deletions

View File

@ -24,6 +24,9 @@ version <next>:
- AV1 encoding support via librav1e
- AV1 frame merge bitstream filter
- AV1 Annex B demuxer
- axcorrelate filter
- mvdv decoder
- mvha decoder
version 4.2:

View File

@ -198,6 +198,7 @@ Codecs:
libvorbis.c David Conrad
libvpx* James Zern
libxavs.c Stefan Gehrer
libxavs2.c Huiwen Ren
libzvbi-teletextdec.c Marton Balint
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
@ -369,6 +370,8 @@ Filters:
Sources:
vsrc_mandelbrot.c Michael Niedermayer
dnn Yejun Guo
libavformat
===========

View File

@ -50,6 +50,9 @@ $(TOOLS): %$(EXESUF): %.o
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

12
configure vendored
View File

@ -2779,6 +2779,7 @@ msmpeg4v3_decoder_select="h263_decoder"
msmpeg4v3_encoder_select="h263_encoder"
mss2_decoder_select="mpegvideo qpeldsp vc1_decoder"
mts2_decoder_select="mss34dsp"
mvha_decoder_deps="zlib"
mwsc_decoder_deps="zlib"
mxpeg_decoder_select="mjpeg_decoder"
nellymoser_decoder_select="mdct sinewin"
@ -3499,6 +3500,7 @@ frei0r_filter_deps="frei0r libdl"
frei0r_src_filter_deps="frei0r libdl"
fspp_filter_deps="gpl"
geq_filter_deps="gpl"
headphone_filter_select="fft"
histeq_filter_deps="gpl"
hqdn3d_filter_deps="gpl"
interlace_filter_deps="gpl"
@ -3547,6 +3549,7 @@ showcqt_filter_suggest="libfontconfig libfreetype"
showcqt_filter_select="fft"
showfreqs_filter_deps="avcodec"
showfreqs_filter_select="fft"
showspatial_filter_select="fft"
showspectrum_filter_deps="avcodec"
showspectrum_filter_select="fft"
showspectrumpic_filter_deps="avcodec"
@ -6137,10 +6140,10 @@ fi
if ! disabled ffnvcodec; then
ffnv_hdr_list="ffnvcodec/nvEncodeAPI.h ffnvcodec/dynlink_cuda.h ffnvcodec/dynlink_cuviddec.h ffnvcodec/dynlink_nvcuvid.h"
check_pkg_config ffnvcodec "ffnvcodec >= 9.0.18.0" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.2.15.8 ffnvcodec < 8.3" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.1.24.9 ffnvcodec < 8.2" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.0.14.9 ffnvcodec < 8.1" "$ffnv_hdr_list" ""
check_pkg_config ffnvcodec "ffnvcodec >= 9.1.23.1" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 9.0.18.3 ffnvcodec < 9.1" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.2.15.10 ffnvcodec < 8.3" "$ffnv_hdr_list" "" || \
check_pkg_config ffnvcodec "ffnvcodec >= 8.1.24.11 ffnvcodec < 8.2" "$ffnv_hdr_list" ""
fi
check_cpp_condition winrt windows.h "!WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)"
@ -6400,6 +6403,7 @@ enabled omx_rpi && { test_code cc OMX_Core.h OMX_IndexConfigBrcmVideoR
enabled omx && require_headers OMX_Core.h
enabled openssl && { check_pkg_config openssl openssl openssl/ssl.h OPENSSL_init_ssl ||
check_pkg_config openssl openssl openssl/ssl.h SSL_library_init ||
check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||
check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||
check_lib openssl openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||

View File

@ -15,6 +15,9 @@ libavutil: 2017-10-21
API changes, most recent first:
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
Add av_expr_count_vars().
2019-10-14 - f3746d31f9 - lavu 56.35.101 - opt.h
Add AV_OPT_FLAG_RUNTIME_PARAM.

View File

@ -48,6 +48,8 @@ config
tools/target_dec_<decoder>_fuzzer
Build fuzzer to fuzz the specified decoder.
tools/target_bsf_<filter>_fuzzer
Build fuzzer to fuzz the specified bitstream filter.
Useful standard make commands:
make -t <target>

View File

@ -71,6 +71,13 @@ Set amount of tile threads to use during decoding. The default value is 0 (autod
Apply film grain to the decoded video if present in the bitstream. Defaults to the
internal default of the library.
@item oppoint
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
internal default of the library.
@item alllayers
Output all spatial layers of a scalable AV1 bitstream. The default value is false.
@end table
@section libdavs2

View File

@ -331,6 +331,10 @@ segment index to start live streams at (negative values are from the end).
Maximum number of times an insufficient list is attempted to be reloaded.
Default value is 1000.
@item m3u8_hold_counters
The maximum number of times to load m3u8 when it refreshes without new segments.
Default value is 1000.
@item http_persistent
Use persistent HTTP connections. Applicable only for HTTP streams.
Enabled by default.

View File

@ -1395,7 +1395,7 @@ Sets the maximum quantizer to use when using bitrate mode.
Sets the minimum quantizer to use when using bitrate mode.
@item qp
Uses quantizer mode to encode at the given quantizer.
Uses quantizer mode to encode at the given quantizer (0-255).
@item speed
Selects the speed preset (0-10) to encode with.
@ -2776,7 +2776,7 @@ recommended value) and do not set a size constraint.
@section QSV encoders
The family of Intel QuickSync Video encoders (MPEG-2, H.264 and HEVC)
The family of Intel QuickSync Video encoders (MPEG-2, H.264, HEVC, JPEG/MJPEG and VP9)
The ratecontrol method is selected as follows:

View File

@ -1547,6 +1547,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2527,6 +2531,39 @@ ffmpeg -i INPUT -af atrim=end_sample=1000
@end itemize
@section axcorrelate
Calculate normalized cross-correlation between two input audio streams.
Resulted samples are always between -1 and 1 inclusive.
If result is 1 it means two input samples are highly correlated in that selected segment.
Result 0 means they are not correlated at all.
If result is -1 it means two input samples are out of phase, which means they cancel each
other.
The filter accepts the following options:
@table @option
@item size
Set size of segment over which cross-correlation is calculated.
Default is 256. Allowed range is from 2 to 131072.
@item algo
Set algorithm for cross-correlation. Can be @code{slow} or @code{fast}.
Default is @code{slow}. The fast algorithm assumes that mean values over any given
segment are always zero, and thus needs far fewer calculations.
This is generally not true, but is valid for typical audio streams.
@end table
@subsection Examples
@itemize
@item
Calculate correlation between channels in stereo audio stream:
@example
ffmpeg -i stereo.wav -af channelsplit,axcorrelate=size=1024:algo=fast correlation.wav
@end example
@end itemize
@section bandpass
Apply a two-pole Butterworth band-pass filter with central
@ -2568,6 +2605,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2627,6 +2668,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2693,6 +2738,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -2744,6 +2793,13 @@ Syntax for the command is : "@var{value}"
@item mix, m
How much to use filtered signal in output. Default is 1.
Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@section bs2b
@ -3439,6 +3495,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Examples
@ -3908,6 +3968,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -4224,6 +4288,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Examples
@ -5332,6 +5400,10 @@ Range is between 0 and 1.
@item channels, c
Specify which channels to filter, by default all available are filtered.
@item normalize, n
Normalize biquad coefficients, by default is disabled.
Enabling it will normalize magnitude response at DC to 0dB.
@end table
@subsection Commands
@ -6863,6 +6935,13 @@ Literal colors like "green" or "red" don't make sense with this enabled anymore.
This can be used to pass exact YUV values as hexadecimal numbers.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section chromakey
YUV colorspace color/chroma keying.
@ -6892,6 +6971,13 @@ Literal colors like "green" or "red" don't make sense with this enabled anymore.
This can be used to pass exact YUV values as hexadecimal numbers.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@subsection Examples
@itemize
@ -6925,6 +7011,10 @@ Set amount to shift chroma-red vertically.
Set edge mode, can be @var{smear}, default, or @var{warp}.
@end table
@subsection Commands
This filter supports all of the above options as @ref{commands}.
@section ciescope
Display CIE color diagram with pixels overlaid onto it.
@ -10745,6 +10835,13 @@ Default is @var{smear}.
Set color for pixels in fixed mode. Default is @var{black}.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section find_rect
Find a rectangular object
@ -11668,6 +11765,13 @@ A floating point number which specifies chroma temporal strength. It defaults to
@var{luma_tmp}*@var{chroma_spatial}/@var{luma_spatial}.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@anchor{hwdownload}
@section hwdownload
@ -12380,7 +12484,7 @@ The filter has following options:
@table @option
@item model_path
Set the model path which is to be used for SVM.
Default value: @code{"vmaf_v0.6.1.pkl"}
Default value: @code{"/usr/local/share/model/vmaf_v0.6.1.pkl"}
@item log_path
Set the file path to be used to store logs.
@ -12396,27 +12500,35 @@ Default value: @code{false}
@item phone_model
Invokes the phone model which will generate VMAF scores higher than in the
regular model, which is more suitable for laptop, TV, etc. viewing conditions.
Default value: @code{false}
@item psnr
Enables computing psnr along with vmaf.
Default value: @code{false}
@item ssim
Enables computing ssim along with vmaf.
Default value: @code{false}
@item ms_ssim
Enables computing ms_ssim along with vmaf.
Default value: @code{false}
@item pool
Set the pool method (mean, min or harmonic mean) to be used for computing vmaf.
Set the pool method to be used for computing vmaf.
Options are @code{min}, @code{harmonic_mean} or @code{mean} (default).
@item n_threads
Set number of threads to be used when computing vmaf.
Default value: @code{0}, which makes use of all available logical processors.
@item n_subsample
Set interval for frame subsampling used when computing vmaf.
Default value: @code{1}
@item enable_conf_interval
Enables confidence interval.
Default value: @code{false}
@end table
This filter also supports the @ref{framesync} options.
@ -12440,7 +12552,7 @@ ffmpeg -i main.mpg -i ref.mpg -lavfi libvmaf="psnr=1:log_fmt=json" -f null -
@item
Example with options and different containers:
@example
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=1/AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=1/AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]libvmaf=psnr=1:log_fmt=json" -f null -
@end example
@end itemize
@ -12590,13 +12702,20 @@ Default value is @code{0}.
@item tolerance
Set the range of luma values to be keyed out.
Default value is @code{0}.
Default value is @code{0.01}.
@item softness
Set the range of softness. Default value is @code{0}.
Use this to control gradual transition from zero to full transparency.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section lut, lutrgb, lutyuv
Compute a look-up table for binding each pixel component input value
@ -12982,6 +13101,13 @@ Allowed range is integer from 0 to 127.
If it is 0, value will be picked from horizontal @code{radius} option.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@section mergeplanes
Merge color channel components from several video streams.
@ -13550,6 +13676,13 @@ expensive no-op. Defaults to 1.0 (full strength).
@end table
@subsection Commands
This filter supports the same @ref{commands} as options, excluding the @var{smoothing} option.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@subsection Examples
Stretch video contrast to use the full dynamic range, with no temporal
@ -13763,6 +13896,13 @@ Draw some statistics. By default is enabled.
Draw scope. By default is enabled.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
The command accepts the same syntax as the corresponding option.
If the specified expression is not valid, it is kept at its current
value.
@subsection Examples
@itemize
@ -15061,7 +15201,7 @@ is stored in @file{stats.log}.
@item
Another example with different containers:
@example
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=1/AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=1/AVTB,setpts=PTS-STARTPTS[ref];[main][ref]psnr" -f null -
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]psnr" -f null -
@end example
@end itemize
@ -15489,6 +15629,10 @@ Set amount to shift alpha vertically.
Set edge mode, can be @var{smear}, default, or @var{warp}.
@end table
@subsection Commands
This filter supports all of the above options as @ref{commands}.
@section roberts
Apply roberts cross operator to input video stream.
@ -16066,6 +16210,46 @@ Supersampling
@item lanczos
@end table
@item force_original_aspect_ratio
Enable decreasing or increasing output video width or height if necessary to
keep the original aspect ratio. Possible values:
@table @samp
@item disable
Scale the video as specified and disable this feature.
@item decrease
The output video dimensions will automatically be decreased if needed.
@item increase
The output video dimensions will automatically be increased if needed.
@end table
One useful instance of this option is that when you know a specific device's
maximum allowed resolution, you can use this to limit the output video to
that, while retaining the aspect ratio. For example, device A allows
1280x720 playback, and your video is 1920x800. Using this option (set it to
decrease) and specifying 1280x720 to the command line makes the output
1280x533.
Please note that this is a different thing than specifying -1 for @option{w}
or @option{h}, you still need to specify the output resolution for this option
to work.
@item force_divisible_by
Ensures that both the output dimensions, width and height, are divisible by the
given integer when used together with @option{force_original_aspect_ratio}. This
works similar to using @code{-n} in the @option{w} and @option{h} options.
This option respects the value set for @option{force_original_aspect_ratio},
increasing or decreasing the resolution accordingly. The video's aspect ratio
may be slightly modified.
This option can be handy if you need to have a video fit within or exceed
a defined resolution using @option{force_original_aspect_ratio} but also have
encoder restrictions on width or height divisibility.
@end table
@section scale2ref
@ -17128,7 +17312,7 @@ ffmpeg -i main.mpg -i ref.mpg -lavfi "ssim;[0:v][1:v]psnr" -f null -
@item
Another example with different containers:
@example
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=1/AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=1/AVTB,setpts=PTS-STARTPTS[ref];[main][ref]ssim" -f null -
ffmpeg -i main.mpg -i ref.mkv -lavfi "[0:v]settb=AVTB,setpts=PTS-STARTPTS[main];[1:v]settb=AVTB,setpts=PTS-STARTPTS[ref];[main][ref]ssim" -f null -
@end example
@end itemize
@ -19257,16 +19441,23 @@ vignette='PI/4+random(1)*PI/50':eval=frame
@section vmafmotion
Obtain the average vmaf motion score of a video.
It is one of the component filters of VMAF.
Obtain the average VMAF motion score of a video.
It is one of the component metrics of VMAF.
The obtained average motion score is printed through the logging system.
In the example below, the input file @file{ref.mpg} is processed and its score
is computed.
The filter accepts the following options:
@table @option
@item stats_file
If specified, the filter will use the named file to save the motion score of
each frame with respect to the previous frame.
When filename equals "-" the data is sent to standard output.
@end table
Example:
@example
ffmpeg -i ref.mpg -lavfi vmafmotion -f null -
ffmpeg -i ref.mpg -vf vmafmotion -f null -
@end example
@section vstack
@ -19767,6 +19958,28 @@ Only deinterlace frames marked as interlaced.
The default value is @code{all}.
@end table
@section yaepblur
Apply blur filter while preserving edges ("yaepblur" means "yet another edge preserving blur filter").
The algorithm is described in
"J. S. Lee, Digital image enhancement and noise filtering by use of local statistics, IEEE Trans. Pattern Anal. Mach. Intell. PAMI-2, 1980."
It accepts the following parameters:
@table @option
@item radius, r
Set the window radius. Default value is 3.
@item planes, p
Set which planes to filter. Default is only the first plane.
@item sigma, s
Set blur strength. Default value is 128.
@end table
@subsection Commands
This filter supports the same @ref{commands} as options.
@section zoompan
Apply Zoom & Pan effect.

View File

@ -395,6 +395,14 @@ Either sync could go wrong by 1 frame or in a rarer case
@option{timestamp_align} seconds.
Defaults to @samp{0}.
@item wait_for_tc (@emph{bool})
Drop frames until a frame with timecode is received. Sometimes serial timecode
isn't received with the first input frame. If that happens, the stored stream
timecode will be inaccurate. If this option is set to @option{true}, input frames
are dropped until a frame with timecode is received.
Option @var{timecode_format} must be specified.
Defaults to @option{false}.
@end table
@subsection Examples

View File

@ -1582,11 +1582,12 @@ Advanced Codec Digital HDTV service.
@end table
@item mpegts_pmt_start_pid @var{integer}
Set the first PID for PMT. Default is @code{0x1000}. Max is @code{0x1f00}.
Set the first PID for PMTs. Default is @code{0x1000}, minimum is @code{0x0020},
maximum is @code{0x1ffa}.
@item mpegts_start_pid @var{integer}
Set the first PID for data packets. Default is @code{0x0100}. Max is
@code{0x0f00}.
Set the first PID for elementary streams. Default is @code{0x0100}, minimum is
@code{0x0020}, maximum is @code{0x1ffa}.
@item mpegts_m2ts_mode @var{boolean}
Enable m2ts mode if set to @code{1}. Default value is @code{-1} which

View File

@ -1282,6 +1282,26 @@ only if @option{pbkeylen} is non-zero. It is used on
the receiver only if the received data is encrypted.
The configured passphrase cannot be recovered (write-only).
@item enforced_encryption=@var{1|0}
If true, both connection parties must have the same password
set (including empty, that is, with no encryption). If the
password doesn't match or only one side is unencrypted,
the connection is rejected. Default is true.
@item kmrefreshrate=@var{packets}
The number of packets to be transmitted after which the
encryption key is switched to a new key. Default is -1.
-1 means auto (0x1000000 in srt library). The valid range for
this option is 0 - @code{INT_MAX}.
@item kmpreannounce=@var{packets}
The interval between when a new encryption key is sent and
when switchover occurs. This value also applies to the
subsequent interval between when switchover occurs and
when the old encryption key is decommissioned. Default is -1.
-1 means auto (0x1000 in srt library). The valid range for
this option is 0 - @code{INT_MAX}.
@item payload_size=@var{bytes}
Sets the maximum declared size of a packet transferred
during the single call to the sending function in Live
@ -1426,6 +1446,12 @@ the overhead transmission (retransmitted and control packets).
file: Set options as for non-live transmission. See @option{messageapi}
for further explanations
@item linger=@var{seconds}
The number of seconds that the socket waits for unsent data when closing.
Default is -1. -1 means auto (off with 0 seconds in live mode, on with 180
seconds in file mode). The valid range for this option is
0 - @code{INT_MAX}.
@end table
For more information see: @url{https://github.com/Haivision/srt}.

View File

@ -493,6 +493,8 @@ OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
OBJS-$(CONFIG_MTS2_DECODER) += mss4.o
OBJS-$(CONFIG_MVC1_DECODER) += mvcdec.o
OBJS-$(CONFIG_MVC2_DECODER) += mvcdec.o
OBJS-$(CONFIG_MVDV_DECODER) += midivid.o
OBJS-$(CONFIG_MVHA_DECODER) += mvha.o
OBJS-$(CONFIG_MWSC_DECODER) += mwsc.o
OBJS-$(CONFIG_MXPEG_DECODER) += mxpegdec.o
OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o

View File

@ -460,8 +460,8 @@ static int decode_inter_plane(AGMContext *s, GetBitContext *gb, int size,
return ret;
if (orig_mv_x >= -32) {
if (y * 8 + mv_y < 0 || y * 8 + mv_y >= h ||
x * 8 + mv_x < 0 || x * 8 + mv_x >= w)
if (y * 8 + mv_y < 0 || y * 8 + mv_y + 8 > h ||
x * 8 + mv_x < 0 || x * 8 + mv_x + 8 > w)
return AVERROR_INVALIDDATA;
copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
@ -827,7 +827,7 @@ static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame
static int decode_motion_vectors(AVCodecContext *avctx, GetBitContext *gb)
{
AGMContext *s = avctx->priv_data;
int nb_mvs = ((avctx->height + 15) >> 4) * ((avctx->width + 15) >> 4);
int nb_mvs = ((avctx->coded_height + 15) >> 4) * ((avctx->coded_width + 15) >> 4);
int ret, skip = 0, value, map;
av_fast_padded_malloc(&s->mvectors, &s->mvectors_size,

View File

@ -227,7 +227,7 @@ static void lpc_prediction(int32_t *error_buffer, uint32_t *buffer_out,
val = d - pred[j];
sign = sign_only(val) * error_sign;
lpc_coefs[j] -= sign;
val *= sign;
val *= (unsigned)sign;
error_val -= (val >> lpc_quant) * (j + 1);
}
}

View File

@ -218,6 +218,8 @@ extern AVCodec ff_mszh_decoder;
extern AVCodec ff_mts2_decoder;
extern AVCodec ff_mvc1_decoder;
extern AVCodec ff_mvc2_decoder;
extern AVCodec ff_mvdv_decoder;
extern AVCodec ff_mvha_decoder;
extern AVCodec ff_mwsc_decoder;
extern AVCodec ff_mxpeg_decoder;
extern AVCodec ff_nuv_decoder;

View File

@ -236,6 +236,7 @@ typedef struct ALSDecContext {
int **raw_mantissa; ///< decoded mantissa bits of the difference signal
unsigned char *larray; ///< buffer to store the output of masked lz decompression
int *nbits; ///< contains the number of bits to read for masked lz decompression for all samples
int highest_decoded_channel;
} ALSDecContext;
@ -1678,6 +1679,7 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
memmove(ctx->raw_samples[c] - sconf->max_order,
ctx->raw_samples[c] - sconf->max_order + sconf->frame_length,
sizeof(*ctx->raw_samples[c]) * sconf->max_order);
ctx->highest_decoded_channel = c;
}
} else { // multi-channel coding
ALSBlockData bd = { 0 };
@ -1746,6 +1748,8 @@ static int read_frame_data(ALSDecContext *ctx, unsigned int ra_frame)
if ((ret = decode_block(ctx, &bd)) < 0)
return ret;
ctx->highest_decoded_channel = FFMAX(ctx->highest_decoded_channel, c);
}
memset(reverted_channels, 0, avctx->channels * sizeof(*reverted_channels));
@ -1802,11 +1806,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
else
ctx->cur_frame_length = sconf->frame_length;
ctx->highest_decoded_channel = 0;
// decode the frame data
if ((invalid_frame = read_frame_data(ctx, ra_frame)) < 0)
av_log(ctx->avctx, AV_LOG_WARNING,
"Reading frame data failed. Skipping RA unit.\n");
if (ctx->highest_decoded_channel == 0)
return AVERROR_INVALIDDATA;
ctx->frame_id++;
/* get output buffer */
@ -1819,16 +1827,17 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
{ \
int##bps##_t *dest = (int##bps##_t*)frame->data[0]; \
int channels = avctx->channels; \
int32_t **raw_samples = ctx->raw_samples; \
int32_t *raw_samples = ctx->raw_samples[0]; \
int raw_step = channels > 1 ? ctx->raw_samples[1] - raw_samples : 1; \
shift = bps - ctx->avctx->bits_per_raw_sample; \
if (!ctx->cs_switch) { \
for (sample = 0; sample < ctx->cur_frame_length; sample++) \
for (c = 0; c < channels; c++) \
*dest++ = raw_samples[c][sample] * (1U << shift); \
*dest++ = raw_samples[c*raw_step + sample] * (1U << shift); \
} else { \
for (sample = 0; sample < ctx->cur_frame_length; sample++) \
for (c = 0; c < channels; c++) \
*dest++ = raw_samples[sconf->chan_pos[c]][sample] * (1U << shift);\
*dest++ = raw_samples[sconf->chan_pos[c]*raw_step + sample] * (1U << shift);\
} \
}

View File

@ -451,7 +451,7 @@ static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buff
int64_t timestamp = AV_NOPTS_VALUE;
int64_t size = buffer->pVtbl->GetSize(buffer);
if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0) {
if ((ret = av_new_packet(pkt, size)) < 0) {
return ret;
}
memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);

View File

@ -1234,7 +1234,7 @@ static void predictor_decode_mono_3950(APEContext *ctx, int count)
p->buf = p->historybuffer;
}
p->filterA[0] = currentA + ((int)(p->filterA[0] * 31U) >> 5);
p->filterA[0] = currentA + (unsigned)((int)(p->filterA[0] * 31U) >> 5);
*(decoded0++) = p->filterA[0];
}
@ -1272,7 +1272,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
f->adaptcoeffs - order,
order, APESIGN(*data));
res = (int)(res + (1U << (fracbits - 1))) >> fracbits;
res += *data;
res += (unsigned)*data;
*data++ = res;
/* Update the output history */
@ -1302,7 +1302,7 @@ static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
else
*f->adaptcoeffs = 0;
f->avg += (absres - f->avg) / 16;
f->avg += (int)(absres - (unsigned)f->avg) / 16;
f->adaptcoeffs[-1] >>= 1;
f->adaptcoeffs[-2] >>= 1;

View File

@ -964,7 +964,7 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
return AVERROR_INVALIDDATA;
}
if (avctx->block_align >= UINT_MAX / 2 || avctx->block_align <= 0)
if (avctx->block_align > 1024 || avctx->block_align <= 0)
return AVERROR(EINVAL);
q->decoded_bytes_buffer = av_mallocz(FFALIGN(avctx->block_align, 4) +

View File

@ -187,7 +187,7 @@ static inline void calc_precision(ATRAC9Context *s, ATRAC9BlockData *b,
for (int i = 0; i < b->q_unit_cnt; i++) {
c->precision_fine[i] = 0;
if (c->precision_coarse[i] > 15) {
c->precision_fine[i] = c->precision_coarse[i] - 15;
c->precision_fine[i] = FFMIN(c->precision_coarse[i], 30) - 15;
c->precision_coarse[i] = 15;
}
}
@ -199,7 +199,7 @@ static inline int parse_band_ext(ATRAC9Context *s, ATRAC9BlockData *b,
int ext_band = 0;
if (b->has_band_ext) {
if (b->q_unit_cnt < 13)
if (b->q_unit_cnt < 13 || b->q_unit_cnt > 20)
return AVERROR_INVALIDDATA;
ext_band = at9_tab_band_ext_group[b->q_unit_cnt - 13][2];
if (stereo) {

View File

@ -23,6 +23,7 @@
#include "av1_parse.h"
#include "cbs.h"
#include "cbs_av1.h"
#include "internal.h"
#include "parser.h"
typedef struct AV1ParseContext {
@ -100,6 +101,9 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
else
continue;
if (obu->header.spatial_id > 0)
continue;
if (frame->show_existing_frame) {
AV1ReferenceFrameState *ref = &av1->ref[frame->frame_to_show_map_idx];
@ -155,6 +159,12 @@ static int av1_parser_parse(AVCodecParserContext *ctx,
break;
}
av_assert2(ctx->format != AV_PIX_FMT_NONE);
if (ctx->width != avctx->width || ctx->height != avctx->height) {
ret = ff_set_dimensions(avctx, ctx->width, ctx->height);
if (ret < 0)
goto end;
}
}
if (avctx->framerate.num)

View File

@ -458,6 +458,8 @@ enum AVCodecID {
AV_CODEC_ID_LSCR,
AV_CODEC_ID_VP4,
AV_CODEC_ID_IMM5,
AV_CODEC_ID_MVDV,
AV_CODEC_ID_MVHA,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -5946,11 +5948,13 @@ int av_bsf_init(AVBSFContext *ctx);
*
* @param pkt the packet to filter. The bitstream filter will take ownership of
* the packet and reset the contents of pkt. pkt is not touched if an error occurs.
* This parameter may be NULL, which signals the end of the stream (i.e. no more
* packets will be sent). That will cause the filter to output any packets it
* may have buffered internally.
* If pkt is empty (i.e. NULL, or pkt->data is NULL and pkt->side_data_elems zero),
* it signals the end of the stream (i.e. no more non-empty packets will be sent;
* sending more empty packets does nothing) and will cause the filter to output
* any packets it may have buffered internally.
*
* @return 0 on success, a negative AVERROR on error.
* @return 0 on success, a negative AVERROR on error. This function never fails if
* pkt is empty.
*/
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);

View File

@ -95,10 +95,12 @@ int ff_cbs_init(CodedBitstreamContext **ctx_ptr,
ctx->log_ctx = log_ctx;
ctx->codec = type;
ctx->priv_data = av_mallocz(ctx->codec->priv_data_size);
if (!ctx->priv_data) {
av_freep(&ctx);
return AVERROR(ENOMEM);
if (type->priv_data_size) {
ctx->priv_data = av_mallocz(ctx->codec->priv_data_size);
if (!ctx->priv_data) {
av_freep(&ctx);
return AVERROR(ENOMEM);
}
}
ctx->decompose_unit_types = NULL;
@ -120,6 +122,7 @@ void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
if (ctx->codec && ctx->codec->close)
ctx->codec->close(ctx);
av_freep(&ctx->write_buffer);
av_freep(&ctx->priv_data);
av_freep(ctx_ptr);
}
@ -280,6 +283,59 @@ int ff_cbs_read(CodedBitstreamContext *ctx,
return cbs_read_fragment_content(ctx, frag);
}
static int cbs_write_unit_data(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
PutBitContext pbc;
int ret;
if (!ctx->write_buffer) {
// Initial write buffer size is 1MB.
ctx->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
ret = av_reallocp(&ctx->write_buffer, ctx->write_buffer_size);
if (ret < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", ctx->write_buffer_size);
return ret;
}
}
init_put_bits(&pbc, ctx->write_buffer, ctx->write_buffer_size);
ret = ctx->codec->write_unit(ctx, unit, &pbc);
if (ret < 0) {
if (ret == AVERROR(ENOSPC)) {
// Overflow.
if (ctx->write_buffer_size == INT_MAX / 8)
return AVERROR(ENOMEM);
ctx->write_buffer_size = FFMIN(2 * ctx->write_buffer_size, INT_MAX / 8);
goto reallocate_and_try_again;
}
// Write failed for some other reason.
return ret;
}
// Overflow but we didn't notice.
av_assert0(put_bits_count(&pbc) <= 8 * ctx->write_buffer_size);
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
flush_put_bits(&pbc);
ret = ff_cbs_alloc_unit_data(ctx, unit, put_bits_count(&pbc) / 8);
if (ret < 0)
return ret;
memcpy(unit->data, ctx->write_buffer, unit->data_size);
return 0;
}
int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag)
@ -295,7 +351,7 @@ int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
av_buffer_unref(&unit->data_ref);
unit->data = NULL;
err = ctx->codec->write_unit(ctx, unit);
err = cbs_write_unit_data(ctx, unit);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Failed to write unit %d "
"(type %"PRIu32").\n", i, unit->type);

View File

@ -210,6 +210,13 @@ typedef struct CodedBitstreamContext {
* From AV_LOG_*; defaults to AV_LOG_TRACE.
*/
int trace_level;
/**
* Write buffer. Used as intermediate buffer when writing units.
* For internal use of cbs only.
*/
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamContext;

View File

@ -939,6 +939,8 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
priv->spatial_id = 0;
}
priv->ref = (AV1ReferenceFrameState *)&priv->read_ref;
switch (obu->header.obu_type) {
case AV1_OBU_SEQUENCE_HEADER:
{
@ -1037,6 +1039,7 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
if (obu->obu_size > 0 &&
obu->header.obu_type != AV1_OBU_TILE_GROUP &&
obu->header.obu_type != AV1_OBU_TILE_LIST &&
obu->header.obu_type != AV1_OBU_FRAME) {
int nb_bits = obu->obu_size * 8 + start_pos - end_pos;
@ -1081,6 +1084,8 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
td = NULL;
start_pos = put_bits_count(pbc);
priv->ref = (AV1ReferenceFrameState *)&priv->write_ref;
switch (obu->header.obu_type) {
case AV1_OBU_SEQUENCE_HEADER:
{
@ -1199,66 +1204,19 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
return AVERROR(ENOSPC);
if (obu->obu_size > 0) {
memmove(priv->write_buffer + data_pos,
priv->write_buffer + start_pos, header_size);
memmove(pbc->buf + data_pos,
pbc->buf + start_pos, header_size);
skip_put_bytes(pbc, header_size);
if (td) {
memcpy(priv->write_buffer + data_pos + header_size,
memcpy(pbc->buf + data_pos + header_size,
td->data, td->data_size);
skip_put_bytes(pbc, td->data_size);
}
}
return 0;
}
static int cbs_av1_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
CodedBitstreamAV1Context *priv = ctx->priv_data;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
err = cbs_av1_write_obu(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (err < 0)
return err;
// Overflow but we didn't notice.
av_assert0(put_bits_count(&pbc) <= 8 * priv->write_buffer_size);
// OBU data must be byte-aligned.
av_assert0(put_bits_count(&pbc) % 8 == 0);
unit->data_size = put_bits_count(&pbc) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
av_assert0(put_bits_count(pbc) % 8 == 0);
return 0;
}
@ -1297,8 +1255,6 @@ static void cbs_av1_close(CodedBitstreamContext *ctx)
av_buffer_unref(&priv->sequence_header_ref);
av_buffer_unref(&priv->frame_header_ref);
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_av1 = {
@ -1308,7 +1264,7 @@ const CodedBitstreamType ff_cbs_type_av1 = {
.split_fragment = &cbs_av1_split_fragment,
.read_unit = &cbs_av1_read_unit,
.write_unit = &cbs_av1_write_unit,
.write_unit = &cbs_av1_write_obu,
.assemble_fragment = &cbs_av1_assemble_fragment,
.close = &cbs_av1_close,

View File

@ -441,11 +441,9 @@ typedef struct CodedBitstreamAV1Context {
int tile_cols;
int tile_rows;
AV1ReferenceFrameState ref[AV1_NUM_REF_FRAMES];
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
AV1ReferenceFrameState *ref;
AV1ReferenceFrameState read_ref[AV1_NUM_REF_FRAMES];
AV1ReferenceFrameState write_ref[AV1_NUM_REF_FRAMES];
} CodedBitstreamAV1Context;

View File

@ -339,6 +339,117 @@ static int FUNC(temporal_delimiter_obu)(CodedBitstreamContext *ctx, RWContext *r
return 0;
}
static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw,
AV1RawFrameHeader *current)
{
CodedBitstreamAV1Context *priv = ctx->priv_data;
const AV1RawSequenceHeader *seq = priv->sequence_header;
static const uint8_t ref_frame_list[AV1_NUM_REF_FRAMES - 2] = {
AV1_REF_FRAME_LAST2, AV1_REF_FRAME_LAST3, AV1_REF_FRAME_BWDREF,
AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF
};
int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES];
int8_t shifted_order_hints[AV1_NUM_REF_FRAMES];
int cur_frame_hint, latest_order_hint, earliest_order_hint, ref;
int i, j;
for (i = 0; i < AV1_REFS_PER_FRAME; i++)
ref_frame_idx[i] = -1;
ref_frame_idx[AV1_REF_FRAME_LAST - AV1_REF_FRAME_LAST] = current->last_frame_idx;
ref_frame_idx[AV1_REF_FRAME_GOLDEN - AV1_REF_FRAME_LAST] = current->golden_frame_idx;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
used_frame[i] = 0;
used_frame[current->last_frame_idx] = 1;
used_frame[current->golden_frame_idx] = 1;
cur_frame_hint = 1 << (seq->order_hint_bits_minus_1);
for (i = 0; i < AV1_NUM_REF_FRAMES; i++)
shifted_order_hints[i] = cur_frame_hint +
cbs_av1_get_relative_dist(seq, priv->ref[i].order_hint,
current->order_hint);
latest_order_hint = shifted_order_hints[current->last_frame_idx];
earliest_order_hint = shifted_order_hints[current->golden_frame_idx];
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (!used_frame[i] && hint >= cur_frame_hint &&
(ref < 0 || hint >= latest_order_hint)) {
ref = i;
latest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[AV1_REF_FRAME_ALTREF - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (!used_frame[i] && hint >= cur_frame_hint &&
(ref < 0 || hint < earliest_order_hint)) {
ref = i;
earliest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[AV1_REF_FRAME_BWDREF - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (!used_frame[i] && hint >= cur_frame_hint &&
(ref < 0 || hint < earliest_order_hint)) {
ref = i;
earliest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[AV1_REF_FRAME_ALTREF2 - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) {
int ref_frame = ref_frame_list[i];
if (ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] < 0 ) {
ref = -1;
for (j = 0; j < AV1_NUM_REF_FRAMES; j++) {
int hint = shifted_order_hints[j];
if (!used_frame[j] && hint < cur_frame_hint &&
(ref < 0 || hint >= latest_order_hint)) {
ref = j;
latest_order_hint = hint;
}
}
if (ref >= 0) {
ref_frame_idx[ref_frame - AV1_REF_FRAME_LAST] = ref;
used_frame[ref] = 1;
}
}
}
ref = -1;
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
int hint = shifted_order_hints[i];
if (ref < 0 || hint < earliest_order_hint) {
ref = i;
earliest_order_hint = hint;
}
}
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
if (ref_frame_idx[i] < 0)
ref_frame_idx[i] = ref;
infer(ref_frame_idx[i], ref_frame_idx[i]);
}
return 0;
}
static int FUNC(superres_params)(CodedBitstreamContext *ctx, RWContext *rw,
AV1RawFrameHeader *current)
{
@ -419,17 +530,16 @@ static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
flags(found_ref[i], 1, i);
if (current->found_ref[i]) {
AV1ReferenceFrameState *ref;
AV1ReferenceFrameState *ref =
&priv->ref[current->ref_frame_idx[i]];
if (current->ref_frame_idx[i] < 0 ||
!priv->ref[current->ref_frame_idx[i]].valid) {
if (!ref->valid) {
av_log(ctx->log_ctx, AV_LOG_ERROR,
"Missing reference frame needed for frame size "
"(ref = %d, ref_frame_idx = %d).\n",
i, current->ref_frame_idx[i]);
return AVERROR_INVALIDDATA;
}
ref = &priv->ref[current->ref_frame_idx[i]];
priv->upscaled_width = ref->upscaled_width;
priv->frame_width = ref->frame_width;
@ -882,7 +992,7 @@ static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
forward_idx = -1;
backward_idx = -1;
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
ref_hint = priv->ref[i].order_hint;
ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
dist = cbs_av1_get_relative_dist(seq, ref_hint,
current->order_hint);
if (dist < 0) {
@ -913,7 +1023,7 @@ static int FUNC(skip_mode_params)(CodedBitstreamContext *ctx, RWContext *rw,
second_forward_idx = -1;
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
ref_hint = priv->ref[i].order_hint;
ref_hint = priv->ref[current->ref_frame_idx[i]].order_hint;
if (cbs_av1_get_relative_dist(seq, ref_hint,
forward_hint) < 0) {
if (second_forward_idx < 0 ||
@ -1307,16 +1417,7 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
if (current->frame_refs_short_signaling) {
fb(3, last_frame_idx);
fb(3, golden_frame_idx);
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
if (i == 0)
infer(ref_frame_idx[i], current->last_frame_idx);
else if (i == AV1_REF_FRAME_GOLDEN -
AV1_REF_FRAME_LAST)
infer(ref_frame_idx[i], current->golden_frame_idx);
else
infer(ref_frame_idx[i], -1);
}
CHECK(FUNC(set_frame_refs)(ctx, rw, current));
}
}

View File

@ -1101,7 +1101,7 @@ static int cbs_h2645_write_slice_data(CodedBitstreamContext *ctx,
const uint8_t *pos = data + data_bit_start / 8;
av_assert0(data_bit_start >= 0 &&
8 * data_size > data_bit_start);
data_size > data_bit_start / 8);
if (data_size * 8 + 8 > put_bits_left(pbc))
return AVERROR(ENOSPC);
@ -1380,65 +1380,6 @@ static int cbs_h265_write_nal_unit(CodedBitstreamContext *ctx,
return 0;
}
static int cbs_h2645_write_nal_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
{
CodedBitstreamH2645Context *priv = ctx->priv_data;
enum AVCodecID codec_id = ctx->codec->codec_id;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
if (codec_id == AV_CODEC_ID_H264)
err = cbs_h264_write_nal_unit(ctx, unit, &pbc);
else
err = cbs_h265_write_nal_unit(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
// Overflow but we didn't notice.
av_assert0(put_bits_count(&pbc) <= 8 * priv->write_buffer_size);
if (err < 0) {
// Write failed for some other reason.
return err;
}
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
unit->data_size = (put_bits_count(&pbc) + 7) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
}
static int cbs_h2645_assemble_fragment(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag)
{
@ -1454,7 +1395,7 @@ static int cbs_h2645_assemble_fragment(CodedBitstreamContext *ctx,
max_size = 0;
for (i = 0; i < frag->nb_units; i++) {
// Start code + content with worst-case emulation prevention.
max_size += 3 + frag->units[i].data_size * 3 / 2;
max_size += 4 + frag->units[i].data_size * 3 / 2;
}
data = av_realloc(NULL, max_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -1533,8 +1474,6 @@ static void cbs_h264_close(CodedBitstreamContext *ctx)
ff_h2645_packet_uninit(&h264->common.read_packet);
av_freep(&h264->common.write_buffer);
for (i = 0; i < FF_ARRAY_ELEMS(h264->sps); i++)
av_buffer_unref(&h264->sps_ref[i]);
for (i = 0; i < FF_ARRAY_ELEMS(h264->pps); i++)
@ -1548,8 +1487,6 @@ static void cbs_h265_close(CodedBitstreamContext *ctx)
ff_h2645_packet_uninit(&h265->common.read_packet);
av_freep(&h265->common.write_buffer);
for (i = 0; i < FF_ARRAY_ELEMS(h265->vps); i++)
av_buffer_unref(&h265->vps_ref[i]);
for (i = 0; i < FF_ARRAY_ELEMS(h265->sps); i++)
@ -1565,7 +1502,7 @@ const CodedBitstreamType ff_cbs_type_h264 = {
.split_fragment = &cbs_h2645_split_fragment,
.read_unit = &cbs_h264_read_nal_unit,
.write_unit = &cbs_h2645_write_nal_unit,
.write_unit = &cbs_h264_write_nal_unit,
.assemble_fragment = &cbs_h2645_assemble_fragment,
.close = &cbs_h264_close,
@ -1578,7 +1515,7 @@ const CodedBitstreamType ff_cbs_type_h265 = {
.split_fragment = &cbs_h2645_split_fragment,
.read_unit = &cbs_h265_read_nal_unit,
.write_unit = &cbs_h2645_write_nal_unit,
.write_unit = &cbs_h265_write_nal_unit,
.assemble_fragment = &cbs_h2645_assemble_fragment,
.close = &cbs_h265_close,

View File

@ -19,9 +19,6 @@
#ifndef AVCODEC_CBS_H2645_H
#define AVCODEC_CBS_H2645_H
#include <stddef.h>
#include <stdint.h>
#include "h2645_parse.h"
@ -33,10 +30,6 @@ typedef struct CodedBitstreamH2645Context {
int nal_length_size;
// Packet reader.
H2645Packet read_packet;
// Write buffer
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamH2645Context;

View File

@ -44,9 +44,11 @@ typedef struct CodedBitstreamType {
int (*read_unit)(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
// Write the unit->data bitstream from unit->content.
// Write the data bitstream from unit->content into pbc.
// Return value AVERROR(ENOSPC) indicates that pbc was too small.
int (*write_unit)(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
CodedBitstreamUnit *unit,
PutBitContext *pbc);
// Read the data from all of frag->units and assemble it into
// a bitstream for the whole fragment.

View File

@ -377,58 +377,13 @@ static int cbs_jpeg_write_segment(CodedBitstreamContext *ctx,
}
static int cbs_jpeg_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit,
PutBitContext *pbc)
{
CodedBitstreamJPEGContext *priv = ctx->priv_data;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
if (unit->type == JPEG_MARKER_SOS)
err = cbs_jpeg_write_scan(ctx, unit, &pbc);
return cbs_jpeg_write_scan (ctx, unit, pbc);
else
err = cbs_jpeg_write_segment(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (err < 0) {
// Write failed for some other reason.
return err;
}
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
unit->data_size = (put_bits_count(&pbc) + 7) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
return cbs_jpeg_write_segment(ctx, unit, pbc);
}
static int cbs_jpeg_assemble_fragment(CodedBitstreamContext *ctx,
@ -499,22 +454,11 @@ static int cbs_jpeg_assemble_fragment(CodedBitstreamContext *ctx,
return 0;
}
static void cbs_jpeg_close(CodedBitstreamContext *ctx)
{
CodedBitstreamJPEGContext *priv = ctx->priv_data;
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_jpeg = {
.codec_id = AV_CODEC_ID_MJPEG,
.priv_data_size = sizeof(CodedBitstreamJPEGContext),
.split_fragment = &cbs_jpeg_split_fragment,
.read_unit = &cbs_jpeg_read_unit,
.write_unit = &cbs_jpeg_write_unit,
.assemble_fragment = &cbs_jpeg_assemble_fragment,
.close = &cbs_jpeg_close,
};

View File

@ -120,11 +120,4 @@ typedef struct JPEGRawComment {
} JPEGRawComment;
typedef struct CodedBitstreamJPEGContext {
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamJPEGContext;
#endif /* AVCODEC_CBS_JPEG_H */

View File

@ -337,7 +337,7 @@ static int cbs_mpeg2_write_slice(CodedBitstreamContext *ctx,
uint8_t *pos = slice->data + slice->data_bit_start / 8;
av_assert0(slice->data_bit_start >= 0 &&
8 * slice->data_size > slice->data_bit_start);
slice->data_size > slice->data_bit_start / 8);
if (slice->data_size * 8 + 8 > put_bits_left(pbc))
return AVERROR(ENOSPC);
@ -371,58 +371,13 @@ static int cbs_mpeg2_write_slice(CodedBitstreamContext *ctx,
}
static int cbs_mpeg2_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit,
PutBitContext *pbc)
{
CodedBitstreamMPEG2Context *priv = ctx->priv_data;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
if (MPEG2_START_IS_SLICE(unit->type))
err = cbs_mpeg2_write_slice(ctx, unit, &pbc);
return cbs_mpeg2_write_slice (ctx, unit, pbc);
else
err = cbs_mpeg2_write_header(ctx, unit, &pbc);
if (err == AVERROR(ENOSPC)) {
// Overflow.
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (err < 0) {
// Write failed for some other reason.
return err;
}
if (put_bits_count(&pbc) % 8)
unit->data_bit_padding = 8 - put_bits_count(&pbc) % 8;
else
unit->data_bit_padding = 0;
unit->data_size = (put_bits_count(&pbc) + 7) / 8;
flush_put_bits(&pbc);
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
return cbs_mpeg2_write_header(ctx, unit, pbc);
}
static int cbs_mpeg2_assemble_fragment(CodedBitstreamContext *ctx,
@ -462,13 +417,6 @@ static int cbs_mpeg2_assemble_fragment(CodedBitstreamContext *ctx,
return 0;
}
static void cbs_mpeg2_close(CodedBitstreamContext *ctx)
{
CodedBitstreamMPEG2Context *priv = ctx->priv_data;
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_mpeg2 = {
.codec_id = AV_CODEC_ID_MPEG2VIDEO,
@ -478,6 +426,4 @@ const CodedBitstreamType ff_cbs_type_mpeg2 = {
.read_unit = &cbs_mpeg2_read_unit,
.write_unit = &cbs_mpeg2_write_unit,
.assemble_fragment = &cbs_mpeg2_assemble_fragment,
.close = &cbs_mpeg2_close,
};

View File

@ -225,10 +225,6 @@ typedef struct CodedBitstreamMPEG2Context {
uint8_t scalable_mode;
uint8_t progressive_sequence;
uint8_t number_of_frame_centre_offsets;
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamMPEG2Context;

View File

@ -522,62 +522,28 @@ static int cbs_vp9_read_unit(CodedBitstreamContext *ctx,
}
static int cbs_vp9_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit)
CodedBitstreamUnit *unit,
PutBitContext *pbc)
{
CodedBitstreamVP9Context *priv = ctx->priv_data;
VP9RawFrame *frame = unit->content;
PutBitContext pbc;
int err;
if (!priv->write_buffer) {
// Initial write buffer size is 1MB.
priv->write_buffer_size = 1024 * 1024;
reallocate_and_try_again:
err = av_reallocp(&priv->write_buffer, priv->write_buffer_size);
if (err < 0) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Unable to allocate a "
"sufficiently large write buffer (last attempt "
"%"SIZE_SPECIFIER" bytes).\n", priv->write_buffer_size);
return err;
}
}
init_put_bits(&pbc, priv->write_buffer, priv->write_buffer_size);
err = cbs_vp9_write_frame(ctx, &pbc, frame);
if (err == AVERROR(ENOSPC)) {
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
err = cbs_vp9_write_frame(ctx, pbc, frame);
if (err < 0)
return err;
// Frame must be byte-aligned.
av_assert0(put_bits_count(&pbc) % 8 == 0);
unit->data_size = put_bits_count(&pbc) / 8;
unit->data_bit_padding = 0;
flush_put_bits(&pbc);
av_assert0(put_bits_count(pbc) % 8 == 0);
if (frame->data) {
if (unit->data_size + frame->data_size >
priv->write_buffer_size) {
priv->write_buffer_size *= 2;
goto reallocate_and_try_again;
}
if (frame->data_size > put_bits_left(pbc) / 8)
return AVERROR(ENOSPC);
memcpy(priv->write_buffer + unit->data_size,
frame->data, frame->data_size);
unit->data_size += frame->data_size;
flush_put_bits(pbc);
memcpy(put_bits_ptr(pbc), frame->data, frame->data_size);
skip_put_bytes(pbc, frame->data_size);
}
err = ff_cbs_alloc_unit_data(ctx, unit, unit->data_size);
if (err < 0)
return err;
memcpy(unit->data, priv->write_buffer, unit->data_size);
return 0;
}
@ -671,13 +637,6 @@ static int cbs_vp9_assemble_fragment(CodedBitstreamContext *ctx,
return 0;
}
static void cbs_vp9_close(CodedBitstreamContext *ctx)
{
CodedBitstreamVP9Context *priv = ctx->priv_data;
av_freep(&priv->write_buffer);
}
const CodedBitstreamType ff_cbs_type_vp9 = {
.codec_id = AV_CODEC_ID_VP9,
@ -687,6 +646,4 @@ const CodedBitstreamType ff_cbs_type_vp9 = {
.read_unit = &cbs_vp9_read_unit,
.write_unit = &cbs_vp9_write_unit,
.assemble_fragment = &cbs_vp9_assemble_fragment,
.close = &cbs_vp9_close,
};

View File

@ -207,10 +207,6 @@ typedef struct CodedBitstreamVP9Context {
int bit_depth;
VP9ReferenceFrameState ref[VP9_NUM_REF_FRAMES];
// Write buffer.
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamVP9Context;

View File

@ -173,7 +173,7 @@ AVCodec ff_comfortnoise_decoder = {
.close = cng_decode_close,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
};

View File

@ -1733,6 +1733,20 @@ static const AVCodecDescriptor codec_descriptors[] = {
.long_name = NULL_IF_CONFIG_SMALL("Infinity IMM5"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MVDV,
.type = AVMEDIA_TYPE_VIDEO,
.name = "mvdv",
.long_name = NULL_IF_CONFIG_SMALL("MidiVid VQ"),
.props = AV_CODEC_PROP_LOSSY,
},
{
.id = AV_CODEC_ID_MVHA,
.type = AVMEDIA_TYPE_VIDEO,
.name = "mvha",
.long_name = NULL_IF_CONFIG_SMALL("MidiVid Archive Codec"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
},
/* various PCM "codecs" */
{
@ -2994,6 +3008,7 @@ static const AVCodecDescriptor codec_descriptors[] = {
},
{
.id = AV_CODEC_ID_ACELP_KELVIN,
.type = AVMEDIA_TYPE_AUDIO,
.name = "acelp.kelvin",
.long_name = NULL_IF_CONFIG_SMALL("Sipro ACELP.KELVIN"),
.props = AV_CODEC_PROP_LOSSY,

View File

@ -83,6 +83,7 @@ enum dv_pack_type {
#define DV_PROFILE_IS_HD(p) ((p)->video_stype & 0x10)
#define DV_PROFILE_IS_1080i50(p) (((p)->video_stype == 0x14) && ((p)->dsf == 1))
#define DV_PROFILE_IS_1080i60(p) (((p)->video_stype == 0x14) && ((p)->dsf == 0))
#define DV_PROFILE_IS_720p50(p) (((p)->video_stype == 0x18) && ((p)->dsf == 1))
/**

View File

@ -272,11 +272,10 @@ static inline void bit_copy(PutBitContext *pb, GetBitContext *gb)
static av_always_inline void put_block_8x4(int16_t *block, uint8_t *av_restrict p, int stride)
{
int i, j;
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
for (i = 0; i < 4; i++) {
for (j = 0; j < 8; j++)
p[j] = cm[block[j]];
p[j] = av_clip_uint8(block[j]);
block += 8;
p += stride;
}

View File

@ -60,10 +60,7 @@ static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
ff_dv_print_profiles(avctx, AV_LOG_ERROR);
return AVERROR(EINVAL);
}
if (avctx->height > 576) {
av_log(avctx, AV_LOG_ERROR, "DVCPRO HD encoding is not supported.\n");
return AVERROR_PATCHWELCOME;
}
ret = ff_dv_init_dynamic_tables(s, s->sys);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Error initializing work tables.\n");
@ -90,6 +87,7 @@ static av_cold int dvvideo_encode_init(AVCodecContext *avctx)
}
/* bit budget for AC only in 5 MBs */
static const int vs_total_ac_bits_hd = (68 * 6 + 52*2) * 5;
static const int vs_total_ac_bits = (100 * 4 + 68 * 2) * 5;
static const int mb_area_start[5] = { 1, 6, 21, 43, 64 };
@ -158,6 +156,11 @@ typedef struct EncBlockInfo {
uint8_t sign[64];
uint8_t partial_bit_count;
uint32_t partial_bit_buffer; /* we can't use uint16_t here */
/* used by DV100 only: a copy of the weighted and classified but
not-yet-quantized AC coefficients. This is necessary for
re-quantizing at different steps. */
int16_t save[64];
int min_qlevel; /* DV100 only: minimum qlevel (for AC coefficients >255) */
} EncBlockInfo;
static av_always_inline PutBitContext *dv_encode_ac(EncBlockInfo *bi,
@ -243,13 +246,123 @@ static const int dv_weight_248[64] = {
170627, 170627, 153560, 153560, 165371, 165371, 144651, 144651,
};
static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
ptrdiff_t linesize,
DVVideoContext *s, int bias)
/* setting this to 1 results in a faster codec but
* somewhat lower image quality */
#define DV100_SACRIFICE_QUALITY_FOR_SPEED 1
#define DV100_ENABLE_FINER 1
/* pack combination of QNO and CNO into a single 8-bit value */
#define DV100_MAKE_QLEVEL(qno,cno) ((qno<<2) | (cno))
#define DV100_QLEVEL_QNO(qlevel) (qlevel>>2)
#define DV100_QLEVEL_CNO(qlevel) (qlevel&0x3)
#define DV100_NUM_QLEVELS 31
/* The quantization step is determined by a combination of QNO and
CNO. We refer to these combinations as "qlevels" (this term is our
own, it's not mentioned in the spec). We use CNO, a multiplier on
the quantization step, to "fill in the gaps" between quantization
steps associated with successive values of QNO. e.g. there is no
QNO for a quantization step of 10, but we can use QNO=5 CNO=1 to
get the same result. The table below encodes combinations of QNO
and CNO in order of increasing quantization coarseness. */
static const uint8_t dv100_qlevels[DV100_NUM_QLEVELS] = {
DV100_MAKE_QLEVEL( 1,0), // 1*1= 1
DV100_MAKE_QLEVEL( 1,0), // 1*1= 1
DV100_MAKE_QLEVEL( 2,0), // 2*1= 2
DV100_MAKE_QLEVEL( 3,0), // 3*1= 3
DV100_MAKE_QLEVEL( 4,0), // 4*1= 4
DV100_MAKE_QLEVEL( 5,0), // 5*1= 5
DV100_MAKE_QLEVEL( 6,0), // 6*1= 6
DV100_MAKE_QLEVEL( 7,0), // 7*1= 7
DV100_MAKE_QLEVEL( 8,0), // 8*1= 8
DV100_MAKE_QLEVEL( 5,1), // 5*2=10
DV100_MAKE_QLEVEL( 6,1), // 6*2=12
DV100_MAKE_QLEVEL( 7,1), // 7*2=14
DV100_MAKE_QLEVEL( 9,0), // 16*1=16
DV100_MAKE_QLEVEL(10,0), // 18*1=18
DV100_MAKE_QLEVEL(11,0), // 20*1=20
DV100_MAKE_QLEVEL(12,0), // 22*1=22
DV100_MAKE_QLEVEL(13,0), // 24*1=24
DV100_MAKE_QLEVEL(14,0), // 28*1=28
DV100_MAKE_QLEVEL( 9,1), // 16*2=32
DV100_MAKE_QLEVEL(10,1), // 18*2=36
DV100_MAKE_QLEVEL(11,1), // 20*2=40
DV100_MAKE_QLEVEL(12,1), // 22*2=44
DV100_MAKE_QLEVEL(13,1), // 24*2=48
DV100_MAKE_QLEVEL(15,0), // 52*1=52
DV100_MAKE_QLEVEL(14,1), // 28*2=56
DV100_MAKE_QLEVEL( 9,2), // 16*4=64
DV100_MAKE_QLEVEL(10,2), // 18*4=72
DV100_MAKE_QLEVEL(11,2), // 20*4=80
DV100_MAKE_QLEVEL(12,2), // 22*4=88
DV100_MAKE_QLEVEL(13,2), // 24*4=96
// ...
DV100_MAKE_QLEVEL(15,3), // 52*8=416
};
static const int dv100_min_bias = 0;
static const int dv100_chroma_bias = 0;
static const int dv100_starting_qno = 1;
#if DV100_SACRIFICE_QUALITY_FOR_SPEED
static const int dv100_qlevel_inc = 4;
#else
static const int dv100_qlevel_inc = 1;
#endif
// 1/qstep, shifted up by 16 bits
static const int dv100_qstep_bits = 16;
static const int dv100_qstep_inv[16] = {
65536, 65536, 32768, 21845, 16384, 13107, 10923, 9362, 8192, 4096, 3641, 3277, 2979, 2731, 2341, 1260,
};
/* DV100 weights are pre-zigzagged, inverted and multiplied by 2^(dv100_weight_shift)
(in DV100 the AC components are divided by the spec weights) */
static const int dv100_weight_shift = 16;
static const int dv_weight_1080[2][64] = {
{ 8192, 65536, 65536, 61681, 61681, 61681, 58254, 58254,
58254, 58254, 58254, 58254, 55188, 58254, 58254, 55188,
55188, 55188, 55188, 55188, 55188, 24966, 27594, 26214,
26214, 26214, 27594, 24966, 23831, 24385, 25575, 25575,
25575, 25575, 24385, 23831, 23302, 23302, 24966, 24966,
24966, 23302, 23302, 21845, 22795, 24385, 24385, 22795,
21845, 21400, 21845, 23831, 21845, 21400, 10382, 10700,
10700, 10382, 10082, 9620, 10082, 9039, 9039, 8525, },
{ 8192, 65536, 65536, 61681, 61681, 61681, 41943, 41943,
41943, 41943, 40330, 41943, 40330, 41943, 40330, 40330,
40330, 38836, 38836, 40330, 40330, 24966, 27594, 26214,
26214, 26214, 27594, 24966, 23831, 24385, 25575, 25575,
25575, 25575, 24385, 23831, 11523, 11523, 12483, 12483,
12483, 11523, 11523, 10923, 11275, 12193, 12193, 11275,
10923, 5323, 5490, 5924, 5490, 5323, 5165, 5323,
5323, 5165, 5017, 4788, 5017, 4520, 4520, 4263, }
};
static const int dv_weight_720[2][64] = {
{ 8192, 65536, 65536, 61681, 61681, 61681, 58254, 58254,
58254, 58254, 58254, 58254, 55188, 58254, 58254, 55188,
55188, 55188, 55188, 55188, 55188, 24966, 27594, 26214,
26214, 26214, 27594, 24966, 23831, 24385, 25575, 25575,
25575, 25575, 24385, 23831, 15420, 15420, 16644, 16644,
16644, 15420, 15420, 10923, 11398, 12193, 12193, 11398,
10923, 10700, 10923, 11916, 10923, 10700, 5191, 5350,
5350, 5191, 5041, 4810, 5041, 4520, 4520, 4263, },
{ 8192, 43691, 43691, 40330, 40330, 40330, 29127, 29127,
29127, 29127, 29127, 29127, 27594, 29127, 29127, 27594,
27594, 27594, 27594, 27594, 27594, 12483, 13797, 13107,
13107, 13107, 13797, 12483, 11916, 12193, 12788, 12788,
12788, 12788, 12193, 11916, 5761, 5761, 6242, 6242,
6242, 5761, 5761, 5461, 5638, 5461, 6096, 5638,
5461, 2661, 2745, 2962, 2745, 2661, 2583, 2661,
2661, 2583, 2509, 2394, 2509, 2260, 2260, 2131, }
};
static av_always_inline int dv_set_class_number_sd(DVVideoContext *s,
int16_t *blk, EncBlockInfo *bi,
const uint8_t *zigzag_scan,
const int *weight, int bias)
{
const int *weight;
const uint8_t *zigzag_scan;
LOCAL_ALIGNED_16(int16_t, blk, [64]);
int i, area;
/* We offer two different methods for class number assignment: the
* method suggested in SMPTE 314M Table 22, and an improved
@ -271,31 +384,8 @@ static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
const unsigned deadzone = s->quant_deadzone;
const unsigned threshold = 2 * deadzone;
av_assert2((((int) blk) & 15) == 0);
bi->area_q[0] =
bi->area_q[1] =
bi->area_q[2] =
bi->area_q[3] = 0;
bi->partial_bit_count = 0;
bi->partial_bit_buffer = 0;
bi->cur_ac = 0;
if (data) {
bi->dct_mode = dv_guess_dct_mode(s, data, linesize);
s->get_pixels(blk, data, linesize);
s->fdct[bi->dct_mode](blk);
} else {
/* We rely on the fact that encoding all zeros leads to an immediate
* EOB, which is precisely what the spec calls for in the "dummy"
* blocks. */
memset(blk, 0, 64 * sizeof(*blk));
bi->dct_mode = 0;
}
bi->mb[0] = blk[0];
zigzag_scan = bi->dct_mode ? ff_dv_zigzag248_direct : ff_zigzag_direct;
weight = bi->dct_mode ? dv_weight_248 : dv_weight_88;
for (area = 0; area < 4; area++) {
bi->prev[area] = prev;
bi->bit_size[area] = 1; // 4 areas 4 bits for EOB :)
@ -350,6 +440,309 @@ static av_always_inline int dv_init_enc_block(EncBlockInfo *bi, uint8_t *data,
bi->bit_size[2] + bi->bit_size[3];
}
/* Copy the DCT coefficients into the block info and perform the initial
 * (non-)quantization pass: weigh every coefficient, record its sign and
 * magnitude, and derive the minimum usable quantization level. */
static inline void dv_set_class_number_hd(DVVideoContext *s,
                                          int16_t *blk, EncBlockInfo *bi,
                                          const uint8_t *zigzag_scan,
                                          const int *weight, int bias)
{
    int idx, peak = 0;

    /* first quantization: none at all */
    bi->area_q[0] = 1;

    /* Weigh each coefficient (in zig-zag order) and stash the unquantized
     * magnitude in save[]. Slot 0 is the DC component; it is processed here
     * too, but mb[0] is overwritten with the raw DC value below. */
    for (idx = 0; idx < 64; idx++) {
        int coeff = blk[zigzag_scan[idx]];

        /* sign stored as the lowest bit */
        bi->sign[idx] = (coeff >> 31) & 1;

        /* weighted absolute value, rounded */
        coeff = (FFABS(coeff) * weight[idx] + 4096 + (1 << 17)) >> 18;
        bi->save[idx] = coeff;

        /* track the largest weighted magnitude seen so far */
        if (coeff > peak)
            peak = coeff;
    }

    /* the DC component is passed through unweighted */
    bi->mb[0] = blk[0];

    /* only the 4-bit EOB code has been "encoded" so far */
    bi->bit_size[0] = 4;
    bi->bit_size[1] = bi->bit_size[2] = bi->bit_size[3] = 0;

    /* smallest qlevel that keeps every AC coefficient representable */
    bi->min_qlevel = (peak + 256) >> 8;

    bi->area_q[0] = 25; /* "impossible" value: forces requantization later */
    bi->cno = 0;
}
/* Initialize one encoded block: run the forward DCT on the source pixels
 * (or synthesize an all-zero "dummy" block), then classify/weigh the
 * coefficients for either the SD or the HD (DV100) code path.
 * Returns the total coded bit size of the block so far. */
static av_always_inline int dv_init_enc_block(EncBlockInfo* bi, uint8_t *data, int linesize,
                                              DVVideoContext *s, int chroma)
{
    LOCAL_ALIGNED_16(int16_t, blk, [64]);
    const int is_hd = DV_PROFILE_IS_HD(s->sys);

    bi->area_q[0]          = 0;
    bi->area_q[1]          = 0;
    bi->area_q[2]          = 0;
    bi->area_q[3]          = 0;
    bi->partial_bit_count  = 0;
    bi->partial_bit_buffer = 0;
    bi->cur_ac             = 0;

    if (!data) {
        /* Encoding all zeros leads to an immediate EOB, which is precisely
         * what the spec calls for in the "dummy" blocks. */
        memset(blk, 0, 64 * sizeof(*blk));
        bi->dct_mode = 0;
    } else if (is_hd) {
        /* for DV100 the DCT mode was already chosen by the caller */
        s->get_pixels(blk, data, linesize << bi->dct_mode);
        s->fdct[0](blk);
    } else {
        bi->dct_mode = dv_guess_dct_mode(s, data, linesize);
        s->get_pixels(blk, data, linesize);
        s->fdct[bi->dct_mode](blk);
    }

    if (is_hd) {
        const int *weights = s->sys->height == 1080 ? dv_weight_1080[chroma]  /* 1080i */
                                                    : dv_weight_720[chroma];  /* 720p  */
        dv_set_class_number_hd(s, blk, bi, ff_zigzag_direct, weights,
                               dv100_min_bias + chroma * dv100_chroma_bias);
    } else {
        dv_set_class_number_sd(s, blk, bi,
                               bi->dct_mode ? ff_dv_zigzag248_direct : ff_zigzag_direct,
                               bi->dct_mode ? dv_weight_248 : dv_weight_88,
                               chroma);
    }

    return bi->bit_size[0] + bi->bit_size[1] + bi->bit_size[2] + bi->bit_size[3];
}
/* DV100 quantization: divide an AC coefficient by the quantization step.
 * As an optimization, a fixed-point multiply by the step's reciprocal is
 * used instead of a real division. */
static av_always_inline int dv100_quantize(int level, int qsinv)
{
    /* Equivalent to (level + qs/2) / qs. The extra +1024 makes the rounding
     * come out right; the results match exact division for levels 0-2048 at
     * all QNOs (verified by DJM). */
    const int rounding = 1024 + (1 << (dv100_qstep_bits - 1));

    return (level * qsinv + rounding) >> dv100_qstep_bits;
}
/* Quantize one block at the given qlevel, rebuilding its run-length chain
 * (next[]) and coded size. Returns the block's coded size in bits.
 * A no-op if the block is already quantized at this qno/cno pair. */
static int dv100_actual_quantize(EncBlockInfo *b, int qlevel)
{
    const int qno = DV100_QLEVEL_QNO(dv100_qlevels[qlevel]);
    const int cno = DV100_QLEVEL_CNO(dv100_qlevels[qlevel]);
    int qsinv, last, pos;

    /* already quantized with these parameters: reuse the cached size */
    if (b->area_q[0] == qno && b->cno == cno)
        return b->bit_size[0];

    qsinv = dv100_qstep_inv[qno];

    /* record the new quantization step and class number */
    b->area_q[0] = qno;
    b->cno       = cno;

    /* restart the size count from the 4-bit EOB code */
    b->bit_size[0] = 4;

    /* quantize every AC coefficient, threading the nonzero ones through
     * the next[] chain used later by the VLC writer */
    last = 0;
    for (pos = 1; pos < 64; pos++) {
        int ac = dv100_quantize(b->save[pos], qsinv) >> cno;

        if (!ac)
            continue;

        ac = FFMIN(ac, 255);       /* clamp to the representable range */
        b->mb[pos]      = ac;
        b->bit_size[0] += dv_rl2vlc_size(pos - last - 1, ac);
        b->next[last]   = pos;
        last            = pos;
    }
    b->next[last] = pos;           /* terminate the chain (pos == 64) */

    return b->bit_size[0];
}
/* Choose a quantization level for each of the five macroblocks of a DV100
 * video segment so that the total coded AC size fits vs_total_ac_bits_hd.
 * Starts every macroblock at dv100_starting_qno (clamped to its minimum
 * usable qlevel), then steps coarser -- or, if DV100_ENABLE_FINER, finer --
 * one macroblock at a time until the budget is met. On return, qnos[] holds
 * the chosen qno per macroblock and every block in blks[] has been
 * quantized at its final qlevel. */
static inline void dv_guess_qnos_hd(EncBlockInfo *blks, int *qnos)
{
    EncBlockInfo *b;
    int min_qlevel[5];
    int qlevels[5];
    int size[5];
    int i, j;
    /* cache block sizes at hypothetical qlevels; 0 means "not computed yet"
     * (valid since dv100_actual_quantize always returns at least 4 bits) */
    uint16_t size_cache[5*8][DV100_NUM_QLEVELS] = {{0}};

    /* get minimum qlevels: each macroblock's floor is the largest
     * min_qlevel among its 8 blocks */
    for (i = 0; i < 5; i++) {
        min_qlevel[i] = 1;
        for (j = 0; j < 8; j++) {
            if (blks[8*i+j].min_qlevel > min_qlevel[i])
                min_qlevel[i] = blks[8*i+j].min_qlevel;
        }
    }

    /* initialize sizes at the starting qlevel */
    for (i = 0; i < 5; i++) {
        qlevels[i] = dv100_starting_qno;
        if (qlevels[i] < min_qlevel[i])
            qlevels[i] = min_qlevel[i];

        qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
        size[i] = 0;
        for (j = 0; j < 8; j++) {
            size_cache[8*i+j][qlevels[i]] = dv100_actual_quantize(&blks[8*i+j], qlevels[i]);
            size[i] += size_cache[8*i+j][qlevels[i]];
        }
    }

    /* must we go coarser? */
    if (size[0]+size[1]+size[2]+size[3]+size[4] > vs_total_ac_bits_hd) {
        int largest = size[0] % 5; /* 'random' number */
        int qlevels_done = 0;      /* macroblocks pinned at the coarsest qlevel */

        do {
            /* find the macroblock with the lowest qlevel */
            for (i = 0; i < 5; i++) {
                if (qlevels[i] < qlevels[largest])
                    largest = i;
            }

            i = largest;
            /* ensure that we don't enter infinite loop */
            largest = (largest+1) % 5;

            /* quantize a little bit more */
            qlevels[i] += dv100_qlevel_inc;
            if (qlevels[i] > DV100_NUM_QLEVELS-1) {
                qlevels[i] = DV100_NUM_QLEVELS-1;
                qlevels_done++;
            }

            qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
            size[i] = 0;

            /* for each block */
            b = &blks[8*i];
            for (j = 0; j < 8; j++, b++) {
                /* accumulate block size into macroblock */
                if(size_cache[8*i+j][qlevels[i]] == 0) {
                    /* it is safe to use actual_quantize() here because we only go from finer to coarser,
                       and it saves the final actual_quantize() down below */
                    size_cache[8*i+j][qlevels[i]] = dv100_actual_quantize(b, qlevels[i]);
                }
                size[i] += size_cache[8*i+j][qlevels[i]];
            } /* for each block */

        } while (vs_total_ac_bits_hd < size[0] + size[1] + size[2] + size[3] + size[4] && qlevels_done < 5);

        // can we go finer?
    } else if (DV100_ENABLE_FINER &&
               size[0]+size[1]+size[2]+size[3]+size[4] < vs_total_ac_bits_hd) {
        int save_qlevel;
        int largest = size[0] % 5; /* 'random' number */

        /* keep refining while at least one macroblock is above its floor */
        while (qlevels[0] > min_qlevel[0] ||
               qlevels[1] > min_qlevel[1] ||
               qlevels[2] > min_qlevel[2] ||
               qlevels[3] > min_qlevel[3] ||
               qlevels[4] > min_qlevel[4]) {
            /* find the macroblock with the highest qlevel */
            for (i = 0; i < 5; i++) {
                if (qlevels[i] > min_qlevel[i] && qlevels[i] > qlevels[largest])
                    largest = i;
            }

            i = largest;
            /* ensure that we don't enter infinite loop */
            largest = (largest+1) % 5;

            if (qlevels[i] <= min_qlevel[i]) {
                /* can't unquantize any more */
                continue;
            }
            /* quantize a little bit less */
            save_qlevel = qlevels[i];
            qlevels[i] -= dv100_qlevel_inc;
            if (qlevels[i] < min_qlevel[i])
                qlevels[i] = min_qlevel[i];

            qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);

            size[i] = 0;

            /* for each block */
            b = &blks[8*i];
            for (j = 0; j < 8; j++, b++) {
                /* accumulate block size into macroblock */
                if(size_cache[8*i+j][qlevels[i]] == 0) {
                    size_cache[8*i+j][qlevels[i]] = dv100_actual_quantize(b, qlevels[i]);
                }
                size[i] += size_cache[8*i+j][qlevels[i]];
            } /* for each block */

            /* did we bust the limit? */
            if (vs_total_ac_bits_hd < size[0] + size[1] + size[2] + size[3] + size[4]) {
                /* go back down and exit */
                qlevels[i] = save_qlevel;
                qnos[i] = DV100_QLEVEL_QNO(dv100_qlevels[qlevels[i]]);
                break;
            }
        }
    }

    /* now do the actual quantization: leave every block requantized at its
     * final qlevel (blocks already at that qlevel return early) */
    for (i = 0; i < 5; i++) {
        /* for each block */
        b = &blks[8*i];
        size[i] = 0;
        for (j = 0; j < 8; j++, b++) {
            /* accumulate block size into macroblock */
            size[i] += dv100_actual_quantize(b, qlevels[i]);
        } /* for each block */
    }
}
static inline void dv_guess_qnos(EncBlockInfo *blks, int *qnos)
{
int size[5];
@ -422,6 +815,26 @@ static inline void dv_guess_qnos(EncBlockInfo *blks, int *qnos)
}
}
/* Rewrite the class-number (cno) field of every block in the segment's DIF
 * data with the values chosen during quantization, leaving all other bits
 * untouched. (only used for DV100) */
static inline void dv_revise_cnos(uint8_t *dif, EncBlockInfo *blk, const AVDVProfile *profile)
{
    int mb, b;

    for (mb = 0; mb < 5; mb++) {
        /* skip the 4 leading bytes of each 80-byte DIF macroblock */
        uint8_t *data = dif + mb * 80 + 4;

        for (b = 0; b < profile->bpm; b++) {
            /* clear the old class number (bits 4-5 of the second byte)... */
            data[1] &= 0xCF;
            /* ...and insert the freshly chosen one */
            data[1] |= blk[profile->bpm * mb + b].cno << 4;
            /* advance to the next block within this macroblock */
            data += profile->block_sizes[b] >> 3;
        }
    }
}
static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
{
DVVideoContext *s = avctx->priv_data;
@ -430,26 +843,38 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
int mb_x, mb_y, c_offset;
ptrdiff_t linesize, y_stride;
uint8_t *y_ptr;
uint8_t *dif;
uint8_t *dif, *p;
LOCAL_ALIGNED_8(uint8_t, scratch, [128]);
EncBlockInfo enc_blks[5 * DV_MAX_BPM];
PutBitContext pbs[5 * DV_MAX_BPM];
PutBitContext *pb;
EncBlockInfo *enc_blk;
int vs_bit_size = 0;
int qnos[5] = { 15, 15, 15, 15, 15 }; /* No quantization */
int qnos[5];
int *qnosp = &qnos[0];
dif = &s->buf[work_chunk->buf_offset * 80];
p = dif = &s->buf[work_chunk->buf_offset * 80];
enc_blk = &enc_blks[0];
for (mb_index = 0; mb_index < 5; mb_index++) {
dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y);
qnos[mb_index] = DV_PROFILE_IS_HD(s->sys) ? 1 : 15;
y_ptr = s->frame->data[0] + ((mb_y * s->frame->linesize[0] + mb_x) << 3);
linesize = s->frame->linesize[0];
if (s->sys->height == 1080 && mb_y < 134)
enc_blk->dct_mode = dv_guess_dct_mode(s, y_ptr, linesize);
else
enc_blk->dct_mode = 0;
for (i = 1; i < 8; i++)
enc_blk[i].dct_mode = enc_blk->dct_mode;
/* initializing luminance blocks */
if ((s->sys->pix_fmt == AV_PIX_FMT_YUV420P) ||
(s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) ||
(s->sys->height >= 720 && mb_y != 134)) {
y_stride = s->frame->linesize[0] << 3;
y_stride = s->frame->linesize[0] << (3*!enc_blk->dct_mode);
} else {
y_stride = 16;
}
@ -478,7 +903,7 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
for (j = 2; j; j--) {
uint8_t *c_ptr = s->frame->data[j] + c_offset;
linesize = s->frame->linesize[j];
y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << 3);
y_stride = (mb_y == 134) ? 8 : (s->frame->linesize[j] << (3*!enc_blk->dct_mode));
if (s->sys->pix_fmt == AV_PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint8_t *d;
uint8_t *b = scratch;
@ -506,27 +931,31 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
}
}
if (vs_total_ac_bits < vs_bit_size)
if (DV_PROFILE_IS_HD(s->sys)) {
/* unconditional */
dv_guess_qnos_hd(&enc_blks[0], qnosp);
} else if (vs_total_ac_bits < vs_bit_size) {
dv_guess_qnos(&enc_blks[0], qnosp);
}
/* DIF encoding process */
for (j = 0; j < 5 * s->sys->bpm;) {
int start_mb = j;
dif[3] = *qnosp++;
dif += 4;
p[3] = *qnosp++;
p += 4;
/* First pass over individual cells only */
for (i = 0; i < s->sys->bpm; i++, j++) {
int sz = s->sys->block_sizes[i] >> 3;
init_put_bits(&pbs[j], dif, sz);
init_put_bits(&pbs[j], p, sz);
put_sbits(&pbs[j], 9, ((enc_blks[j].mb[0] >> 3) - 1024 + 2) >> 2);
put_bits(&pbs[j], 1, enc_blks[j].dct_mode);
put_bits(&pbs[j], 1, DV_PROFILE_IS_HD(s->sys) && i ? 1 : enc_blks[j].dct_mode);
put_bits(&pbs[j], 2, enc_blks[j].cno);
dv_encode_ac(&enc_blks[j], &pbs[j], &pbs[j + 1]);
dif += sz;
p += sz;
}
/* Second pass over each MB space */
@ -559,6 +988,9 @@ static int dv_encode_video_segment(AVCodecContext *avctx, void *arg)
memset(pbs[j].buf + pos, 0xff, size - pos);
}
if (DV_PROFILE_IS_HD(s->sys))
dv_revise_cnos(dif, enc_blks, s->sys);
return 0;
}
@ -583,12 +1015,19 @@ static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c,
* 2. It is not at all clear what STYPE is used for 4:2:0 PAL
* compression scheme (if any).
*/
int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
int fs = c->frame->top_field_first ? 0x00 : 0x40;
uint8_t aspect = 0;
if ((int) (av_q2d(c->avctx->sample_aspect_ratio) *
c->avctx->width / c->avctx->height * 10) >= 17) /* 16:9 */
int apt = (c->sys->pix_fmt == AV_PIX_FMT_YUV420P ? 0 : 1);
int fs;
if (c->avctx->height >= 720)
fs = c->avctx->height == 720 || c->frame->top_field_first ? 0x40 : 0x00;
else
fs = c->frame->top_field_first ? 0x00 : 0x40;
if (DV_PROFILE_IS_HD(c->sys) ||
(int)(av_q2d(c->avctx->sample_aspect_ratio) *
c->avctx->width / c->avctx->height * 10) >= 17)
/* HD formats are always 16:9 */
aspect = 0x02;
buf[0] = (uint8_t) pack_id;
@ -643,10 +1082,14 @@ static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num,
uint8_t seq_num, uint8_t dif_num,
uint8_t *buf)
{
int fsc = chan_num & 1;
int fsp = 1 - (chan_num >> 1);
buf[0] = (uint8_t) t; /* Section type */
buf[1] = (seq_num << 4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
(chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
7; /* reserved -- always 1 */
(fsc << 3) | /* FSC: for 50 and 100Mb/s 0 - first channel; 1 - second */
(fsp << 2) | /* FSP: for 100Mb/s 1 - channels 0-1; 0 - channels 2-3 */
3; /* reserved -- always 1 */
buf[2] = dif_num; /* DIF block number Video: 0-134, Audio: 0-8 */
return 3;
}
@ -674,20 +1117,22 @@ static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t *buf)
static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
{
int chan, i, j, k;
/* We work with 720p frames split in half. The odd half-frame is chan 2,3 */
int chan_offset = 2*(c->sys->height == 720 && c->avctx->frame_number & 1);
for (chan = 0; chan < c->sys->n_difchan; chan++) {
for (i = 0; i < c->sys->difseg_size; i++) {
memset(buf, 0xff, 80 * 6); /* first 6 DIF blocks are for control data */
/* DV header: 1DIF */
buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf);
buf += dv_write_dif_id(dv_sect_header, chan+chan_offset, i, 0, buf);
buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525),
c, buf);
buf += 72; /* unused bytes */
/* DV subcode: 2DIFs */
for (j = 0; j < 2; j++) {
buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf);
buf += dv_write_dif_id(dv_sect_subcode, chan+chan_offset, i, j, buf);
for (k = 0; k < 6; k++)
buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size / 2), buf) + 5;
buf += 29; /* unused bytes */
@ -695,7 +1140,7 @@ static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
/* DV VAUX: 3DIFS */
for (j = 0; j < 3; j++) {
buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf);
buf += dv_write_dif_id(dv_sect_vaux, chan+chan_offset, i, j, buf);
buf += dv_write_pack(dv_video_source, c, buf);
buf += dv_write_pack(dv_video_control, c, buf);
buf += 7 * 5;
@ -708,10 +1153,10 @@ static void dv_format_frame(DVVideoContext *c, uint8_t *buf)
for (j = 0; j < 135; j++) {
if (j % 15 == 0) {
memset(buf, 0xff, 80);
buf += dv_write_dif_id(dv_sect_audio, chan, i, j / 15, buf);
buf += dv_write_dif_id(dv_sect_audio, chan+chan_offset, i, j/15, buf);
buf += 77; /* audio control & shuffled PCM audio */
}
buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf);
buf += dv_write_dif_id(dv_sect_video, chan+chan_offset, i, j, buf);
buf += 77; /* 1 video macroblock: 1 bytes control
* 4 * 14 bytes Y 8x8 data
* 10 bytes Cr 8x8 data
@ -738,15 +1183,15 @@ FF_DISABLE_DEPRECATION_WARNINGS
c->coded_frame->pict_type = AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
s->buf = pkt->data;
dv_format_frame(s, pkt->data);
c->execute(c, dv_encode_video_segment, s->work_chunks, NULL,
dv_work_pool_size(s->sys), sizeof(DVwork_chunk));
emms_c();
dv_format_frame(s, pkt->data);
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;

View File

@ -428,9 +428,15 @@ int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *
return AVERROR(EINVAL);
if (avctx->codec->receive_packet) {
int ret;
if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
return AVERROR_EOF;
return avctx->codec->receive_packet(avctx, avpkt);
ret = avctx->codec->receive_packet(avctx, avpkt);
if (!ret)
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
return ret;
}
// Emulation via old API.

View File

@ -27,6 +27,7 @@
#include "av1.h"
#include "av1_parse.h"
#include "bsf.h"
#include "bytestream.h"
#include "h2645_parse.h"
#include "h264.h"
#include "hevc.h"
@ -85,8 +86,9 @@ static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt,
}
if (extradata_size && has_seq) {
AVBufferRef *filtered_buf;
uint8_t *extradata, *filtered_data;
AVBufferRef *filtered_buf = NULL;
PutByteContext pb_filtered_data, pb_extradata;
uint8_t *extradata;
if (s->remove) {
filtered_buf = av_buffer_alloc(filtered_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -94,8 +96,6 @@ static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt,
return AVERROR(ENOMEM);
}
memset(filtered_buf->data + filtered_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
filtered_data = filtered_buf->data;
}
extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -108,15 +108,17 @@ static int extract_extradata_av1(AVBSFContext *ctx, AVPacket *pkt,
*data = extradata;
*size = extradata_size;
bytestream2_init_writer(&pb_extradata, extradata, extradata_size);
if (s->remove)
bytestream2_init_writer(&pb_filtered_data, filtered_buf->data, filtered_size);
for (i = 0; i < s->av1_pkt.nb_obus; i++) {
AV1OBU *obu = &s->av1_pkt.obus[i];
if (val_in_array(extradata_obu_types, nb_extradata_obu_types,
obu->type)) {
memcpy(extradata, obu->raw_data, obu->raw_size);
extradata += obu->raw_size;
bytestream2_put_bufferu(&pb_extradata, obu->raw_data, obu->raw_size);
} else if (s->remove) {
memcpy(filtered_data, obu->raw_data, obu->raw_size);
filtered_data += obu->raw_size;
bytestream2_put_bufferu(&pb_filtered_data, obu->raw_data, obu->raw_size);
}
}
@ -179,8 +181,9 @@ static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt,
if (extradata_size &&
((ctx->par_in->codec_id == AV_CODEC_ID_HEVC && has_sps && has_vps) ||
(ctx->par_in->codec_id == AV_CODEC_ID_H264 && has_sps))) {
AVBufferRef *filtered_buf;
uint8_t *extradata, *filtered_data;
AVBufferRef *filtered_buf = NULL;
PutByteContext pb_filtered_data, pb_extradata;
uint8_t *extradata;
if (s->remove) {
filtered_buf = av_buffer_alloc(filtered_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -188,8 +191,6 @@ static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt,
return AVERROR(ENOMEM);
}
memset(filtered_buf->data + filtered_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
filtered_data = filtered_buf->data;
}
extradata = av_malloc(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
@ -202,17 +203,19 @@ static int extract_extradata_h2645(AVBSFContext *ctx, AVPacket *pkt,
*data = extradata;
*size = extradata_size;
bytestream2_init_writer(&pb_extradata, extradata, extradata_size);
if (s->remove)
bytestream2_init_writer(&pb_filtered_data, filtered_buf->data, filtered_size);
for (i = 0; i < s->h2645_pkt.nb_nals; i++) {
H2645NAL *nal = &s->h2645_pkt.nals[i];
if (val_in_array(extradata_nal_types, nb_extradata_nal_types,
nal->type)) {
AV_WB24(extradata, 1); // startcode
memcpy(extradata + 3, nal->raw_data, nal->raw_size);
extradata += 3 + nal->raw_size;
bytestream2_put_be24u(&pb_extradata, 1); //startcode
bytestream2_put_bufferu(&pb_extradata, nal->raw_data, nal->raw_size);
} else if (s->remove) {
AV_WB24(filtered_data, 1); // startcode
memcpy(filtered_data + 3, nal->raw_data, nal->raw_size);
filtered_data += 3 + nal->raw_size;
bytestream2_put_be24u(&pb_filtered_data, 1); // startcode
bytestream2_put_bufferu(&pb_filtered_data, nal->raw_data, nal->raw_size);
}
}

View File

@ -217,8 +217,8 @@ static void wavesynth_seek(struct wavesynth_context *ws, int64_t ts)
*last = -1;
lcg_seek(&ws->dither_state, (uint32_t)ts - (uint32_t)ws->cur_ts);
if (ws->pink_need) {
int64_t pink_ts_cur = (ws->cur_ts + PINK_UNIT - 1) & ~(PINK_UNIT - 1);
int64_t pink_ts_next = ts & ~(PINK_UNIT - 1);
uint64_t pink_ts_cur = (ws->cur_ts + PINK_UNIT - 1) & ~(PINK_UNIT - 1);
uint64_t pink_ts_next = ts & ~(PINK_UNIT - 1);
int pos = ts & (PINK_UNIT - 1);
lcg_seek(&ws->pink_state, (uint32_t)(pink_ts_next - pink_ts_cur) * 2);
if (pos) {

View File

@ -279,7 +279,7 @@ static int fits_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
for (j = 0; j < avctx->width; j++) { \
t = rd; \
if (!header.blank_found || t != header.blank) { \
*dst++ = ((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale; \
*dst++ = lrint(((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale); \
} else { \
*dst++ = fitsctx->blank_val; \
} \

View File

@ -97,6 +97,7 @@ typedef struct {
uint8_t gc_2nd_index_bits; ///< gain codebook (second stage) index (size in bits)
uint8_t fc_signs_bits; ///< number of pulses in fixed-codebook vector
uint8_t fc_indexes_bits; ///< size (in bits) of fixed-codebook index entry
uint8_t block_size;
} G729FormatDescription;
typedef struct {
@ -165,6 +166,7 @@ static const G729FormatDescription format_g729_8k = {
.gc_2nd_index_bits = GC_2ND_IDX_BITS_8K,
.fc_signs_bits = 4,
.fc_indexes_bits = 13,
.block_size = G729_8K_BLOCK_SIZE,
};
static const G729FormatDescription format_g729d_6k4 = {
@ -174,6 +176,7 @@ static const G729FormatDescription format_g729d_6k4 = {
.gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4,
.fc_signs_bits = 2,
.fc_indexes_bits = 9,
.block_size = G729D_6K4_BLOCK_SIZE,
};
/**
@ -332,11 +335,14 @@ static int16_t g729d_voice_decision(int onset, int prev_voice_decision, const in
static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order)
{
int res = 0;
int64_t res = 0;
while (order--)
res += *v1++ * *v2++;
if (res > INT32_MAX) return INT32_MAX;
else if (res < INT32_MIN) return INT32_MIN;
return res;
}
@ -424,14 +430,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
if (buf_size % ((G729_8K_BLOCK_SIZE + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels) == 0) {
if (buf_size && buf_size % ((G729_8K_BLOCK_SIZE + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels) == 0) {
packet_type = FORMAT_G729_8K;
format = &format_g729_8k;
//Reset voice decision
ctx->onset = 0;
ctx->voice_decision = DECISION_VOICE;
av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s");
} else if (buf_size == G729D_6K4_BLOCK_SIZE * avctx->channels) {
} else if (buf_size == G729D_6K4_BLOCK_SIZE * avctx->channels && avctx->codec_id != AV_CODEC_ID_ACELP_KELVIN) {
packet_type = FORMAT_G729D_6K4;
format = &format_g729d_6k4;
av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s");
@ -451,11 +457,11 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
buf++;
}
for (i = 0; i < buf_size; i++)
for (i = 0; i < format->block_size; i++)
frame_erasure |= buf[i];
frame_erasure = !frame_erasure;
init_get_bits(&gb, buf, 8*buf_size);
init_get_bits8(&gb, buf, format->block_size);
ma_predictor = get_bits(&gb, 1);
quantizer_1st = get_bits(&gb, VQ_1ST_BITS);
@ -728,12 +734,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
/* Save signal for use in next frame. */
memmove(ctx->exc_base, ctx->exc_base + 2 * SUBFRAME_SIZE, (PITCH_DELAY_MAX+INTERPOL_LEN)*sizeof(int16_t));
buf += packet_type == FORMAT_G729_8K ? G729_8K_BLOCK_SIZE : G729D_6K4_BLOCK_SIZE;
buf += format->block_size;
ctx++;
}
*got_frame_ptr = 1;
return packet_type == FORMAT_G729_8K ? (G729_8K_BLOCK_SIZE + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels : G729D_6K4_BLOCK_SIZE * avctx->channels;
return (format->block_size + (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN)) * avctx->channels;
}
static av_cold int decode_close(AVCodecContext *avctx)

View File

@ -456,11 +456,12 @@ static av_cold int decode_init(AVCodecContext *avctx)
*/
static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
{
const uint64_t *lut = plane8_lut[plane];
const uint64_t *lut;
if (plane >= 8) {
av_log(NULL, AV_LOG_WARNING, "Ignoring extra planes beyond 8\n");
return;
}
lut = plane8_lut[plane];
do {
uint64_t v = AV_RN64A(dst) | lut[*buf++];
AV_WN64A(dst, v);

View File

@ -40,6 +40,8 @@ typedef struct Libdav1dContext {
int tile_threads;
int frame_threads;
int apply_grain;
int operating_point;
int all_layers;
} Libdav1dContext;
static const enum AVPixelFormat pix_fmt[][3] = {
@ -134,6 +136,10 @@ static av_cold int libdav1d_init(AVCodecContext *c)
if (dav1d->apply_grain >= 0)
s.apply_grain = dav1d->apply_grain;
s.all_layers = dav1d->all_layers;
if (dav1d->operating_point >= 0)
s.operating_point = dav1d->operating_point;
s.n_tile_threads = dav1d->tile_threads
? dav1d->tile_threads
: FFMIN(floor(sqrt(threads)), DAV1D_MAX_TILE_THREADS);
@ -378,6 +384,8 @@ static const AVOption libdav1d_options[] = {
{ "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_TILE_THREADS, VD },
{ "framethreads", "Frame threads", OFFSET(frame_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, DAV1D_MAX_FRAME_THREADS, VD },
{ "filmgrain", "Apply Film Grain", OFFSET(apply_grain), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
{ "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
{ "alllayers", "Output all spatial layers", OFFSET(all_layers), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
{ NULL }
};

View File

@ -29,6 +29,7 @@
#include "audio_frame_queue.h"
#include "internal.h"
#if CONFIG_LIBOPENCORE_AMRNB_DECODER || CONFIG_LIBOPENCORE_AMRWB_DECODER
static int amr_decode_fix_avctx(AVCodecContext *avctx)
{
const int is_amr_wb = 1 + (avctx->codec_id == AV_CODEC_ID_AMR_WB);
@ -46,6 +47,7 @@ static int amr_decode_fix_avctx(AVCodecContext *avctx)
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
return 0;
}
#endif
#if CONFIG_LIBOPENCORE_AMRNB

View File

@ -533,7 +533,7 @@ retry:
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = 100 }, -1, 255, VE },
{ "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
{ "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
{ "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
{ "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },

View File

@ -59,7 +59,7 @@ typedef struct XAVS2EContext {
static av_cold int xavs2_init(AVCodecContext *avctx)
{
XAVS2EContext *cae= avctx->priv_data;
XAVS2EContext *cae = avctx->priv_data;
int bit_depth, code;
bit_depth = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 8 : 10;
@ -67,13 +67,13 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
/* get API handler */
cae->api = xavs2_api_get(bit_depth);
if (!cae->api) {
av_log(avctx, AV_LOG_ERROR, "api get failed\n");
av_log(avctx, AV_LOG_ERROR, "Failed to get xavs2 api context\n");
return AVERROR_EXTERNAL;
}
cae->param = cae->api->opt_alloc();
if (!cae->param) {
av_log(avctx, AV_LOG_ERROR, "param alloc failed\n");
av_log(avctx, AV_LOG_ERROR, "Failed to alloc xavs2 parameters\n");
return AVERROR(ENOMEM);
}
@ -115,15 +115,13 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
xavs2_opt_set2("InitialQP", "%d", cae->qp);
}
ff_mpeg12_find_best_frame_rate(avctx->framerate, &code, NULL, NULL, 0);
xavs2_opt_set2("FrameRate", "%d", code);
cae->encoder = cae->api->encoder_create(cae->param);
if (!cae->encoder) {
av_log(avctx,AV_LOG_ERROR, "Can not create encoder. Null pointer returned\n");
av_log(avctx, AV_LOG_ERROR, "Failed to create xavs2 encoder instance.\n");
return AVERROR(EINVAL);
}
@ -132,29 +130,42 @@ static av_cold int xavs2_init(AVCodecContext *avctx)
static void xavs2_copy_frame_with_shift(xavs2_picture_t *pic, const AVFrame *frame, const int shift_in)
{
int j, k;
for (k = 0; k < 3; k++) {
int i_stride = pic->img.i_stride[k];
for (j = 0; j < pic->img.i_lines[k]; j++) {
uint16_t *p_plane = (uint16_t *)&pic->img.img_planes[k][j * i_stride];
int i;
uint8_t *p_buffer = frame->data[k] + frame->linesize[k] * j;
memset(p_plane, 0, i_stride);
for (i = 0; i < pic->img.i_width[k]; i++) {
p_plane[i] = p_buffer[i] << shift_in;
uint16_t *p_plane;
uint8_t *p_buffer;
int plane;
int hIdx;
int wIdx;
for (plane = 0; plane < 3; plane++) {
p_plane = (uint16_t *)pic->img.img_planes[plane];
p_buffer = frame->data[plane];
for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
memset(p_plane, 0, pic->img.i_stride[plane]);
for (wIdx = 0; wIdx < pic->img.i_width[plane]; wIdx++) {
p_plane[wIdx] = p_buffer[wIdx] << shift_in;
}
p_plane += pic->img.i_stride[plane];
p_buffer += frame->linesize[plane];
}
}
}
static void xavs2_copy_frame(xavs2_picture_t *pic, const AVFrame *frame)
{
int j, k;
for (k = 0; k < 3; k++) {
for (j = 0; j < pic->img.i_lines[k]; j++) {
memcpy( pic->img.img_planes[k] + pic->img.i_stride[k] * j,
frame->data[k]+frame->linesize[k] * j,
pic->img.i_width[k] * pic->img.in_sample_size);
uint8_t *p_plane;
uint8_t *p_buffer;
int plane;
int hIdx;
int stride;
for (plane = 0; plane < 3; plane++) {
p_plane = pic->img.img_planes[plane];
p_buffer = frame->data[plane];
stride = pic->img.i_width[plane] * pic->img.in_sample_size;
for (hIdx = 0; hIdx < pic->img.i_lines[plane]; hIdx++) {
memcpy(p_plane, p_buffer, stride);
p_plane += pic->img.i_stride[plane];
p_buffer += frame->linesize[plane];
}
}
}
@ -169,7 +180,7 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
/* create the XAVS2 video encoder */
/* read frame data and send to the XAVS2 video encoder */
if (cae->api->encoder_get_buffer(cae->encoder, &pic) < 0) {
av_log(avctx,AV_LOG_ERROR, "failed to get frame buffer\n");
av_log(avctx, AV_LOG_ERROR, "Failed to get xavs2 frame buffer\n");
return AVERROR_EXTERNAL;
}
if (frame) {
@ -200,7 +211,7 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
ret = cae->api->encoder_encode(cae->encoder, &pic, &cae->packet);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "encode failed\n");
av_log(avctx, AV_LOG_ERROR, "Encoding error occured.\n");
return AVERROR_EXTERNAL;
}
@ -208,10 +219,9 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
cae->api->encoder_encode(cae->encoder, NULL, &cae->packet);
}
if ((cae->packet.len) && (cae->packet.state != XAVS2_STATE_FLUSH_END)){
if (av_new_packet(pkt, cae->packet.len) < 0){
av_log(avctx, AV_LOG_ERROR, "packet alloc failed\n");
if ((cae->packet.len) && (cae->packet.state != XAVS2_STATE_FLUSH_END)) {
if (av_new_packet(pkt, cae->packet.len) < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed to alloc xavs2 packet.\n");
cae->api->encoder_packet_unref(cae->encoder, &cae->packet);
return AVERROR(ENOMEM);
}

283
libavcodec/midivid.c Normal file
View File

@ -0,0 +1,283 @@
/*
* MidiVid decoder
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "get_bits.h"
#include "bytestream.h"
#include "internal.h"
/* Private decoder state for the MidiVid (MVDV) video decoder. */
typedef struct MidiVidContext {
    GetByteContext gb;              /* byte reader over the current packet */

    uint8_t *uncompressed;          /* NOTE(review): presumably holds decompressed
                                     * packet data -- usage not visible here, confirm */
    unsigned int uncompressed_size; /* allocated size of the buffer above */
    uint8_t *skip;                  /* per-2x2-block skip flags for inter frames,
                                     * one byte each, width/2 entries per row
                                     * (see decode_mvdv) */
    AVFrame *frame;                 /* decoder-held frame -- presumably reused
                                     * across calls for inter coding; confirm */
} MidiVidContext;
/* Decode one MVDV (MidiVid VQ) image from s->gb into frame.
 *
 * Bitstream layout: 16-bit LE vector count, 16-bit LE intra flag;
 * for inter frames a 32-bit LE block count plus a per-4x4-area skip
 * bitmask follow; then the table of 12-byte YUV vectors and, when
 * more than 256 vectors are present, a separate packed stream that
 * carries the 9th bit of every block index.
 *
 * Returns the intra flag (>= 0) on success so the caller can mark
 * key frames, or a negative AVERROR on truncated input.
 */
static int decode_mvdv(MidiVidContext *s, AVCodecContext *avctx, AVFrame *frame)
{
    GetByteContext *gb = &s->gb;
    GetBitContext mask;
    GetByteContext idx9;
    uint16_t nb_vectors, intra_flag;
    const uint8_t *vec;
    const uint8_t *mask_start;
    uint8_t *skip;
    uint32_t mask_size;
    int idx9bits = 0;
    int idx9val = 0;
    uint32_t nb_blocks;

    nb_vectors = bytestream2_get_le16(gb);
    intra_flag = bytestream2_get_le16(gb);
    if (intra_flag) {
        /* Intra frame: every 2x2 block is coded, no skip mask. */
        nb_blocks = (avctx->width / 2) * (avctx->height / 2);
    } else {
        int skip_linesize;

        nb_blocks = bytestream2_get_le32(gb);
        skip_linesize = avctx->width >> 1;
        mask_start = gb->buffer_start + bytestream2_tell(gb);
        /* One mask bit per 4x4 pixel area; assumes width/height are
         * multiples of 4 -- NOTE(review): confirm against demuxer. */
        mask_size = (avctx->width >> 5) * (avctx->height >> 2);

        if (bytestream2_get_bytes_left(gb) < mask_size)
            return AVERROR_INVALIDDATA;
        init_get_bits8(&mask, mask_start, mask_size);
        bytestream2_skip(gb, mask_size);
        skip = s->skip;

        /* Expand each 4x4-area bit to its four 2x2-block entries in
         * the skip map (a cleared bit means "skip this block"). */
        for (int y = 0; y < avctx->height >> 2; y++) {
            for (int x = 0; x < avctx->width >> 2; x++) {
                int flag = !get_bits1(&mask);

                skip[(y*2)  *skip_linesize + x*2  ] = flag;
                skip[(y*2)  *skip_linesize + x*2+1] = flag;
                skip[(y*2+1)*skip_linesize + x*2  ] = flag;
                skip[(y*2+1)*skip_linesize + x*2+1] = flag;
            }
        }
    }

    vec = gb->buffer_start + bytestream2_tell(gb);
    if (bytestream2_get_bytes_left(gb) < nb_vectors * 12)
        return AVERROR_INVALIDDATA;
    bytestream2_skip(gb, nb_vectors * 12);
    if (nb_vectors > 256) {
        /* Indices need 9 bits: the high bit of every coded block
         * comes from this separate packed stream. */
        if (bytestream2_get_bytes_left(gb) < (nb_blocks + 7) / 8)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&idx9, gb->buffer_start + bytestream2_tell(gb), (nb_blocks + 7) / 8);
        bytestream2_skip(gb, (nb_blocks + 7) / 8);
    }

    skip = s->skip;
    /* The image is stored bottom-up: walk row pairs from the bottom,
     * emitting one 2x2 YUV block per vector index. */
    for (int y = avctx->height - 2; y >= 0; y -= 2) {
        uint8_t *dsty = frame->data[0] + y * frame->linesize[0];
        uint8_t *dstu = frame->data[1] + y * frame->linesize[1];
        uint8_t *dstv = frame->data[2] + y * frame->linesize[2];

        for (int x = 0; x < avctx->width; x += 2) {
            int idx;

            if (!intra_flag && *skip++)
                continue;
            if (bytestream2_get_bytes_left(gb) <= 0)
                return AVERROR_INVALIDDATA;
            if (nb_vectors <= 256) {
                idx = bytestream2_get_byte(gb);
            } else {
                /* Refill the 9th-bit cache one byte at a time; bits
                 * are consumed MSB first. */
                if (idx9bits == 0) {
                    idx9val  = bytestream2_get_byte(&idx9);
                    idx9bits = 8;
                }
                idx9bits--;
                idx = bytestream2_get_byte(gb) | (((idx9val >> (7 - idx9bits)) & 1) << 8);
            }

            /* Each 12-byte vector holds four YUV triplets; offsets
             * 0/3 are the lower row, 6/9 the upper row of the 2x2
             * block (image is bottom-up). */
            dsty[x  +frame->linesize[0]] = vec[idx * 12 + 0];
            dsty[x+1+frame->linesize[0]] = vec[idx * 12 + 3];
            dsty[x]                      = vec[idx * 12 + 6];
            dsty[x+1]                    = vec[idx * 12 + 9];

            dstu[x  +frame->linesize[1]] = vec[idx * 12 + 1];
            dstu[x+1+frame->linesize[1]] = vec[idx * 12 + 4];
            dstu[x]                      = vec[idx * 12 + 7];
            dstu[x+1]                    = vec[idx * 12 +10];

            dstv[x  +frame->linesize[2]] = vec[idx * 12 + 2];
            dstv[x+1+frame->linesize[2]] = vec[idx * 12 + 5];
            dstv[x]                      = vec[idx * 12 + 8];
            dstv[x+1]                    = vec[idx * 12 +11];
        }
    }

    return intra_flag;
}
/* Decompress the LZSS payload from gb into dst (at most 'size'
 * bytes).  The stream is a sequence of 16-bit LE flag words, each
 * followed by 16 ops: flag bit set = back-reference (12-bit offset,
 * 4-bit length + 3), clear = literal byte.  Returns the number of
 * bytes produced, or a negative AVERROR on a malformed reference.
 * The context argument is unused but kept for interface symmetry. */
static ptrdiff_t lzss_uncompress(MidiVidContext *s, GetByteContext *gb, uint8_t *dst, unsigned int size)
{
    uint8_t *const out_start = dst;
    uint8_t *const out_end   = dst + size;

    while (bytestream2_get_bytes_left(gb) >= 3) {
        int flags = bytestream2_get_le16(gb);

        for (int bit = 0; bit < 16; bit++, flags >>= 1) {
            if (!(flags & 1)) {
                /* Literal byte. */
                if (dst >= out_end)
                    return AVERROR_INVALIDDATA;
                *dst++ = bytestream2_get_byte(gb);
                continue;
            }

            /* Back-reference. */
            int b0  = bytestream2_get_byte(gb);
            int b1  = bytestream2_get_byte(gb);
            int off = ((b0 & 0xF0) << 4) | b1;
            int len = (b0 & 0x0F) + 3;

            if (dst + len > out_end || dst - off < out_start)
                return AVERROR_INVALIDDATA;
            if (off > 0) {
                /* Byte-wise copy: overlapping references repeat. */
                for (int k = 0; k < len; k++)
                    dst[k] = dst[k - off];
            }
            dst += len;
        }
    }

    return dst - out_start;
}
/* Decode one MidiVid packet.
 *
 * Packet layout: 8 header bytes (skipped), a 32-bit LE flag telling
 * whether the MVDV payload is stored raw (non-zero) or
 * LZSS-compressed (zero), then the payload itself.
 *
 * Returns the packet size on success, a negative AVERROR otherwise.
 */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    MidiVidContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    AVFrame *frame = s->frame;
    int ret, key, uncompressed;

    if (avpkt->size <= 13)
        return AVERROR_INVALIDDATA;

    bytestream2_init(gb, avpkt->data, avpkt->size);
    bytestream2_skip(gb, 8);
    uncompressed = bytestream2_get_le32(gb);

    if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    if (uncompressed) {
        ret = decode_mvdv(s, avctx, frame);
    } else {
        /* Generous worst-case bound on LZSS expansion of the payload. */
        av_fast_padded_malloc(&s->uncompressed, &s->uncompressed_size, 16LL * (avpkt->size - 12));
        if (!s->uncompressed)
            return AVERROR(ENOMEM);

        ret = lzss_uncompress(s, gb, s->uncompressed, s->uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(gb, s->uncompressed, ret);
        ret = decode_mvdv(s, avctx, frame);
    }

    if (ret < 0)
        return ret;
    /* decode_mvdv() returns the intra flag on success. */
    key = ret;

    /* Set picture properties BEFORE taking the output reference so
     * av_frame_ref() copies them into the caller's frame.  The
     * previous order applied them to s->frame only after the ref,
     * leaving the returned frame without pict_type/key_frame. */
    frame->pict_type = key ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    frame->key_frame = key;

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return avpkt->size;
}
/* One-time decoder setup: validate dimensions, pick the pixel
 * format, and allocate the persistent frame and the skip map. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    MidiVidContext *ctx = avctx->priv_data;
    int err;

    err = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return err;
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV444P;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    /* One skip entry per 2x2 block. */
    ctx->skip = av_calloc(avctx->width >> 1, avctx->height >> 1);
    if (!ctx->skip)
        return AVERROR(ENOMEM);

    return 0;
}
/* Drop the reference held by the persistent frame on a seek/flush. */
static void decode_flush(AVCodecContext *avctx)
{
    MidiVidContext *ctx = avctx->priv_data;

    av_frame_unref(ctx->frame);
}
/* Release the persistent frame and all scratch buffers.  Also runs
 * after a failed init thanks to FF_CODEC_CAP_INIT_CLEANUP. */
static av_cold int decode_close(AVCodecContext *avctx)
{
    MidiVidContext *ctx = avctx->priv_data;

    av_frame_free(&ctx->frame);
    av_freep(&ctx->uncompressed);
    av_freep(&ctx->skip);

    return 0;
}
/* Decoder registration for MidiVid VQ ("mvdv"). */
AVCodec ff_mvdv_decoder = {
    .name           = "mvdv",
    .long_name      = NULL_IF_CONFIG_SMALL("MidiVid VQ"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MVDV,
    .priv_data_size = sizeof(MidiVidContext),
    .init           = decode_init,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .close          = decode_close,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP, // close() runs even when init() fails
};

View File

@ -50,7 +50,7 @@ static int find_frame_end(MJPEGParserContext *m, const uint8_t *buf, int buf_siz
for(i=0; i<buf_size;){
state= (state<<8) | buf[i];
if(state>=0xFFC00000 && state<=0xFFFEFFFF){
if(state>=0xFFD80000 && state<=0xFFD8FFFF){
if(state>=0xFFD8FFC0 && state<=0xFFD8FFFF){
i++;
vop_found=1;
break;
@ -76,12 +76,14 @@ static int find_frame_end(MJPEGParserContext *m, const uint8_t *buf, int buf_siz
for(; i<buf_size;){
state= (state<<8) | buf[i];
if(state>=0xFFC00000 && state<=0xFFFEFFFF){
if(state>=0xFFD80000 && state<=0xFFD8FFFF){
if(state>=0xFFD8FFC0 && state<=0xFFD8FFFF){
pc->frame_start_found=0;
pc->state=0;
return i-3;
} else if(state<0xFFD00000 || state>0xFFD9FFFF){
m->size= (state&0xFFFF)-1;
if (m->size >= 0x8000)
m->size = 0;
}
}
if(m->size>0){

View File

@ -56,6 +56,7 @@ static int mjpegb_decode_frame(AVCodecContext *avctx,
buf_ptr = buf;
buf_end = buf + buf_size;
s->got_picture = 0;
s->adobe_transform = -1;
read_header:
/* reset on every SOI */

View File

@ -154,7 +154,7 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
break;
}
}
the_end: ;
the_end:
if (set_dim_ret < 0)
av_log(avctx, AV_LOG_ERROR, "Failed to set dimensions\n");

315
libavcodec/mvha.c Normal file
View File

@ -0,0 +1,315 @@
/*
* MidiVid Archive codec
*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define CACHED_BITSTREAM_READER !ARCH_X86_32
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "lossless_videodsp.h"
#include <zlib.h>
/* Persistent state for the MidiVid Archive (MVHA) decoder. */
typedef struct MVHAContext {
    GetBitContext gb;         // bit reader for HUFY payloads
    int nb_symbols;           // number of coded Huffman symbols (1..256)
    uint8_t symb[256];        // symbol values, in parse order
    uint32_t prob[256];       // symbol weights used to build the tree
    VLC vlc;                  // current Huffman decode table
    z_stream zstream;         // zlib state for LZYV frames
    LLVidDSPContext llviddsp; // left/median prediction helpers
} MVHAContext;
/* Temporary Huffman-tree node used while building the VLC. */
typedef struct Node {
    int16_t sym;    // symbol value, or -1 for internal nodes
    int16_t n0;     // bookkeeping index (leaves: -2, internal: own index)
    int16_t l, r;   // child indices; leaves point at themselves
    uint32_t count; // weight; 0 marks a node already merged away
} Node;
/* Recursively walk the Huffman tree rooted at 'node' and append a
 * (code, length, symbol) triplet for every leaf at *pos.
 *
 * Codes are stored bit-inverted (~pfx) -- NOTE(review): presumably to
 * match the convention of the ff_init_vlc_sparse() call in
 * build_vlc(); confirm against the VLC reader.  A degenerate tree
 * with a single leaf (pl == 0) is forced to a 1-bit code, and the
 * symbol value is offset by one in that case.
 */
static void get_tree_codes(uint32_t *bits, int16_t *lens, uint8_t *xlat,
                           Node *nodes, int node,
                           uint32_t pfx, int pl, int *pos)
{
    int s;

    s = nodes[node].sym;
    if (s != -1) {
        /* Leaf: record inverted prefix, clamped to at least 1 bit. */
        bits[*pos] = (~pfx) & ((1ULL << FFMAX(pl, 1)) - 1);
        lens[*pos] = FFMAX(pl, 1);
        xlat[*pos] = s + (pl == 0);
        (*pos)++;
    } else {
        /* Internal node: descend left with a 0 bit, right with a 1. */
        pfx <<= 1;
        pl++;
        get_tree_codes(bits, lens, xlat, nodes, nodes[node].l, pfx, pl,
                       pos);
        pfx |= 1;
        get_tree_codes(bits, lens, xlat, nodes, nodes[node].r, pfx, pl,
                       pos);
    }
}
/* (Re)build the decoder VLC from s->symb[] / s->prob[].
 *
 * Classic Huffman construction: leaves are seeded with the parsed
 * weights, then the two live nodes with the smallest non-zero counts
 * are repeatedly merged into a new internal node until one root
 * remains.  Returns the ff_init_vlc_sparse() result, or
 * AVERROR_INVALIDDATA if the summed counts would overflow.
 */
static int build_vlc(AVCodecContext *avctx, VLC *vlc)
{
    MVHAContext *s = avctx->priv_data;
    Node nodes[512];
    uint32_t bits[256];
    int16_t lens[256];
    uint8_t xlat[256];
    int cur_node, i, j, pos = 0;

    ff_free_vlc(vlc);

    /* Seed the leaves; leaves point at themselves as children. */
    for (i = 0; i < s->nb_symbols; i++) {
        nodes[i].count = s->prob[i];
        nodes[i].sym   = s->symb[i];
        nodes[i].n0    = -2;
        nodes[i].l     = i;
        nodes[i].r     = i;
    }

    cur_node = s->nb_symbols;
    j = 0;
    do {
        for (i = 0; ; i++) {
            int new_node = j;
            int first_node = cur_node;
            int second_node = cur_node;
            unsigned nd, st;

            /* Sentinel with maximal (all-ones) count so any live node
             * compares smaller. */
            nodes[cur_node].count = -1;

            /* Scan [j, cur_node) for the two smallest non-zero
             * counts: second_node ends up smallest, first_node the
             * next smallest. */
            do {
                int val = nodes[new_node].count;

                if (val && (val < nodes[first_node].count)) {
                    if (val >= nodes[second_node].count) {
                        first_node = new_node;
                    } else {
                        first_node = second_node;
                        second_node = new_node;
                    }
                }
                new_node += 1;
            } while (new_node != cur_node);

            /* Fewer than two live nodes left: the tree is complete. */
            if (first_node == cur_node)
                break;

            nd = nodes[second_node].count;
            st = nodes[first_node].count;
            /* Merged nodes are marked dead with a zero count. */
            nodes[second_node].count = 0;
            nodes[first_node].count  = 0;
            if (nd >= UINT32_MAX - st) {
                av_log(avctx, AV_LOG_ERROR, "count overflow\n");
                return AVERROR_INVALIDDATA;
            }
            nodes[cur_node].count = nd + st;
            nodes[cur_node].sym   = -1;
            nodes[cur_node].n0    = cur_node;
            nodes[cur_node].l     = first_node;
            nodes[cur_node].r     = second_node;
            cur_node++;
        }
        j++;
    } while (cur_node - s->nb_symbols == j);

    /* Flatten the tree (root is the last node created) into the
     * tables ff_init_vlc_sparse() expects. */
    get_tree_codes(bits, lens, xlat, nodes, cur_node - 1, 0, 0, &pos);
    return ff_init_vlc_sparse(vlc, 12, pos, lens, 2, 2, bits, 4, 4, xlat, 1, 1, 0);
}
/* Decode one MVHA frame.
 *
 * Packet layout: 32-bit BE chunk tag, 32-bit LE payload size, then
 * the payload.  'LZYV' chunks hold zlib-compressed raw planes;
 * 'HUFY' chunks hold a Huffman table followed by VLC-coded planes.
 * Both variants store rows bottom-up; a left/median prediction pass
 * over each plane finishes the reconstruction.
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    MVHAContext *s = avctx->priv_data;
    AVFrame *frame = data;
    uint32_t type, size;
    int ret;

    if (avpkt->size <= 8)
        return AVERROR_INVALIDDATA;

    type = AV_RB32(avpkt->data);
    /* NOTE(review): 'size' is parsed but never validated against
     * avpkt->size. */
    size = AV_RL32(avpkt->data + 4);

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    if (type == MKTAG('L','Z','Y','V')) {
        ret = inflateReset(&s->zstream);
        if (ret != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", ret);
            return AVERROR_EXTERNAL;
        }

        s->zstream.next_in  = avpkt->data + 8;
        s->zstream.avail_in = avpkt->size - 8;

        /* Inflate one row at a time, bottom-up; chroma planes are
         * half width (4:2:2). */
        for (int p = 0; p < 3; p++) {
            for (int y = 0; y < avctx->height; y++) {
                s->zstream.next_out  = frame->data[p] + (avctx->height - y - 1) * frame->linesize[p];
                s->zstream.avail_out = avctx->width >> (p > 0);

                ret = inflate(&s->zstream, Z_SYNC_FLUSH);
                if (ret != Z_OK && ret != Z_STREAM_END) {
                    av_log(avctx, AV_LOG_ERROR, "Inflate error: %d\n", ret);
                    return AVERROR_EXTERNAL;
                }
            }
        }
    } else if (type == MKTAG('H','U','F','Y')) {
        GetBitContext *gb = &s->gb;
        int first_symbol, symbol;

        ret = init_get_bits8(gb, avpkt->data + 8, avpkt->size - 8);
        if (ret < 0)
            return ret;

        skip_bits(gb, 24);
        first_symbol = get_bits(gb, 8);
        s->nb_symbols = get_bits(gb, 8) + 1;

        /* Weights are coded in symbol order starting at first_symbol;
         * a zero weight skips the symbol without consuming a slot.
         * Each iteration eats at least 4 bits, so the bits-left check
         * bounds the loop on truncated input. */
        symbol = first_symbol;
        for (int i = 0; i < s->nb_symbols; symbol++) {
            int prob;

            if (get_bits_left(gb) < 4)
                return AVERROR_INVALIDDATA;
            if (get_bits1(gb)) {
                prob = get_bits(gb, 12); /* long form */
            } else {
                prob = get_bits(gb, 3);  /* short form */
            }

            if (prob) {
                s->symb[i] = symbol;
                s->prob[i] = prob;
                i++;
            }
        }

        ret = build_vlc(avctx, &s->vlc);
        if (ret < 0)
            return ret;

        /* VLC-decode each plane, writing rows bottom-up. */
        for (int p = 0; p < 3; p++) {
            int width = avctx->width >> (p > 0);
            ptrdiff_t stride = frame->linesize[p];
            uint8_t *dst;

            dst = frame->data[p] + (avctx->height - 1) * frame->linesize[p];
            for (int y = 0; y < avctx->height; y++) {
                for (int x = 0; x < width; x++) {
                    int v = get_vlc2(gb, s->vlc.table, s->vlc.bits, 3);

                    if (v < 0)
                        return AVERROR_INVALIDDATA;
                    dst[x] = v;
                }
                dst -= stride;
            }
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    /* Undo the spatial prediction, bottom-up: the bottom row uses
     * left prediction, every other row median prediction against the
     * row below it. */
    for (int p = 0; p < 3; p++) {
        int left, lefttop;
        int width = avctx->width >> (p > 0);
        ptrdiff_t stride = frame->linesize[p];
        uint8_t *dst;

        dst = frame->data[p] + (avctx->height - 1) * frame->linesize[p];
        s->llviddsp.add_left_pred(dst, dst, width, 0);
        dst -= stride;
        lefttop = left = dst[0];
        for (int y = 1; y < avctx->height; y++) {
            s->llviddsp.add_median_pred(dst, dst + stride, dst, width, &left, &lefttop);
            lefttop = left = dst[0];
            dst -= stride;
        }
    }

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
/* One-time decoder setup: pick the pixel format, initialize the zlib
 * inflater used by LZYV frames, and set up the prediction DSP. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    MVHAContext *ctx = avctx->priv_data;
    int zret;

    avctx->pix_fmt = AV_PIX_FMT_YUV422P;

    ctx->zstream.zalloc = Z_NULL;
    ctx->zstream.zfree  = Z_NULL;
    ctx->zstream.opaque = Z_NULL;
    zret = inflateInit(&ctx->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    ff_llviddsp_init(&ctx->llviddsp);

    return 0;
}
/* Tear down the zlib state and the Huffman table. */
static av_cold int decode_close(AVCodecContext *avctx)
{
    MVHAContext *ctx = avctx->priv_data;

    inflateEnd(&ctx->zstream);
    ff_free_vlc(&ctx->vlc);

    return 0;
}
/* Decoder registration for MidiVid Archive ("mvha"). */
AVCodec ff_mvha_decoder = {
    .name           = "mvha",
    .long_name      = NULL_IF_CONFIG_SMALL("MidiVid Archive Codec"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MVHA,
    .priv_data_size = sizeof(MVHAContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | // no global state touched in init
                      FF_CODEC_CAP_INIT_CLEANUP,     // close() runs even when init() fails
};

View File

@ -199,6 +199,7 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
buf_end = buf + buf_size;
jpg->got_picture = 0;
s->got_mxm_bitmask = 0;
s->got_sof_data = !!s->got_sof_data;
while (buf_ptr < buf_end) {
start_code = ff_mjpeg_find_marker(jpg, &buf_ptr, buf_end,
&unescaped_buf_ptr, &unescaped_buf_size);
@ -241,6 +242,11 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
return ret;
break;
case SOF0:
if (s->got_sof_data > 1) {
av_log(avctx, AV_LOG_ERROR,
"Multiple SOF in a frame\n");
return AVERROR_INVALIDDATA;
}
s->got_sof_data = 0;
ret = ff_mjpeg_decode_sof(jpg);
if (ret < 0) {
@ -253,7 +259,7 @@ static int mxpeg_decode_frame(AVCodecContext *avctx,
"Interlaced mode not supported in MxPEG\n");
return AVERROR(EINVAL);
}
s->got_sof_data = 1;
s->got_sof_data ++;
break;
case SOS:
if (!s->got_sof_data) {

View File

@ -131,10 +131,10 @@ static int codec_reinit(AVCodecContext *avctx, int width, int height,
+ RTJPEG_HEADER_SIZE;
if (buf_size > INT_MAX/8)
return -1;
if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
return ret;
avctx->width = c->width = width;
avctx->height = c->height = height;
c->width = width;
c->height = height;
av_fast_malloc(&c->decomp_buf, &c->decomp_size,
buf_size);
if (!c->decomp_buf) {
@ -219,6 +219,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
case NUV_RTJPEG:
minsize = c->width/16 * (c->height/16) * 6;
break;
case NUV_BLACK:
case NUV_COPY_LAST:
case NUV_LZO:
case NUV_RTJPEG_IN_LZO:
break;
default:
av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
return AVERROR_INVALIDDATA;
}
if (buf_size < minsize / 4)
return AVERROR_INVALIDDATA;
@ -307,9 +315,6 @@ retry:
case NUV_COPY_LAST:
/* nothing more to do here */
break;
default:
av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
return AVERROR_INVALIDDATA;
}
if ((result = av_frame_ref(picture, c->pic)) < 0)

View File

@ -1881,7 +1881,11 @@ static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencSur
goto error;
}
if (res = ff_alloc_packet2(avctx, pkt, lock_params.bitstreamSizeInBytes,0)) {
res = pkt->data ?
ff_alloc_packet2(avctx, pkt, lock_params.bitstreamSizeInBytes, lock_params.bitstreamSizeInBytes) :
av_new_packet(pkt, lock_params.bitstreamSizeInBytes);
if (res < 0) {
p_nvenc->nvEncUnlockBitstream(ctx->nvencoder, tmpoutsurf->output_surface);
goto error;
}

View File

@ -61,24 +61,6 @@ static inline void init_put_bits(PutBitContext *s, uint8_t *buffer,
s->bit_buf = 0;
}
/**
* Rebase the bit writer onto a reallocated buffer.
*
* @param buffer the buffer where to put bits
* @param buffer_size the size in bytes of buffer,
* must be larger than the previous size
*/
static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
int buffer_size)
{
av_assert0(8*buffer_size > s->size_in_bits);
s->buf_end = buffer + buffer_size;
s->buf_ptr = buffer + (s->buf_ptr - s->buf);
s->buf = buffer;
s->size_in_bits = 8 * buffer_size;
}
/**
* @return the total number of bits written to the bitstream.
*/
@ -87,6 +69,24 @@ static inline int put_bits_count(PutBitContext *s)
return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
}
/**
* Rebase the bit writer onto a reallocated buffer.
*
* @param buffer the buffer where to put bits
* @param buffer_size the size in bytes of buffer,
* must be large enough to hold everything written so far
*/
static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
int buffer_size)
{
av_assert0(8*buffer_size >= put_bits_count(s));
s->buf_end = buffer + buffer_size;
s->buf_ptr = buffer + (s->buf_ptr - s->buf);
s->buf = buffer;
s->size_in_bits = 8 * buffer_size;
}
/**
* @return the number of bits available in the bitstream.
*/

View File

@ -1704,7 +1704,7 @@ static av_cold int qdm2_decode_init(AVCodecContext *avctx)
s->group_size = bytestream2_get_be32(&gb);
s->fft_size = bytestream2_get_be32(&gb);
s->checksum_size = bytestream2_get_be32(&gb);
if (s->checksum_size >= 1U << 28 || !s->checksum_size) {
if (s->checksum_size >= 1U << 28 || s->checksum_size <= 1) {
av_log(avctx, AV_LOG_ERROR, "data block size invalid (%u)\n", s->checksum_size);
return AVERROR_INVALIDDATA;
}

View File

@ -764,10 +764,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
#if QSV_HAVE_EXT_VP9_PARAM
if (avctx->codec_id == AV_CODEC_ID_VP9) {
q->extvp9param.Header.BufferId = MFX_EXTBUFF_VP9_PARAM;
q->extvp9param.Header.BufferSz = sizeof(q->extvp9param);
q->extvp9param.WriteIVFHeaders = MFX_CODINGOPTION_OFF;
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->extvp9param;
q->extvp9param.Header.BufferId = MFX_EXTBUFF_VP9_PARAM;
q->extvp9param.Header.BufferSz = sizeof(q->extvp9param);
q->extvp9param.WriteIVFHeaders = MFX_CODINGOPTION_OFF;
q->extparam_internal[q->nb_extparam_internal++] = (mfxExtBuffer *)&q->extvp9param;
}
#endif
@ -824,7 +824,9 @@ static int qsv_retrieve_enc_vp9_params(AVCodecContext *avctx, QSVEncContext *q)
#endif
mfxExtBuffer *ext_buffers[] = {
#if QSV_HAVE_EXT_VP9_PARAM
(mfxExtBuffer*)&vp9_extend_buf,
#endif
#if QSV_HAVE_CO2
(mfxExtBuffer*)&co2,
#endif

View File

@ -125,7 +125,7 @@ typedef struct QSVEncContext {
mfxExtMultiFrameControl extmfc;
#endif
#if QSV_HAVE_EXT_VP9_PARAM
mfxExtVP9Param extvp9param;
mfxExtVP9Param extvp9param;
#endif
mfxExtOpaqueSurfaceAlloc opaque_alloc;

View File

@ -77,7 +77,7 @@ static av_cold int ra288_decode_init(AVCodecContext *avctx)
avctx->channel_layout = AV_CH_LAYOUT_MONO;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
if (avctx->block_align <= 0) {
if (avctx->block_align != 38) {
av_log(avctx, AV_LOG_ERROR, "unsupported block align\n");
return AVERROR_PATCHWELCOME;
}

View File

@ -264,8 +264,8 @@ static int decode_channel(RALFContext *ctx, GetBitContext *gb, int ch,
t = get_vlc2(gb, vlc[cmode].table, vlc[cmode].bits, 2);
t = extend_code(gb, t, 21, add_bits);
if (!cmode)
coeff -= 12 << add_bits;
coeff = t - coeff;
coeff -= 12U << add_bits;
coeff = (unsigned)t - coeff;
ctx->filter[i] = coeff;
cmode = coeff >> add_bits;
@ -408,7 +408,7 @@ static int decode_block(AVCodecContext *avctx, GetBitContext *gb,
case 4:
for (i = 0; i < len; i++) {
t = ch1[i] + ctx->bias[1];
t2 = ((ch0[i] + ctx->bias[0]) << 1) | (t & 1);
t2 = ((ch0[i] + ctx->bias[0]) * 2) | (t & 1);
dst0[i] = (t2 + t) / 2;
dst1[i] = (t2 - t) / 2;
}

View File

@ -223,7 +223,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
FFALIGN(avctx->width, 16),
avctx->height, 1);
} else {
context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample && avctx->bits_per_coded_sample < 16;
context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
avctx->height, 1);
}

View File

@ -473,7 +473,7 @@ static int predictor_calc_error(int *k, int *state, int order, int error)
{
int k_value = *k_ptr, state_value = *state_ptr;
x -= shift_down(k_value * state_value, LATTICE_SHIFT);
state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
state_ptr[1] = state_value + shift_down(k_value * (unsigned)x, LATTICE_SHIFT);
}
#else
for (i = order-2; i >= 0; i--)

View File

@ -586,10 +586,10 @@ static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int
last[0] = (int)((unsigned)last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3])>> 1;
last[2] = (int)((unsigned)last[1] + last[3]) >> 1;
t1 = ctx->D[0] + ctx->D[1];
t1 = ctx->D[0] + (unsigned)ctx->D[1];
ctx->D[0] = t1 >> 1;
ctx->D[1] = t1 - (t1 >> 1);
t2 = ctx->D[2] + ctx->D[3];
t2 = ctx->D[2] + (unsigned)ctx->D[3];
ctx->D[2] = t2 >> 1;
ctx->D[3] = t2 - (t2 >> 1);

View File

@ -255,7 +255,7 @@ static void truespeech_synth(TSContext *dec, int16_t *out, int quart)
int sum = 0;
for(k = 0; k < 8; k++)
sum += ptr0[k] * (unsigned)ptr1[k];
sum = out[i] + ((sum + 0x800) >> 12);
sum = out[i] + ((int)(sum + 0x800U) >> 12);
out[i] = av_clip(sum, -0x7FFE, 0x7FFE);
for(k = 7; k > 0; k--)
ptr0[k] = ptr0[k - 1];

View File

@ -129,7 +129,7 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
s->avctx = avctx;
// 30bytes includes TTA1 header
// 22 bytes for a TTA1 header
if (avctx->extradata_size < 22)
return AVERROR_INVALIDDATA;

View File

@ -28,6 +28,7 @@
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/intreadwrite.h"
#include "thread.h"
#define READ_PIXELS(a, b, c) \
do { \
@ -37,6 +38,12 @@
*c++ = (val >> 20) & 0x3FF; \
} while (0)
typedef struct ThreadData {
AVFrame *frame;
uint8_t *buf;
int stride;
} ThreadData;
static void v210_planar_unpack_c(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width)
{
uint32_t val;
@ -64,62 +71,29 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
avctx->bits_per_raw_sample = 10;
s->thread_count = av_clip(avctx->thread_count, 1, avctx->height/4);
s->aligned_input = 0;
ff_v210dec_init(s);
return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
static int v210_decode_slice(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
V210DecContext *s = avctx->priv_data;
int h, w, ret, stride, aligned_input;
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
int h, w;
ThreadData *td = arg;
AVFrame *frame = td->frame;
int stride = td->stride;
int slice_start = (avctx->height * jobnr) / s->thread_count;
int slice_end = (avctx->height * (jobnr+1)) / s->thread_count;
uint8_t *psrc = td->buf + stride * slice_start;
uint16_t *y, *u, *v;
if (s->custom_stride )
stride = s->custom_stride;
else {
int aligned_width = ((avctx->width + 47) / 48) * 48;
stride = aligned_width * 8 / 3;
}
if (avpkt->size < stride * avctx->height) {
if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
stride = avpkt->size / avctx->height;
if (!s->stride_warning_shown)
av_log(avctx, AV_LOG_WARNING, "Broken v210 with too small padding (64 byte) detected\n");
s->stride_warning_shown = 1;
} else {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
}
if ( avctx->codec_tag == MKTAG('C', '2', '1', '0')
&& avpkt->size > 64
&& AV_RN32(psrc) == AV_RN32("INFO")
&& avpkt->size - 64 >= stride * avctx->height)
psrc += 64;
aligned_input = !((uintptr_t)psrc & 0x1f) && !(stride & 0x1f);
if (aligned_input != s->aligned_input) {
s->aligned_input = aligned_input;
ff_v210dec_init(s);
}
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
y = (uint16_t*)pic->data[0];
u = (uint16_t*)pic->data[1];
v = (uint16_t*)pic->data[2];
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
for (h = 0; h < avctx->height; h++) {
y = (uint16_t*)frame->data[0] + slice_start * frame->linesize[0] / 2;
u = (uint16_t*)frame->data[1] + slice_start * frame->linesize[1] / 2;
v = (uint16_t*)frame->data[2] + slice_start * frame->linesize[2] / 2;
for (h = slice_start; h < slice_end; h++) {
const uint32_t *src = (const uint32_t*)psrc;
uint32_t val;
@ -155,11 +129,65 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
psrc += stride;
y += pic->linesize[0] / 2 - avctx->width + (avctx->width & 1);
u += pic->linesize[1] / 2 - avctx->width / 2;
v += pic->linesize[2] / 2 - avctx->width / 2;
y += frame->linesize[0] / 2 - avctx->width + (avctx->width & 1);
u += frame->linesize[1] / 2 - avctx->width / 2;
v += frame->linesize[2] / 2 - avctx->width / 2;
}
return 0;
}
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
V210DecContext *s = avctx->priv_data;
ThreadData td;
int ret, stride, aligned_input;
ThreadFrame frame = { .f = data };
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
if (s->custom_stride )
stride = s->custom_stride;
else {
int aligned_width = ((avctx->width + 47) / 48) * 48;
stride = aligned_width * 8 / 3;
}
td.stride = stride;
if (avpkt->size < stride * avctx->height) {
if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
stride = avpkt->size / avctx->height;
if (!s->stride_warning_shown)
av_log(avctx, AV_LOG_WARNING, "Broken v210 with too small padding (64 byte) detected\n");
s->stride_warning_shown = 1;
} else {
av_log(avctx, AV_LOG_ERROR, "packet too small\n");
return AVERROR_INVALIDDATA;
}
}
if ( avctx->codec_tag == MKTAG('C', '2', '1', '0')
&& avpkt->size > 64
&& AV_RN32(psrc) == AV_RN32("INFO")
&& avpkt->size - 64 >= stride * avctx->height)
psrc += 64;
aligned_input = !((uintptr_t)psrc & 0x1f) && !(stride & 0x1f);
if (aligned_input != s->aligned_input) {
s->aligned_input = aligned_input;
ff_v210dec_init(s);
}
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
td.buf = (uint8_t*)psrc;
td.frame = pic;
avctx->execute2(avctx, v210_decode_slice, &td, NULL, s->thread_count);
if (avctx->field_order > AV_FIELD_PROGRESSIVE) {
/* we have interlaced material flagged in container */
pic->interlaced_frame = 1;
@ -194,6 +222,8 @@ AVCodec ff_v210_decoder = {
.priv_data_size = sizeof(V210DecContext),
.init = decode_init,
.decode = decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &v210dec_class,
};

View File

@ -27,6 +27,7 @@ typedef struct {
AVClass *av_class;
int custom_stride;
int aligned_input;
int thread_count;
int stride_warning_shown;
void (*unpack_frame)(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width);
} V210DecContext;

View File

@ -24,6 +24,13 @@
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "internal.h"
#include "thread.h"
typedef struct ThreadData {
AVFrame *frame;
uint8_t *buf;
int stride;
} ThreadData;
static av_cold int v410_decode_init(AVCodecContext *avctx)
{
@ -42,31 +49,24 @@ static av_cold int v410_decode_init(AVCodecContext *avctx)
return 0;
}
static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
static int v410_decode_slice(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
AVFrame *pic = data;
uint8_t *src = avpkt->data;
ThreadData *td = arg;
AVFrame *pic = td->frame;
int stride = td->stride;
int thread_count = av_clip(avctx->thread_count, 1, avctx->height/4);
int slice_start = (avctx->height * jobnr) / thread_count;
int slice_end = (avctx->height * (jobnr+1)) / thread_count;
const uint8_t *src = td->buf + stride * slice_start;
uint16_t *y, *u, *v;
uint32_t val;
int i, j, ret;
int i, j;
if (avpkt->size < 4 * avctx->height * avctx->width) {
av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
return AVERROR(EINVAL);
}
y = (uint16_t*)pic->data[0] + slice_start * (pic->linesize[0] >> 1);
u = (uint16_t*)pic->data[1] + slice_start * (pic->linesize[1] >> 1);
v = (uint16_t*)pic->data[2] + slice_start * (pic->linesize[2] >> 1);
if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->key_frame = 1;
pic->pict_type = AV_PICTURE_TYPE_I;
y = (uint16_t *)pic->data[0];
u = (uint16_t *)pic->data[1];
v = (uint16_t *)pic->data[2];
for (i = 0; i < avctx->height; i++) {
for (i = slice_start; i < slice_end; i++) {
for (j = 0; j < avctx->width; j++) {
val = AV_RL32(src);
@ -82,6 +82,35 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
v += pic->linesize[2] >> 1;
}
return 0;
}
static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
ThreadData td;
ThreadFrame frame = { .f = data };
AVFrame *pic = data;
uint8_t *src = avpkt->data;
int ret;
int thread_count = av_clip(avctx->thread_count, 1, avctx->height/4);
td.stride = avctx->width * 4;
if (avpkt->size < 4 * avctx->height * avctx->width) {
av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
return AVERROR(EINVAL);
}
if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
pic->key_frame = 1;
pic->pict_type = AV_PICTURE_TYPE_I;
td.buf = src;
td.frame = pic;
avctx->execute2(avctx, v410_decode_slice, &td, NULL, thread_count);
*got_frame = 1;
return avpkt->size;
@ -94,5 +123,6 @@ AVCodec ff_v410_decoder = {
.id = AV_CODEC_ID_V410,
.init = v410_decode_init,
.decode = v410_decode_frame,
.capabilities = AV_CODEC_CAP_DR1,
.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS
};

View File

@ -717,8 +717,8 @@ int ff_v4l2_context_init(V4L2Context* ctx)
ctx->num_buffers = req.count;
ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
if (!ctx->buffers) {
av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
return AVERROR(ENOMEM);
av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
return AVERROR(ENOMEM);
}
for (i = 0; i < req.count; i++) {

View File

@ -201,6 +201,7 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx)
capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
capture->av_pix_fmt = avctx->pix_fmt;
s->avctx = avctx;
ret = ff_v4l2_m2m_codec_init(priv);
if (ret) {
av_log(avctx, AV_LOG_ERROR, "can't configure decoder\n");
@ -209,7 +210,6 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx)
return ret;
}
s->avctx = avctx;
return v4l2_prepare_decoder(s);
}

View File

@ -889,7 +889,7 @@ static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
q2 = FFABS(q2) * 2 + ((q2 < 0) ? 0 : v->halfpq) - 1;
if (q2 && q1 != q2) {
for (k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
ac_val2[k] = (int)(ac_val2[k] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
for (k = 1; k < 8; k++) {
block[k << sh] = ac_val2[k] * scale;
@ -1036,10 +1036,10 @@ static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
if (q2 && q1 != q2) {
if (dc_pred_dir) { // left
for (k = 1; k < 8; k++)
block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
block[k << v->left_blk_sh] += (int)(ac_val[k] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
} else { //top
for (k = 1; k < 8; k++)
block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
block[k << v->top_blk_sh] += (int)(ac_val[k + 8] * q2 * (unsigned)ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
} else {
if (dc_pred_dir) { // left

View File

@ -28,8 +28,8 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 58
#define LIBAVCODEC_VERSION_MINOR 62
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_MINOR 64
#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \

View File

@ -894,6 +894,14 @@ static int get_cv_color_primaries(AVCodecContext *avctx,
*primaries = NULL;
break;
case AVCOL_PRI_BT470BG:
*primaries = kCVImageBufferColorPrimaries_EBU_3213;
break;
case AVCOL_PRI_SMPTE170M:
*primaries = kCVImageBufferColorPrimaries_SMPTE_C;
break;
case AVCOL_PRI_BT709:
*primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
break;

View File

@ -179,6 +179,9 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
/* drop incomplete chunks */
buf_size = audio_chunks * s->chunk_size;
if (silent_chunks + audio_chunks >= INT_MAX / avctx->block_align)
return AVERROR_INVALIDDATA;
/* get output buffer */
frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
avctx->channels;

View File

@ -2715,7 +2715,8 @@ int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->next_framep[VP56_FRAME_CURRENT] = curframe;
ff_thread_finish_setup(avctx);
if (avctx->codec->update_thread_context)
ff_thread_finish_setup(avctx);
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);

View File

@ -123,6 +123,7 @@ typedef struct WMACodecContext {
uint8_t last_superframe[MAX_CODED_SUPERFRAME_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; /* padding added */
int last_bitoffset;
int last_superframe_len;
int exponents_initialized;
float noise_table[NOISE_TAB_SIZE];
int noise_index;
float noise_mult; /* XXX: suppress that and integrate it in the noise array */

View File

@ -587,6 +587,9 @@ static int wma_decode_block(WMACodecContext *s)
s->exponents_bsize[ch] = bsize;
}
}
s->exponents_initialized = 1;
}else if (!s->exponents_initialized) {
return AVERROR_INVALIDDATA;
}
/* parse spectral coefficients : just RLE encoding */

View File

@ -1327,6 +1327,7 @@ AVCodec ff_wmalossless_decoder = {
.decode = decode_packet,
.flush = flush,
.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },

View File

@ -1565,9 +1565,9 @@ static void save_bits(WMAProDecodeCtx *s, GetBitContext* gb, int len,
s->frame_offset = get_bits_count(gb) & 7;
s->num_saved_bits = s->frame_offset;
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
}
buflen = (put_bits_count(&s->pb) + len + 8) >> 3;
buflen = (s->num_saved_bits + len + 7) >> 3;
} else
buflen = (put_bits_count(&s->pb) + len + 7) >> 3;
if (len <= 0 || buflen > MAX_FRAMESIZE) {
avpriv_request_sample(s->avctx, "Too small input buffer");
@ -1644,6 +1644,7 @@ static int decode_packet(AVCodecContext *avctx, WMAProDecodeCtx *s,
if (avctx->codec_id == AV_CODEC_ID_WMAPRO && buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, "Input packet too small (%d < %d)\n",
buf_size, avctx->block_align);
s->packet_loss = 1;
return AVERROR_INVALIDDATA;
}
@ -1803,6 +1804,11 @@ static int xma_decode_packet(AVCodecContext *avctx, void *data,
ret = decode_packet(avctx, &s->xma[s->current_stream], s->frames[s->current_stream],
&got_stream_frame_ptr, avpkt);
if (got_stream_frame_ptr && s->offset[s->current_stream] >= 64) {
got_stream_frame_ptr = 0;
ret = AVERROR_INVALIDDATA;
}
/* copy stream samples (1/2ch) to sample buffer (Nch) */
if (got_stream_frame_ptr) {
int start_ch = s->start_channel[s->current_stream];
@ -1930,6 +1936,8 @@ static av_cold int xma_decode_init(AVCodecContext *avctx)
s->start_channel[i] = start_channels;
start_channels += s->xma[i].nb_channels;
}
if (start_channels != avctx->channels)
return AVERROR_INVALIDDATA;
return ret;
}

View File

@ -1523,7 +1523,7 @@ static int synth_frame(AVCodecContext *ctx, GetBitContext *gb, int frame_idx,
/* "pitch-diff-per-sample" for calculation of pitch per sample */
s->pitch_diff_sh16 =
((cur_pitch_val - s->last_pitch_val) << 16) / MAX_FRAMESIZE;
(cur_pitch_val - s->last_pitch_val) * (1 << 16) / MAX_FRAMESIZE;
}
/* Global gain (if silence) and pitch-adaptive window coordinates */

View File

@ -149,6 +149,7 @@ struct decklink_ctx {
int channels;
int audio_depth;
unsigned long tc_seen; // used with option wait_for_tc
};
typedef enum { DIRECTION_IN, DIRECTION_OUT} decklink_direction_t;

View File

@ -58,6 +58,7 @@ struct decklink_cctx {
int copyts;
int64_t timestamp_align;
int timing_offset;
int wait_for_tc;
};
#endif /* AVDEVICE_DECKLINK_COMMON_C_H */

View File

@ -784,6 +784,8 @@ HRESULT decklink_input_callback::VideoInputFrameArrived(
if (packed_metadata) {
if (av_packet_add_side_data(&pkt, AV_PKT_DATA_STRINGS_METADATA, packed_metadata, metadata_len) < 0)
av_freep(&packed_metadata);
else if (!ctx->tc_seen)
ctx->tc_seen = ctx->frameCount;
}
}
}
@ -793,6 +795,14 @@ HRESULT decklink_input_callback::VideoInputFrameArrived(
}
}
if (ctx->tc_format && cctx->wait_for_tc && !ctx->tc_seen) {
av_log(avctx, AV_LOG_WARNING, "No TC detected yet. wait_for_tc set. Dropping. \n");
av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - "
"- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
return S_OK;
}
pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, abs_wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts, cctx->copyts);
pkt.dts = pkt.pts;

View File

@ -85,6 +85,7 @@ static const AVOption options[] = {
{ "audio_depth", "audio bitdepth (16 or 32)", OFFSET(audio_depth), AV_OPT_TYPE_INT, { .i64 = 16}, 16, 32, DEC },
{ "decklink_copyts", "copy timestamps, do not remove the initial offset", OFFSET(copyts), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
{ "timestamp_align", "capture start time alignment (in seconds)", OFFSET(timestamp_align), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, DEC },
{ "wait_for_tc", "drop frames till a frame with timecode is received. TC format must be set", OFFSET(wait_for_tc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
{ NULL },
};

View File

@ -302,9 +302,13 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
if (lavfi->dump_graph) {
char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
fputs(dump, stderr);
fflush(stderr);
av_free(dump);
if (dump != NULL) {
fputs(dump, stderr);
fflush(stderr);
av_free(dump);
} else {
FAIL(AVERROR(ENOMEM));
}
}
/* fill each stream with the information in the corresponding sink */

View File

@ -29,7 +29,7 @@
#define LIBAVDEVICE_VERSION_MAJOR 58
#define LIBAVDEVICE_VERSION_MINOR 9
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_MICRO 101
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \

View File

@ -146,6 +146,11 @@ static int xcbgrab_reposition(AVFormatContext *s,
return 0;
}
static void xcbgrab_image_reply_free(void *opaque, uint8_t *data)
{
free(opaque);
}
static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
{
XCBGrabContext *c = s->priv_data;
@ -154,7 +159,7 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
xcb_drawable_t drawable = c->screen->root;
xcb_generic_error_t *e = NULL;
uint8_t *data;
int length, ret;
int length;
iq = xcb_get_image(c->conn, XCB_IMAGE_FORMAT_Z_PIXMAP, drawable,
c->x, c->y, c->width, c->height, ~0);
@ -168,6 +173,7 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
"sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
e->response_type, e->error_code,
e->sequence, e->resource_id, e->minor_code, e->major_code);
free(e);
return AVERROR(EACCES);
}
@ -177,14 +183,18 @@ static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
data = xcb_get_image_data(img);
length = xcb_get_image_data_length(img);
ret = av_new_packet(pkt, length);
av_init_packet(pkt);
if (!ret)
memcpy(pkt->data, data, length);
pkt->buf = av_buffer_create(data, length, xcbgrab_image_reply_free, img, 0);
if (!pkt->buf) {
free(img);
return AVERROR(ENOMEM);
}
free(img);
pkt->data = data;
pkt->size = length;
return ret;
return 0;
}
static void wait_frame(AVFormatContext *s, AVPacket *pkt)
@ -276,6 +286,7 @@ static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt)
e->response_type, e->error_code,
e->sequence, e->resource_id, e->minor_code, e->major_code);
free(e);
return AVERROR(EACCES);
}
@ -537,6 +548,8 @@ static int create_stream(AVFormatContext *s)
gc = xcb_get_geometry(c->conn, c->screen->root);
geo = xcb_get_geometry_reply(c->conn, gc, NULL);
if (!geo)
return AVERROR_EXTERNAL;
if (c->x + c->width > geo->width ||
c->y + c->height > geo->height) {
@ -546,6 +559,7 @@ static int create_stream(AVFormatContext *s)
c->width, c->height,
c->x, c->y,
geo->width, geo->height);
free(geo);
return AVERROR(EINVAL);
}

View File

@ -88,6 +88,7 @@ OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o framesync.o
OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
OBJS-$(CONFIG_AXCORRELATE_FILTER) += af_axcorrelate.o
OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
@ -289,7 +290,7 @@ OBJS-$(CONFIG_LUMAKEY_FILTER) += vf_lumakey.o
OBJS-$(CONFIG_LUT1D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUT2_FILTER) += vf_lut2.o framesync.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o framesync.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync.o
@ -358,12 +359,12 @@ OBJS-$(CONFIG_ROBERTS_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o
opencl/convolution.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale.o
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o vf_scale_cuda.ptx.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale_eval.o
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o vf_scale_cuda.ptx.o scale_eval.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale_eval.o
OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_scale_qsv.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale.o vaapi_vpp.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale_eval.o
OBJS-$(CONFIG_SCROLL_FILTER) += vf_scroll.o
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o
@ -442,6 +443,7 @@ OBJS-$(CONFIG_XSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o yadif_common.o
OBJS-$(CONFIG_YADIF_CUDA_FILTER) += vf_yadif_cuda.o vf_yadif_cuda.ptx.o \
yadif_common.o
OBJS-$(CONFIG_YAEPBLUR_FILTER) += vf_yaepblur.o
OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_ZOOMPAN_FILTER) += vf_zoompan.o
OBJS-$(CONFIG_ZSCALE_FILTER) += vf_zscale.o

View File

@ -585,7 +585,7 @@ static int filter_frame(AVFilterLink *inlink)
out->nb_samples = FFMIN(s->hop_size, s->samples_left);
out->pts = s->pts;
s->pts += s->hop_size;
s->pts += av_rescale_q(s->hop_size, (AVRational){1, outlink->sample_rate}, outlink->time_base);
s->detected_errors += detected_errors;
s->nb_samples += out->nb_samples * inlink->channels;

View File

@ -24,6 +24,7 @@
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
typedef struct AudioEchoContext {
@ -36,6 +37,7 @@ typedef struct AudioEchoContext {
uint8_t **delayptrs;
int max_samples, fade_out;
int *samples;
int eof;
int64_t next_pts;
void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
@ -302,42 +304,65 @@ static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AudioEchoContext *s = ctx->priv;
int ret;
int nb_samples = FFMIN(s->fade_out, 2048);
AVFrame *frame = ff_get_audio_buffer(outlink, nb_samples);
ret = ff_request_frame(ctx->inputs[0]);
if (!frame)
return AVERROR(ENOMEM);
s->fade_out -= nb_samples;
if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
int nb_samples = FFMIN(s->fade_out, 2048);
AVFrame *frame;
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
outlink->channels,
frame->format);
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
s->fade_out -= nb_samples;
s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
frame->nb_samples, outlink->channels);
av_samples_set_silence(frame->extended_data, 0,
frame->nb_samples,
outlink->channels,
frame->format);
frame->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
frame->nb_samples, outlink->channels);
return ff_filter_frame(outlink, frame);
}
frame->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
static int activate(AVFilterContext *ctx)
{
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
AudioEchoContext *s = ctx->priv;
AVFrame *in;
int ret, status;
int64_t pts;
return ff_filter_frame(outlink, frame);
FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
ret = ff_inlink_consume_frame(inlink, &in);
if (ret < 0)
return ret;
if (ret > 0)
return filter_frame(inlink, in);
if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
if (status == AVERROR_EOF)
s->eof = 1;
}
return ret;
if (s->eof && s->fade_out <= 0) {
ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
return 0;
}
if (!s->eof)
FF_FILTER_FORWARD_WANTED(outlink, inlink);
return request_frame(outlink);
}
static const AVFilterPad aecho_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
@ -345,7 +370,6 @@ static const AVFilterPad aecho_inputs[] = {
static const AVFilterPad aecho_outputs[] = {
{
.name = "default",
.request_frame = request_frame,
.config_props = config_output,
.type = AVMEDIA_TYPE_AUDIO,
},
@ -359,6 +383,7 @@ AVFilter ff_af_aecho = {
.priv_size = sizeof(AudioEchoContext),
.priv_class = &aecho_class,
.init = init,
.activate = activate,
.uninit = uninit,
.inputs = aecho_inputs,
.outputs = aecho_outputs,

View File

@ -141,24 +141,25 @@ typedef struct AudioFFTDeNoiseContext {
} AudioFFTDeNoiseContext;
#define OFFSET(x) offsetof(AudioFFTDeNoiseContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption afftdn_options[] = {
{ "nr", "set the noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_FLOAT, {.dbl = 12}, .01, 97, A },
{ "nf", "set the noise floor", OFFSET(noise_floor), AV_OPT_TYPE_FLOAT, {.dbl =-50}, -80,-20, A },
{ "nt", "set the noise type", OFFSET(noise_type), AV_OPT_TYPE_INT, {.i64 = WHITE_NOISE}, WHITE_NOISE, NB_NOISE-1, A, "type" },
{ "w", "white noise", 0, AV_OPT_TYPE_CONST, {.i64 = WHITE_NOISE}, 0, 0, A, "type" },
{ "v", "vinyl noise", 0, AV_OPT_TYPE_CONST, {.i64 = VINYL_NOISE}, 0, 0, A, "type" },
{ "s", "shellac noise", 0, AV_OPT_TYPE_CONST, {.i64 = SHELLAC_NOISE}, 0, 0, A, "type" },
{ "c", "custom noise", 0, AV_OPT_TYPE_CONST, {.i64 = CUSTOM_NOISE}, 0, 0, A, "type" },
{ "bn", "set the custom bands noise", OFFSET(band_noise_str), AV_OPT_TYPE_STRING, {.str = 0}, 0, 0, A },
{ "rf", "set the residual floor", OFFSET(residual_floor), AV_OPT_TYPE_FLOAT, {.dbl =-38}, -80,-20, A },
{ "tn", "track noise", OFFSET(track_noise), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A },
{ "tr", "track residual", OFFSET(track_residual), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, A },
{ "om", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64 = OUT_MODE}, 0, NB_MODES-1, A, "mode" },
{ "i", "input", 0, AV_OPT_TYPE_CONST, {.i64 = IN_MODE}, 0, 0, A, "mode" },
{ "o", "output", 0, AV_OPT_TYPE_CONST, {.i64 = OUT_MODE}, 0, 0, A, "mode" },
{ "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64 = NOISE_MODE}, 0, 0, A, "mode" },
{ "nr", "set the noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_FLOAT, {.dbl = 12}, .01, 97, AFR },
{ "nf", "set the noise floor", OFFSET(noise_floor), AV_OPT_TYPE_FLOAT, {.dbl =-50}, -80,-20, AFR },
{ "nt", "set the noise type", OFFSET(noise_type), AV_OPT_TYPE_INT, {.i64 = WHITE_NOISE}, WHITE_NOISE, NB_NOISE-1, AF, "type" },
{ "w", "white noise", 0, AV_OPT_TYPE_CONST, {.i64 = WHITE_NOISE}, 0, 0, AF, "type" },
{ "v", "vinyl noise", 0, AV_OPT_TYPE_CONST, {.i64 = VINYL_NOISE}, 0, 0, AF, "type" },
{ "s", "shellac noise", 0, AV_OPT_TYPE_CONST, {.i64 = SHELLAC_NOISE}, 0, 0, AF, "type" },
{ "c", "custom noise", 0, AV_OPT_TYPE_CONST, {.i64 = CUSTOM_NOISE}, 0, 0, AF, "type" },
{ "bn", "set the custom bands noise", OFFSET(band_noise_str), AV_OPT_TYPE_STRING, {.str = 0}, 0, 0, AF },
{ "rf", "set the residual floor", OFFSET(residual_floor), AV_OPT_TYPE_FLOAT, {.dbl =-38}, -80,-20, AFR },
{ "tn", "track noise", OFFSET(track_noise), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AFR },
{ "tr", "track residual", OFFSET(track_residual), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AFR },
{ "om", "set output mode", OFFSET(output_mode), AV_OPT_TYPE_INT, {.i64 = OUT_MODE}, 0, NB_MODES-1, AFR, "mode" },
{ "i", "input", 0, AV_OPT_TYPE_CONST, {.i64 = IN_MODE}, 0, 0, AFR, "mode" },
{ "o", "output", 0, AV_OPT_TYPE_CONST, {.i64 = OUT_MODE}, 0, 0, AFR, "mode" },
{ "n", "noise", 0, AV_OPT_TYPE_CONST, {.i64 = NOISE_MODE}, 0, 0, AFR, "mode" },
{ NULL }
};
@ -1260,7 +1261,7 @@ static int output_frame(AVFilterLink *inlink)
ret = ff_filter_frame(outlink, out);
if (ret < 0)
goto end;
s->pts += s->sample_advance;
s->pts += av_rescale_q(s->sample_advance, (AVRational){1, outlink->sample_rate}, outlink->time_base);
end:
av_frame_free(&in);
@ -1375,6 +1376,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
{
AudioFFTDeNoiseContext *s = ctx->priv;
int need_reset = 0;
int ret = 0;
if (!strcmp(cmd, "sample_noise") ||
!strcmp(cmd, "sn")) {
@ -1386,31 +1388,11 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
s->sample_noise_start = 0;
s->sample_noise_end = 1;
}
} else if (!strcmp(cmd, "nr") ||
!strcmp(cmd, "noise_reduction")) {
float nr;
if (av_sscanf(args, "%f", &nr) == 1) {
s->noise_reduction = av_clipf(nr, 0.01, 97);
need_reset = 1;
}
} else if (!strcmp(cmd, "nf") ||
!strcmp(cmd, "noise_floor")) {
float nf;
if (av_sscanf(args, "%f", &nf) == 1) {
s->noise_floor = av_clipf(nf, -80, -20);
need_reset = 1;
}
} else if (!strcmp(cmd, "output_mode") ||
!strcmp(cmd, "om")) {
if (!strcmp(args, "i")) {
s->output_mode = IN_MODE;
} else if (!strcmp(args, "o")) {
s->output_mode = OUT_MODE;
} else if (!strcmp(args, "n")) {
s->output_mode = NOISE_MODE;
}
} else {
ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
if (ret < 0)
return ret;
need_reset = 1;
}
if (need_reset)

View File

@ -315,7 +315,7 @@ static int filter_frame(AVFilterLink *inlink)
}
out->pts = s->pts;
s->pts += s->hop_size;
s->pts += av_rescale_q(s->hop_size, (AVRational){1, outlink->sample_rate}, outlink->time_base);
for (ch = 0; ch < inlink->channels; ch++) {
float *dst = (float *)out->extended_data[ch];

View File

@ -318,7 +318,7 @@ static int activate(AVFilterContext *ctx)
dst = (double *)out->data[0];
out->pts = s->pts;
s->pts += nb_samples;
s->pts += av_rescale_q(nb_samples, (AVRational){1, ctx->outputs[0]->sample_rate}, ctx->outputs[0]->time_base);
gate(s, (double *)in[0]->data[0], dst,
(double *)in[1]->data[0], nb_samples,

Some files were not shown because too many files have changed in this diff Show More