ffmpeg/libavfilter/vf_deinterlace_qsv.c

/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* deinterlace video filter - QSV
*/
#include <mfx/mfxvideo.h>
#include <stdio.h>
#include <string.h>
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavfilter/qsvvpp.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
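/* internal return codes passed from process_frame() back to its caller:
 * QSVDEINT_MORE_OUTPUT - the VPP still has buffered output for this input,
 *                        call process_frame() again
 * QSVDEINT_MORE_INPUT  - the VPP needs another input frame before it can
 *                        produce output */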
enum {
QSVDEINT_MORE_OUTPUT = 1,
QSVDEINT_MORE_INPUT,
};
typedef struct QSVDeintContext {
const AVClass *class;
AVBufferRef *hw_frames_ctx;
/* a clone of the main session, used internally for deinterlacing */
mfxSession session;
mfxMemId *mem_ids;
int nb_mem_ids;
mfxFrameSurface1 **surface_ptrs;
int nb_surface_ptrs;
mfxExtOpaqueSurfaceAlloc opaque_alloc;
mfxExtVPPDeinterlacing deint_conf;
mfxExtBuffer *ext_buffers[2];
int num_ext_buffers;
QSVFrame *work_frames;
int64_t last_pts;
int eof;
/* option for the deinterlacing algorithm to be used */
int mode;
} QSVDeintContext;
static av_cold void qsvdeint_uninit(AVFilterContext *ctx)
2016-07-02 12:12:36 +02:00
{
QSVDeintContext *s = ctx->priv;
QSVFrame *cur;
if (s->session) {
MFXClose(s->session);
s->session = NULL;
}
av_buffer_unref(&s->hw_frames_ctx);
cur = s->work_frames;
while (cur) {
s->work_frames = cur->next;
av_frame_free(&cur->frame);
av_freep(&cur);
cur = s->work_frames;
}
av_freep(&s->mem_ids);
s->nb_mem_ids = 0;
av_freep(&s->surface_ptrs);
s->nb_surface_ptrs = 0;
}
static int qsvdeint_query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pixel_formats[] = {
AV_PIX_FMT_QSV, AV_PIX_FMT_NONE,
};
return ff_set_common_formats_from_list(ctx, pixel_formats);
}
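/* frame allocator callbacks installed on the deinterlacing session; the
 * surface pool is fixed and owned by the AVHWFramesContext, so "allocating"
 * just hands back the existing MemIds, and locking surfaces into system
 * memory is not supported for video-memory frames */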
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
mfxFrameAllocResponse *resp)
{
AVFilterContext *ctx = pthis;
QSVDeintContext *s = ctx->priv;
if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
!(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
!(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
return MFX_ERR_UNSUPPORTED;
resp->mids = s->mem_ids;
resp->NumFrameActual = s->nb_mem_ids;
return MFX_ERR_NONE;
}
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
return MFX_ERR_NONE;
}
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
return MFX_ERR_UNSUPPORTED;
}
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
return MFX_ERR_UNSUPPORTED;
}
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
mfxHDLPair *pair_dst = (mfxHDLPair*)hdl;
mfxHDLPair *pair_src = (mfxHDLPair*)mid;
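    /* MemIds are stored as mfxHDLPairs; the second member is copied only
     * when it is not the MFX_INFINITE sentinel (it may carry e.g. a D3D11
     * texture array index) */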
pair_dst->first = pair_src->first;
if (pair_src->second != (mfxMemId)MFX_INFINITE)
pair_dst->second = pair_src->second;
return MFX_ERR_NONE;
}
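/* create and configure the mfxSession that performs the actual VPP
 * deinterlacing work */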
static int init_out_session(AVFilterContext *ctx)
{
QSVDeintContext *s = ctx->priv;
AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)s->hw_frames_ctx->data;
AVQSVFramesContext *hw_frames_hwctx = hw_frames_ctx->hwctx;
AVQSVDeviceContext *device_hwctx = hw_frames_ctx->device_ctx->hwctx;
int opaque = !!(hw_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);
mfxHDL handle = NULL;
mfxHandleType handle_type;
mfxVersion ver;
mfxIMPL impl;
mfxVideoParam par;
mfxStatus err;
int i;
/* extract the properties of the "master" session given to us */
err = MFXQueryIMPL(device_hwctx->session, &impl);
if (err == MFX_ERR_NONE)
err = MFXQueryVersion(device_hwctx->session, &ver);
if (err != MFX_ERR_NONE) {
av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
return AVERROR_UNKNOWN;
}
if (MFX_IMPL_VIA_VAAPI == MFX_IMPL_VIA_MASK(impl)) {
handle_type = MFX_HANDLE_VA_DISPLAY;
} else if (MFX_IMPL_VIA_D3D11 == MFX_IMPL_VIA_MASK(impl)) {
handle_type = MFX_HANDLE_D3D11_DEVICE;
} else if (MFX_IMPL_VIA_D3D9 == MFX_IMPL_VIA_MASK(impl)) {
handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
} else {
av_log(ctx, AV_LOG_ERROR, "Error unsupported handle type\n");
return AVERROR_UNKNOWN;
}
err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_type, &handle);
if (err < 0)
return ff_qsvvpp_print_error(ctx, err, "Error getting the session handle");
else if (err > 0) {
ff_qsvvpp_print_warning(ctx, err, "Warning in getting the session handle");
return AVERROR_UNKNOWN;
}
/* create a "slave" session with those same properties, to be used for
* actual deinterlacing */
err = MFXInit(impl, &ver, &s->session);
if (err < 0)
return ff_qsvvpp_print_error(ctx, err, "Error initializing a session for deinterlacing");
else if (err > 0) {
ff_qsvvpp_print_warning(ctx, err, "Warning in session initialization");
return AVERROR_UNKNOWN;
}
if (handle) {
err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
if (err != MFX_ERR_NONE)
return AVERROR_UNKNOWN;
}
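    /* with API >= 1.25, join the child session to the parent one so the
     * two can share internal resources */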
if (QSV_RUNTIME_VERSION_ATLEAST(ver, 1, 25)) {
err = MFXJoinSession(device_hwctx->session, s->session);
if (err != MFX_ERR_NONE)
return AVERROR_UNKNOWN;
}
memset(&par, 0, sizeof(par));
s->deint_conf.Header.BufferId = MFX_EXTBUFF_VPP_DEINTERLACING;
s->deint_conf.Header.BufferSz = sizeof(s->deint_conf);
s->deint_conf.Mode = s->mode;
s->ext_buffers[s->num_ext_buffers++] = (mfxExtBuffer *)&s->deint_conf;
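    /* with opaque memory the runtime tracks the surfaces itself and only
     * needs the list of surface pointers; otherwise install an allocator
     * that exposes the pool's MemIds to this session */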
if (opaque) {
s->surface_ptrs = av_calloc(hw_frames_hwctx->nb_surfaces,
sizeof(*s->surface_ptrs));
if (!s->surface_ptrs)
return AVERROR(ENOMEM);
for (i = 0; i < hw_frames_hwctx->nb_surfaces; i++)
s->surface_ptrs[i] = hw_frames_hwctx->surfaces + i;
s->nb_surface_ptrs = hw_frames_hwctx->nb_surfaces;
s->opaque_alloc.In.Surfaces = s->surface_ptrs;
s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs;
s->opaque_alloc.In.Type = hw_frames_hwctx->frame_type;
s->opaque_alloc.Out = s->opaque_alloc.In;
s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
s->ext_buffers[s->num_ext_buffers++] = (mfxExtBuffer *)&s->opaque_alloc;
par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
} else {
mfxFrameAllocator frame_allocator = {
.pthis = ctx,
.Alloc = frame_alloc,
.Lock = frame_lock,
.Unlock = frame_unlock,
.GetHDL = frame_get_hdl,
.Free = frame_free,
};
s->mem_ids = av_calloc(hw_frames_hwctx->nb_surfaces,
sizeof(*s->mem_ids));
if (!s->mem_ids)
return AVERROR(ENOMEM);
for (i = 0; i < hw_frames_hwctx->nb_surfaces; i++)
s->mem_ids[i] = hw_frames_hwctx->surfaces[i].Data.MemId;
s->nb_mem_ids = hw_frames_hwctx->nb_surfaces;
err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
if (err != MFX_ERR_NONE)
return AVERROR_UNKNOWN;
par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
}
par.ExtParam = s->ext_buffers;
par.NumExtParam = s->num_ext_buffers;
par.AsyncDepth = 1; // TODO async
par.vpp.In = hw_frames_hwctx->surfaces[0].Info;
par.vpp.In.CropW = ctx->inputs[0]->w;
par.vpp.In.CropH = ctx->inputs[0]->h;
if (ctx->inputs[0]->frame_rate.num) {
par.vpp.In.FrameRateExtN = ctx->inputs[0]->frame_rate.num;
par.vpp.In.FrameRateExtD = ctx->inputs[0]->frame_rate.den;
} else {
par.vpp.In.FrameRateExtN = ctx->inputs[0]->time_base.num;
par.vpp.In.FrameRateExtD = ctx->inputs[0]->time_base.den;
}
par.vpp.Out = par.vpp.In;
if (ctx->outputs[0]->frame_rate.num) {
par.vpp.Out.FrameRateExtN = ctx->outputs[0]->frame_rate.num;
par.vpp.Out.FrameRateExtD = ctx->outputs[0]->frame_rate.den;
} else {
par.vpp.Out.FrameRateExtN = ctx->outputs[0]->time_base.num;
par.vpp.Out.FrameRateExtD = ctx->outputs[0]->time_base.den;
}
/* Print input memory mode */
ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0x0F, "VPP");
/* Print output memory mode */
ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0xF0, "VPP");
err = MFXVideoVPP_Init(s->session, &par);
if (err < 0)
return ff_qsvvpp_print_error(ctx, err,
"Error opening the VPP for deinterlacing");
else if (err > 0) {
ff_qsvvpp_print_warning(ctx, err,
"Warning in VPP initialization");
return AVERROR_UNKNOWN;
}
return 0;
}
static int qsvdeint_config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = ctx->inputs[0];
QSVDeintContext *s = ctx->priv;
int ret;
qsvdeint_uninit(ctx);
s->last_pts = AV_NOPTS_VALUE;
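    /* deinterlacing emits one frame per field: double the frame rate and
     * halve the time base so output timestamps stay exact */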
outlink->frame_rate = av_mul_q(inlink->frame_rate,
(AVRational){ 2, 1 });
outlink->time_base = av_mul_q(inlink->time_base,
(AVRational){ 1, 2 });
/* check that we have a hw context */
if (!inlink->hw_frames_ctx) {
av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
return AVERROR(EINVAL);
}
s->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx);
if (!s->hw_frames_ctx)
return AVERROR(ENOMEM);
av_buffer_unref(&outlink->hw_frames_ctx);
outlink->hw_frames_ctx = av_buffer_ref(inlink->hw_frames_ctx);
if (!outlink->hw_frames_ctx) {
qsvdeint_uninit(ctx);
return AVERROR(ENOMEM);
}
ret = init_out_session(ctx);
if (ret < 0)
return ret;
return 0;
}
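/* free the AVFrames whose surfaces libmfx no longer holds locked; the
 * QSVFrame list nodes themselves are kept for reuse */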
static void clear_unused_frames(QSVDeintContext *s)
{
QSVFrame *cur = s->work_frames;
while (cur) {
if (!cur->surface.Data.Locked) {
av_frame_free(&cur->frame);
cur->queued = 0;
}
cur = cur->next;
}
}
static int get_free_frame(QSVDeintContext *s, QSVFrame **f)
{
QSVFrame *frame, **last;
clear_unused_frames(s);
frame = s->work_frames;
last = &s->work_frames;
while (frame) {
if (!frame->queued) {
*f = frame;
return 0;
}
last = &frame->next;
frame = frame->next;
}
frame = av_mallocz(sizeof(*frame));
if (!frame)
return AVERROR(ENOMEM);
*last = frame;
*f = frame;
return 0;
}
static int submit_frame(AVFilterContext *ctx, AVFrame *frame,
mfxFrameSurface1 **surface)
{
QSVDeintContext *s = ctx->priv;
QSVFrame *qf;
int ret;
ret = get_free_frame(s, &qf);
if (ret < 0)
return ret;
qf->frame = frame;
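    /* for AV_PIX_FMT_QSV frames, data[3] carries the mfxFrameSurface1;
     * work on a copy so the per-frame Info adjustments below do not touch
     * the surface shared with the rest of the pool */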
qf->surface = *(mfxFrameSurface1*)qf->frame->data[3];
qf->surface.Data.Locked = 0;
qf->surface.Info.CropW = qf->frame->width;
qf->surface.Info.CropH = qf->frame->height;
qf->surface.Info.PicStruct = !qf->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
(qf->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
MFX_PICSTRUCT_FIELD_BFF);
if (qf->frame->repeat_pict == 1) {
qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
qf->surface.Info.PicStruct |= qf->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
MFX_PICSTRUCT_FIELD_BFF;
} else if (qf->frame->repeat_pict == 2)
qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
else if (qf->frame->repeat_pict == 4)
qf->surface.Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;
if (ctx->inputs[0]->frame_rate.num) {
qf->surface.Info.FrameRateExtN = ctx->inputs[0]->frame_rate.num;
qf->surface.Info.FrameRateExtD = ctx->inputs[0]->frame_rate.den;
} else {
qf->surface.Info.FrameRateExtN = ctx->inputs[0]->time_base.num;
qf->surface.Info.FrameRateExtD = ctx->inputs[0]->time_base.den;
}
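    /* libmfx expects timestamps on a 90 kHz clock */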
qf->surface.Data.TimeStamp = av_rescale_q(qf->frame->pts,
ctx->inputs[0]->time_base,
(AVRational){1, 90000});
*surface = &qf->surface;
qf->queued = 1;
return 0;
}
static int process_frame(AVFilterContext *ctx, const AVFrame *in,
mfxFrameSurface1 *surf_in)
{
QSVDeintContext *s = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
mfxFrameSurface1 *surf_out;
mfxSyncPoint sync = NULL;
mfxStatus err;
int ret, again = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
ret = AVERROR(ENOMEM);
goto fail;
}
surf_out = (mfxFrameSurface1*)out->data[3];
surf_out->Info.CropW = outlink->w;
surf_out->Info.CropH = outlink->h;
surf_out->Info.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
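    /* MFX_WRN_DEVICE_BUSY means the hardware is still occupied: back off
     * briefly and resubmit the same input */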
do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session, surf_in, surf_out,
NULL, &sync);
if (err == MFX_WRN_DEVICE_BUSY)
av_usleep(1);
} while (err == MFX_WRN_DEVICE_BUSY);
if (err == MFX_ERR_MORE_DATA) {
av_frame_free(&out);
return QSVDEINT_MORE_INPUT;
}
if (err < 0 && err != MFX_ERR_MORE_SURFACE) {
ret = ff_qsvvpp_print_error(ctx, err, "Error during deinterlacing");
goto fail;
}
if (!sync) {
av_log(ctx, AV_LOG_ERROR, "No sync during deinterlacing\n");
ret = AVERROR_UNKNOWN;
goto fail;
}
if (err == MFX_ERR_MORE_SURFACE)
again = 1;
do {
err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
} while (err == MFX_WRN_IN_EXECUTION);
if (err < 0) {
ret = ff_qsvvpp_print_error(ctx, err, "Error synchronizing the operation");
goto fail;
}
ret = av_frame_copy_props(out, in);
if (ret < 0)
goto fail;
out->width = outlink->w;
out->height = outlink->h;
out->interlaced_frame = 0;
out->pts = av_rescale_q(out->pts, inlink->time_base, outlink->time_base);
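    /* rate doubling can rescale two successive fields to the same pts;
     * nudge the second one to keep output timestamps strictly increasing */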
if (out->pts == s->last_pts)
out->pts++;
s->last_pts = out->pts;
ret = ff_filter_frame(outlink, out);
if (ret < 0)
return ret;
return again ? QSVDEINT_MORE_OUTPUT : 0;
fail:
av_frame_free(&out);
return ret;
}
static int qsvdeint_filter_frame(AVFilterLink *link, AVFrame *in)
{
AVFilterContext *ctx = link->dst;
mfxFrameSurface1 *surf_in;
int ret;
ret = submit_frame(ctx, in, &surf_in);
if (ret < 0) {
av_frame_free(&in);
return ret;
}
do {
ret = process_frame(ctx, in, surf_in);
if (ret < 0)
return ret;
} while (ret == QSVDEINT_MORE_OUTPUT);
return 0;
}
static int qsvdeint_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
return ff_request_frame(ctx->inputs[0]);
}
#define OFFSET(x) offsetof(QSVDeintContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
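/* Illustrative use from the ffmpeg CLI (a sketch; decoder choice and device
 * setup depend on the platform and build):
 *   ffmpeg -hwaccel qsv -c:v h264_qsv -i interlaced.mp4 \
 *          -vf deinterlace_qsv=mode=advanced -c:v h264_qsv out.mp4 */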
static const AVOption options[] = {
{ "mode", "set deinterlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MFX_DEINTERLACING_ADVANCED}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, "mode"},
{ "bob", "bob algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_BOB}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, "mode"},
{ "advanced", "Motion adaptive algorithm", 0, AV_OPT_TYPE_CONST, {.i64 = MFX_DEINTERLACING_ADVANCED}, MFX_DEINTERLACING_BOB, MFX_DEINTERLACING_ADVANCED, FLAGS, "mode"},
{ NULL },
};
static const AVClass qsvdeint_class = {
.class_name = "deinterlace_qsv",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
static const AVFilterPad qsvdeint_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = qsvdeint_filter_frame,
},
};
static const AVFilterPad qsvdeint_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = qsvdeint_config_props,
.request_frame = qsvdeint_request_frame,
},
};
const AVFilter ff_vf_deinterlace_qsv = {
.name = "deinterlace_qsv",
.description = NULL_IF_CONFIG_SMALL("QuickSync video deinterlacing"),
.uninit = qsvdeint_uninit,
.query_formats = qsvdeint_query_formats,
.priv_size = sizeof(QSVDeintContext),
.priv_class = &qsvdeint_class,
FILTER_INPUTS(qsvdeint_inputs),
FILTER_OUTPUTS(qsvdeint_outputs),
.flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};