From 933765aa0e07653307ff65a6af159a5922f1dc1d Mon Sep 17 00:00:00 2001
From: Paul B Mahol
Date: Thu, 7 Oct 2021 19:33:54 +0200
Subject: [PATCH] avfilter: add xcorrelate video filter

---
 Changelog | 1 +
 doc/filters.texi | 18 ++
 libavfilter/Makefile | 1 +
 libavfilter/allfilters.c | 1 +
 libavfilter/version.h | 2 +-
 libavfilter/vf_convolve.c | 370 +++++++++++++++++++++++++++++++++-----
 6 files changed, 348 insertions(+), 45 deletions(-)

diff --git a/Changelog b/Changelog
index 76f53128ad..e357473893 100644
--- a/Changelog
+++ b/Changelog
@@ -27,6 +27,7 @@ version <next>:
 - asdr audio filter
 - speex decoder
 - limitdiff video filter
+- xcorrelate video filter
 
 
 version 4.4:
diff --git a/doc/filters.texi b/doc/filters.texi
index 653760932e..543df2ef8d 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -22563,6 +22563,24 @@ Set the scaling dimension: @code{2} for @code{2xBR}, @code{3} for
 Default is @code{3}.
 @end table
 
+@section xcorrelate
+Apply normalized cross-correlation between the first and second input video streams.
+
+The dimensions of the second input video stream must be smaller than those of the first input video stream.
+
+The filter accepts the following options:
+
+@table @option
+@item planes
+Set which planes to process.
+
+@item secondary
+Set which secondary video frames will be processed from the second input video
+stream; can be @var{first} or @var{all}. Default is @var{all}.
+@end table
+
+The @code{xcorrelate} filter also supports the @ref{framesync} options.
+
 @section xfade
 
 Apply cross fade from one input video stream to another input video stream.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index ecfe060e1a..7987bf36c6 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -500,6 +500,7 @@ OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER) += vf_weave.o
 OBJS-$(CONFIG_XBR_FILTER) += vf_xbr.o
+OBJS-$(CONFIG_XCORRELATE_FILTER) += vf_convolve.o framesync.o
 OBJS-$(CONFIG_XFADE_FILTER) += vf_xfade.o
 OBJS-$(CONFIG_XFADE_OPENCL_FILTER) += vf_xfade_opencl.o opencl.o opencl/xfade.o
 OBJS-$(CONFIG_XMEDIAN_FILTER) += vf_xmedian.o framesync.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 2127454aa9..d87c0ed981 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -477,6 +477,7 @@ extern const AVFilter ff_vf_w3fdif;
 extern const AVFilter ff_vf_waveform;
 extern const AVFilter ff_vf_weave;
 extern const AVFilter ff_vf_xbr;
+extern const AVFilter ff_vf_xcorrelate;
 extern const AVFilter ff_vf_xfade;
 extern const AVFilter ff_vf_xfade_opencl;
 extern const AVFilter ff_vf_xmedian;
diff --git a/libavfilter/version.h b/libavfilter/version.h
index dca5aacb45..8df0478465 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
 #include "libavutil/version.h"
 
 #define LIBAVFILTER_VERSION_MAJOR 8
-#define LIBAVFILTER_VERSION_MINOR 13
+#define LIBAVFILTER_VERSION_MINOR 14
 #define LIBAVFILTER_VERSION_MICRO 100
 
 
diff --git a/libavfilter/vf_convolve.c b/libavfilter/vf_convolve.c
index 9d506d49dd..55afb582b4 100644
--- a/libavfilter/vf_convolve.c
+++ b/libavfilter/vf_convolve.c
@@ -47,6 +47,12 @@ typedef struct ConvolveContext {
     int planewidth[4];
     int planeheight[4];
 
+    int primarywidth[4];
+    int primaryheight[4];
+
+    int secondarywidth[4];
+    int secondaryheight[4];
+
     AVComplexFloat *fft_hdata_in[4];
     AVComplexFloat *fft_vdata_in[4];
     AVComplexFloat *fft_hdata_out[4];
@@ -63,6 +69,13 @@ typedef struct ConvolveContext {
    int nb_planes;
    int got_impulse[4];
 
+    void (*get_input)(struct 
ConvolveContext *s, AVComplexFloat *fft_hdata, + AVFrame *in, int w, int h, int n, int plane, float scale); + + void (*get_output)(struct ConvolveContext *s, AVComplexFloat *input, AVFrame *out, + int w, int h, int n, int plane, float scale); + void (*prepare_impulse)(AVFilterContext *ctx, AVFrame *impulsepic, int plane); + int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); } ConvolveContext; @@ -99,21 +112,22 @@ static const enum AVPixelFormat pixel_fmts_fftfilt[] = { AV_PIX_FMT_NONE }; -static int config_input_main(AVFilterLink *inlink) +static int config_input(AVFilterLink *inlink) { ConvolveContext *s = inlink->dst->priv; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); - int i; + const int w = inlink->w; + const int h = inlink->h; - s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w); - s->planewidth[0] = s->planewidth[3] = inlink->w; - s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); - s->planeheight[0] = s->planeheight[3] = inlink->h; + s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(w, desc->log2_chroma_w); + s->planewidth[0] = s->planewidth[3] = w; + s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(h, desc->log2_chroma_h); + s->planeheight[0] = s->planeheight[3] = h; s->nb_planes = desc->nb_components; s->depth = desc->comp[0].depth; - for (i = 0; i < s->nb_planes; i++) { + for (int i = 0; i < s->nb_planes; i++) { int w = s->planewidth[i]; int h = s->planeheight[i]; int n = FFMAX(w, h); @@ -186,6 +200,98 @@ static int fft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_job return 0; } +#define SQR(x) ((x) * (x)) + +static void get_zeropadded_input(ConvolveContext *s, + AVComplexFloat *fft_hdata, + AVFrame *in, int w, int h, + int n, int plane, float scale) +{ + float sum = 0.f; + float mean, dev; + int y, x; + + if (s->depth == 8) { + for (y = 0; y < h; y++) { + const uint8_t *src = in->data[plane] + in->linesize[plane] * y; + + for (x = 0; x < w; x++) + sum += src[x]; + } + + mean = sum / (w * h); + sum = 0.f; + for (y = 0; y < h; y++) { + const uint8_t *src = in->data[plane] + in->linesize[plane] * y; + + for (x = 0; x < w; x++) + sum += SQR(src[x] - mean); + } + + dev = sqrtf(sum / (w * h)); + scale /= dev; + for (y = 0; y < h; y++) { + const uint8_t *src = in->data[plane] + in->linesize[plane] * y; + + for (x = 0; x < w; x++) { + fft_hdata[y * n + x].re = (src[x] - mean) * scale; + fft_hdata[y * n + x].im = 0; + } + + for (x = w; x < n; x++) { + fft_hdata[y * n + x].re = 0; + fft_hdata[y * n + x].im = 0; + } + } + + for (y = h; y < n; y++) { + for (x = 0; x < n; x++) { + fft_hdata[y * n + x].re = 0; + fft_hdata[y * n + x].im = 0; + } + } + } else { + for (y = 0; y < h; y++) { + const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y); + + for (x = 0; x < w; x++) + sum += src[x]; + } + + mean = sum / (w * h); + sum = 0.f; + for (y = 0; y < h; y++) { + const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y); + + for (x = 0; x < w; x++) + sum += SQR(src[x] - mean); + } + + dev = sqrtf(sum / (w * h)); + scale /= dev; + for (y = 0; y < h; y++) { + const uint16_t *src = (const uint16_t *)(in->data[plane] + in->linesize[plane] * y); + + for (x = 0; x < w; x++) { + fft_hdata[y * n + x].re = (src[x] - mean) * scale; + fft_hdata[y * n + x].im = 0; + } + + for (x = w; x < n; x++) { + fft_hdata[y * n + x].re = 0; + fft_hdata[y * n + x].im = 0; + } + } + + for (y = h; y < n; y++) { + for (x = 0; x < 
n; x++) { + fft_hdata[y * n + x].re = 0; + fft_hdata[y * n + x].im = 0; + } + } + } +} + static void get_input(ConvolveContext *s, AVComplexFloat *fft_hdata, AVFrame *in, int w, int h, int n, int plane, float scale) { @@ -330,6 +436,27 @@ static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jo return 0; } +static void get_xoutput(ConvolveContext *s, AVComplexFloat *input, AVFrame *out, + int w, int h, int n, int plane, float scale) +{ + const int imax = (1 << s->depth) - 1; + + scale *= imax * 16; + if (s->depth == 8) { + for (int y = 0; y < h; y++) { + uint8_t *dst = out->data[plane] + y * out->linesize[plane]; + for (int x = 0; x < w; x++) + dst[x] = av_clip_uint8(input[y * n + x].re * scale); + } + } else { + for (int y = 0; y < h; y++) { + uint16_t *dst = (uint16_t *)(out->data[plane] + y * out->linesize[plane]); + for (int x = 0; x < w; x++) + dst[x] = av_clip(input[y * n + x].re * scale, 0, imax); + } + } +} + static void get_output(ConvolveContext *s, AVComplexFloat *input, AVFrame *out, int w, int h, int n, int plane, float scale) { @@ -414,6 +541,35 @@ static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_j return 0; } +static int complex_xcorrelate(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) +{ + ThreadData *td = arg; + AVComplexFloat *input = td->hdata_in; + AVComplexFloat *filter = td->vdata_in; + const int n = td->n; + const float scale = 1.f / (n * n); + int start = (n * jobnr) / nb_jobs; + int end = (n * (jobnr+1)) / nb_jobs; + + for (int y = start; y < end; y++) { + int yn = y * n; + + for (int x = 0; x < n; x++) { + float re, im, ire, iim; + + re = input[yn + x].re; + im = input[yn + x].im; + ire = filter[yn + x].re * scale; + iim = -filter[yn + x].im * scale; + + input[yn + x].re = ire * re - iim * im; + input[yn + x].im = iim * re + ire * im; + } + } + + return 0; +} + static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { ConvolveContext *s = ctx->priv; @@ -446,13 +602,82 @@ static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_job return 0; } +static void prepare_impulse(AVFilterContext *ctx, AVFrame *impulsepic, int plane) +{ + ConvolveContext *s = ctx->priv; + const int n = s->fft_len[plane]; + const int w = s->secondarywidth[plane]; + const int h = s->secondaryheight[plane]; + ThreadData td; + float total = 0; + + if (s->depth == 8) { + for (int y = 0; y < h; y++) { + const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ; + for (int x = 0; x < w; x++) { + total += src[x]; + } + } + } else { + for (int y = 0; y < h; y++) { + const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ; + for (int x = 0; x < w; x++) { + total += src[x]; + } + } + } + total = FFMAX(1, total); + + s->get_input(s, s->fft_hdata_impulse_in[plane], impulsepic, w, h, n, plane, 1.f / total); + + td.n = n; + td.plane = plane; + td.hdata_in = s->fft_hdata_impulse_in[plane]; + td.vdata_in = s->fft_vdata_impulse_in[plane]; + td.hdata_out = s->fft_hdata_impulse_out[plane]; + td.vdata_out = s->fft_vdata_impulse_out[plane]; + + ff_filter_execute(ctx, fft_horizontal, &td, NULL, + FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); + ff_filter_execute(ctx, fft_vertical, &td, NULL, + FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); + + s->got_impulse[plane] = 1; +} + +static void prepare_secondary(AVFilterContext *ctx, AVFrame *secondary, int plane) +{ + ConvolveContext *s = ctx->priv; + const 
int n = s->fft_len[plane]; + ThreadData td; + + s->get_input(s, s->fft_hdata_impulse_in[plane], secondary, + s->secondarywidth[plane], + s->secondaryheight[plane], + n, plane, 1.f); + + td.n = n; + td.plane = plane; + td.hdata_in = s->fft_hdata_impulse_in[plane]; + td.vdata_in = s->fft_vdata_impulse_in[plane]; + td.hdata_out = s->fft_hdata_impulse_out[plane]; + td.vdata_out = s->fft_vdata_impulse_out[plane]; + + ff_filter_execute(ctx, fft_horizontal, &td, NULL, + FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); + ff_filter_execute(ctx, fft_vertical, &td, NULL, + FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); + + s->got_impulse[plane] = 1; +} + static int do_convolve(FFFrameSync *fs) { AVFilterContext *ctx = fs->parent; AVFilterLink *outlink = ctx->outputs[0]; ConvolveContext *s = ctx->priv; AVFrame *mainpic = NULL, *impulsepic = NULL; - int ret, y, x, plane; + int ret, plane; ret = ff_framesync_dualinput_get(fs, &mainpic, &impulsepic); if (ret < 0) @@ -464,9 +689,10 @@ static int do_convolve(FFFrameSync *fs) AVComplexFloat *filter = s->fft_vdata_impulse_out[plane]; AVComplexFloat *input = s->fft_vdata_out[plane]; const int n = s->fft_len[plane]; - const int w = s->planewidth[plane]; - const int h = s->planeheight[plane]; - float total = 0; + const int w = s->primarywidth[plane]; + const int h = s->primaryheight[plane]; + const int ow = s->planewidth[plane]; + const int oh = s->planeheight[plane]; ThreadData td; if (!(s->planes & (1 << plane))) { @@ -474,7 +700,7 @@ static int do_convolve(FFFrameSync *fs) } td.plane = plane, td.n = n; - get_input(s, s->fft_hdata_in[plane], mainpic, w, h, n, plane, 1.f); + s->get_input(s, s->fft_hdata_in[plane], mainpic, w, h, n, plane, 1.f); td.hdata_in = s->fft_hdata_in[plane]; td.vdata_in = s->fft_vdata_in[plane]; @@ -487,36 +713,7 @@ static int do_convolve(FFFrameSync *fs) FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); if ((!s->impulse && !s->got_impulse[plane]) || s->impulse) { - if (s->depth == 8) { - for (y = 0; y < h; y++) { - const uint8_t *src = (const uint8_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ; - for (x = 0; x < w; x++) { - total += src[x]; - } - } - } else { - for (y = 0; y < h; y++) { - const uint16_t *src = (const uint16_t *)(impulsepic->data[plane] + y * impulsepic->linesize[plane]) ; - for (x = 0; x < w; x++) { - total += src[x]; - } - } - } - total = FFMAX(1, total); - - get_input(s, s->fft_hdata_impulse_in[plane], impulsepic, w, h, n, plane, 1.f / total); - - td.hdata_in = s->fft_hdata_impulse_in[plane]; - td.vdata_in = s->fft_vdata_impulse_in[plane]; - td.hdata_out = s->fft_hdata_impulse_out[plane]; - td.vdata_out = s->fft_vdata_impulse_out[plane]; - - ff_filter_execute(ctx, fft_horizontal, &td, NULL, - FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); - ff_filter_execute(ctx, fft_vertical, &td, NULL, - FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); - - s->got_impulse[plane] = 1; + s->prepare_impulse(ctx, impulsepic, plane); } td.hdata_in = input; @@ -539,7 +736,7 @@ static int do_convolve(FFFrameSync *fs) ff_filter_execute(ctx, ifft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx))); - get_output(s, s->fft_hdata_out[plane], mainpic, w, h, n, plane, 1.f / (n * n)); + s->get_output(s, s->fft_hdata_out[plane], mainpic, ow, oh, n, plane, 1.f / (n * n)); } return ff_filter_frame(outlink, mainpic); @@ -547,11 +744,23 @@ static int do_convolve(FFFrameSync *fs) static int config_output(AVFilterLink *outlink) { + const AVPixFmtDescriptor *desc = 
av_pix_fmt_desc_get(outlink->format);
     AVFilterContext *ctx = outlink->src;
     ConvolveContext *s = ctx->priv;
     AVFilterLink *mainlink = ctx->inputs[0];
+    AVFilterLink *secondlink = ctx->inputs[1];
     int ret, i, j;
 
+    s->primarywidth[1] = s->primarywidth[2] = AV_CEIL_RSHIFT(mainlink->w, desc->log2_chroma_w);
+    s->primarywidth[0] = s->primarywidth[3] = mainlink->w;
+    s->primaryheight[1] = s->primaryheight[2] = AV_CEIL_RSHIFT(mainlink->h, desc->log2_chroma_h);
+    s->primaryheight[0] = s->primaryheight[3] = mainlink->h;
+
+    s->secondarywidth[1] = s->secondarywidth[2] = AV_CEIL_RSHIFT(secondlink->w, desc->log2_chroma_w);
+    s->secondarywidth[0] = s->secondarywidth[3] = secondlink->w;
+    s->secondaryheight[1] = s->secondaryheight[2] = AV_CEIL_RSHIFT(secondlink->h, desc->log2_chroma_h);
+    s->secondaryheight[0] = s->secondaryheight[3] = secondlink->h;
+
     s->fs.on_event = do_convolve;
     ret = ff_framesync_init_dualinput(&s->fs, ctx);
     if (ret < 0)
         return ret;
@@ -593,8 +802,19 @@ static av_cold int init(AVFilterContext *ctx)
 
     if (!strcmp(ctx->filter->name, "convolve")) {
         s->filter = complex_multiply;
+        s->prepare_impulse = prepare_impulse;
+        s->get_input = get_input;
+        s->get_output = get_output;
+    } else if (!strcmp(ctx->filter->name, "xcorrelate")) {
+        s->filter = complex_xcorrelate;
+        s->prepare_impulse = prepare_secondary;
+        s->get_input = get_zeropadded_input;
+        s->get_output = get_xoutput;
     } else if (!strcmp(ctx->filter->name, "deconvolve")) {
         s->filter = complex_divide;
+        s->prepare_impulse = prepare_impulse;
+        s->get_input = get_input;
+        s->get_output = get_output;
     } else {
         return AVERROR_BUG;
     }
@@ -630,7 +850,7 @@ static const AVFilterPad convolve_inputs[] = {
     {
         .name = "main",
         .type = AVMEDIA_TYPE_VIDEO,
-        .config_props = config_input_main,
+        .config_props = config_input,
     },{
         .name = "impulse",
         .type = AVMEDIA_TYPE_VIDEO,
@@ -698,3 +918,65 @@
 };
 
 #endif /* CONFIG_DECONVOLVE_FILTER */
+
+#if CONFIG_XCORRELATE_FILTER
+
+static const AVOption xcorrelate_options[] = {
+    { "planes", "set planes to cross-correlate", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
+    { "secondary", "when to process secondary frame", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
+    { "first", "process only first secondary frame, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
+    { "all", "process all secondary frames", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
+    { NULL },
+};
+
+FRAMESYNC_DEFINE_PURE_CLASS(xcorrelate, "xcorrelate", convolve, xcorrelate_options);
+
+static int config_input_secondary(AVFilterLink *inlink)
+{
+    AVFilterContext *ctx = inlink->dst;
+
+    if (ctx->inputs[0]->w <= ctx->inputs[1]->w ||
+        ctx->inputs[0]->h <= ctx->inputs[1]->h) {
+        av_log(ctx, AV_LOG_ERROR, "Width and height of the second input video must be less than those of the first input.\n");
+        return AVERROR(EINVAL);
+    }
+
+    return 0;
+}
+
+static const AVFilterPad xcorrelate_inputs[] = {
+    {
+        .name = "primary",
+        .type = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_input,
+    },{
+        .name = "secondary",
+        .type = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_input_secondary,
+    },
+};
+
+static const AVFilterPad xcorrelate_outputs[] = {
+    {
+        .name = "default",
+        .type = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_output,
+    },
+};
+
+const AVFilter ff_vf_xcorrelate = {
+    .name = "xcorrelate",
+    .description = NULL_IF_CONFIG_SMALL("Cross-correlate first video stream with second video stream."),
+    .preinit = convolve_framesync_preinit,
+    .init = init,
+    .uninit = uninit,
+    
.activate = activate, + .priv_size = sizeof(ConvolveContext), + .priv_class = &xcorrelate_class, + FILTER_INPUTS(xcorrelate_inputs), + FILTER_OUTPUTS(xcorrelate_outputs), + FILTER_PIXFMTS_ARRAY(pixel_fmts_fftfilt), + .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS, +}; + +#endif /* CONFIG_XCORRELATE_FILTER */
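
Note, not part of the patch: a rough spatial-domain sketch of what the filter computes per processed plane, for readers unfamiliar with the approach. The patch normalizes each plane of both inputs to zero mean and unit deviation (get_zeropadded_input), takes forward FFTs, multiplies the primary spectrum by the conjugate of the secondary spectrum (complex_xcorrelate), and inverse-transforms the product, so each output pixel holds a correlation score for that displacement of the secondary frame over the primary one. The helper names below (normalize_plane, xcorrelate_direct) and the tiny self-test are illustrative only; the sketch assumes 8-bit planes, clamps at borders instead of wrapping circularly as the FFT path effectively does, and leaves raw float scores instead of mapping them back to pixel values as get_xoutput() does.

/* Illustrative sketch only -- not part of the patch; assumes 8-bit planes. */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Zero-mean, unit-deviation normalization of an 8-bit plane, mirroring what
 * get_zeropadded_input() does before the forward FFT. */
void normalize_plane(const uint8_t *src, int linesize, int w, int h, float *dst)
{
    double sum = 0.0, var = 0.0, mean, dev;

    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            sum += src[y * linesize + x];
    mean = sum / (w * (double)h);

    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++) {
            const double d = src[y * linesize + x] - mean;
            var += d * d;
        }
    dev = sqrt(var / (w * (double)h));
    if (dev <= 0.0)
        dev = 1.0;

    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            dst[y * w + x] = (float)((src[y * linesize + x] - mean) / dev);
}

/* Direct cross-correlation of a normalized primary plane with a smaller
 * normalized secondary plane; out[] has primary dimensions and holds one
 * correlation score per displacement of the secondary plane. Borders are
 * clamped here, whereas the FFT path in the patch wraps around. */
void xcorrelate_direct(const float *prim, int pw, int ph,
                       const float *sec, int sw, int sh, float *out)
{
    for (int oy = 0; oy < ph; oy++) {
        for (int ox = 0; ox < pw; ox++) {
            double acc = 0.0;

            for (int y = 0; y < sh && oy + y < ph; y++)
                for (int x = 0; x < sw && ox + x < pw; x++)
                    acc += prim[(oy + y) * pw + ox + x] * sec[y * sw + x];
            out[oy * pw + ox] = (float)(acc / (sw * (double)sh));
        }
    }
}

/* Tiny self-test: a bright 3x3 block placed at (x=4, y=3) in an 8x8 plane
 * should give the strongest correlation score at that displacement. */
int main(void)
{
    uint8_t prim8[8 * 8] = { 0 }, sec8[3 * 3];
    float prim[8 * 8], sec[3 * 3], score[8 * 8];
    int best = 0;

    for (int y = 0; y < 3; y++)
        for (int x = 0; x < 3; x++) {
            prim8[(y + 3) * 8 + x + 4] = 200;
            sec8[y * 3 + x] = 200;
        }
    /* make the pattern non-constant so its deviation is non-zero */
    sec8[0] = 10;
    prim8[3 * 8 + 4] = 10;

    normalize_plane(prim8, 8, 8, 8, prim);
    normalize_plane(sec8, 3, 3, 3, sec);
    xcorrelate_direct(prim, 8, 8, sec, 3, 3, score);

    for (int i = 1; i < 8 * 8; i++)
        if (score[i] > score[best])
            best = i;
    printf("best match at x=%d y=%d, score=%f\n", best % 8, best / 8, score[best]);
    return 0;
}

In the filter itself the same idea is used for template matching: feeding a cropped patch of the scene as the second input makes the brightest location in the output correspond to the displacement with the highest correlation.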