From 62dc5df941f5e196164c151691e4274195523e95 Mon Sep 17 00:00:00 2001 From: Timo Rothenpieler Date: Thu, 24 Jun 2021 01:53:10 +0200 Subject: [PATCH] avfilter/scale_cuda: add support for pixel format conversion --- configure | 2 + doc/filters.texi | 84 +++ libavfilter/version.h | 2 +- libavfilter/vf_scale_cuda.c | 359 +++++----- libavfilter/vf_scale_cuda.cu | 1271 ++++++++++++++++++++++++++++++---- 5 files changed, 1381 insertions(+), 337 deletions(-) diff --git a/configure b/configure index ae357371d0..b124411609 100755 --- a/configure +++ b/configure @@ -6283,6 +6283,8 @@ if [ -z "$nvccflags" ]; then nvccflags=$nvccflags_default fi +nvccflags="$nvccflags -std=c++11" + if enabled x86_64 || enabled ppc64 || enabled aarch64; then nvccflags="$nvccflags -m64" else diff --git a/doc/filters.texi b/doc/filters.texi index da8f7d7726..3368a90877 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -17832,6 +17832,90 @@ If the specified expression is not valid, it is kept at its current value. @end table +@section scale_cuda + +Scale (resize) and convert (pixel format) the input video, using accelerated CUDA kernels. +Setting the output width and height works in the same way as for the @ref{scale} filter. + +The filter accepts the following options: +@table @option +@item w +@item h +Set the output video dimension expression. Default value is the input dimension. + +Allows for the same expressions as the @ref{scale} filter. + +@item interp_algo +Sets the algorithm used for scaling: + +@table @var +@item nearest +Nearest neighbour + +Used by default if input parameters match the desired output. + +@item bilinear +Bilinear + +@item bicubic +Bicubic + +This is the default. + +@item lanczos +Lanczos + +@end table + +@item format +Controls the output pixel format. By default, or if none is specified, the input +pixel format is used. + +The filter does not support converting between YUV and RGB pixel formats. + +@item passthrough +If set to 0, every frame is processed, even if no conversion is neccesary. +This mode can be useful to use the filter as a buffer for a downstream +frame-consumer that exhausts the limited decoder frame pool. + +If set to 1, frames are passed through as-is if they match the desired output +parameters. This is the default behaviour. + +@item param +Algorithm-Specific parameter. + +Affects the curves of the bicubic algorithm. + +@item force_original_aspect_ratio +@item force_divisible_by +Work the same as the identical @ref{scale} filter options. + +@end table + +@subsection Examples + +@itemize +@item +Scale input to 720p, keeping aspect ratio and ensuring the output is yuv420p. +@example +scale_cuda=-2:720:format=yuv420p +@end example + +@item +Upscale to 4K using nearest neighbour algorithm. +@example +scale_cuda=4096:2160:interp_algo=nearest +@end example + +@item +Don't do any conversion or scaling, but copy all input frames into newly allocated ones. +This can be useful to deal with a filter and encode chain that otherwise exhausts the +decoders frame pool. 
+@example +scale_cuda=passthrough=0 +@end example +@end itemize + @section scale_npp Use the NVIDIA Performance Primitives (libnpp) to perform scaling and/or pixel diff --git a/libavfilter/version.h b/libavfilter/version.h index 5052681653..fbb81ef31c 100644 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@ -31,7 +31,7 @@ #define LIBAVFILTER_VERSION_MAJOR 8 #define LIBAVFILTER_VERSION_MINOR 0 -#define LIBAVFILTER_VERSION_MICRO 102 +#define LIBAVFILTER_VERSION_MICRO 103 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c index a3da4dc0bc..f3ea01634b 100644 --- a/libavfilter/vf_scale_cuda.c +++ b/libavfilter/vf_scale_cuda.c @@ -75,8 +75,11 @@ typedef struct CUDAScaleContext { AVCUDADeviceContext *hwctx; - enum AVPixelFormat in_fmt; - enum AVPixelFormat out_fmt; + enum AVPixelFormat in_fmt, out_fmt; + const AVPixFmtDescriptor *in_desc, *out_desc; + int in_planes, out_planes; + int in_plane_depths[4]; + int in_plane_channels[4]; AVBufferRef *frames_ctx; AVFrame *frame; @@ -97,18 +100,10 @@ typedef struct CUDAScaleContext { CUcontext cu_ctx; CUmodule cu_module; - CUfunction cu_func_uchar; - CUfunction cu_func_uchar2; - CUfunction cu_func_uchar4; - CUfunction cu_func_ushort; - CUfunction cu_func_ushort2; - CUfunction cu_func_ushort4; + CUfunction cu_func; + CUfunction cu_func_uv; CUstream cu_stream; - CUdeviceptr srcBuffer; - CUdeviceptr dstBuffer; - int tex_alignment; - int interp_algo; int interp_use_linear; int interp_as_integer; @@ -120,7 +115,6 @@ static av_cold int cudascale_init(AVFilterContext *ctx) { CUDAScaleContext *s = ctx->priv; - s->format = AV_PIX_FMT_NONE; s->frame = av_frame_alloc(); if (!s->frame) return AVERROR(ENOMEM); @@ -210,6 +204,32 @@ static int format_is_supported(enum AVPixelFormat fmt) return 0; } +static av_cold void set_format_info(AVFilterContext *ctx, enum AVPixelFormat in_format, enum AVPixelFormat out_format) +{ + CUDAScaleContext *s = ctx->priv; + int i, p, d; + + s->in_fmt = in_format; + s->out_fmt = out_format; + + s->in_desc = av_pix_fmt_desc_get(s->in_fmt); + s->out_desc = av_pix_fmt_desc_get(s->out_fmt); + s->in_planes = av_pix_fmt_count_planes(s->in_fmt); + s->out_planes = av_pix_fmt_count_planes(s->out_fmt); + + // find maximum step of each component of each plane + // For our subset of formats, this should accurately tell us how many channels CUDA needs + // i.e. 
1 for Y plane, 2 for UV plane of NV12, 4 for single plane of RGB0 formats + + for (i = 0; i < s->in_desc->nb_components; i++) { + d = (s->in_desc->comp[i].depth + 7) / 8; + p = s->in_desc->comp[i].plane; + s->in_plane_channels[p] = FFMAX(s->in_plane_channels[p], s->in_desc->comp[i].step / d); + + s->in_plane_depths[p] = s->in_desc->comp[i].depth; + } +} + static av_cold int init_processing_chain(AVFilterContext *ctx, int in_width, int in_height, int out_width, int out_height) { @@ -241,8 +261,7 @@ static av_cold int init_processing_chain(AVFilterContext *ctx, int in_width, int return AVERROR(ENOSYS); } - s->in_fmt = in_format; - s->out_fmt = out_format; + set_format_info(ctx, in_format, out_format); if (s->passthrough && in_width == out_width && in_height == out_height && in_format == out_format) { s->frames_ctx = av_buffer_ref(ctx->inputs[0]->hw_frames_ctx); @@ -254,6 +273,10 @@ static av_cold int init_processing_chain(AVFilterContext *ctx, int in_width, int ret = init_hwframe_ctx(s, in_frames_ctx->device_ref, out_width, out_height); if (ret < 0) return ret; + + if (in_width == out_width && in_height == out_height && + in_format == out_format && s->interp_algo == INTERP_ALGO_DEFAULT) + s->interp_algo = INTERP_ALGO_NEAREST; } ctx->outputs[0]->hw_frames_ctx = av_buffer_ref(s->frames_ctx); @@ -263,19 +286,17 @@ static av_cold int init_processing_chain(AVFilterContext *ctx, int in_width, int return 0; } -static av_cold int cudascale_config_props(AVFilterLink *outlink) +static av_cold int cudascale_load_functions(AVFilterContext *ctx) { - AVFilterContext *ctx = outlink->src; - AVFilterLink *inlink = outlink->src->inputs[0]; - CUDAScaleContext *s = ctx->priv; - AVHWFramesContext *frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data; - AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx; - CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx; - CudaFunctions *cu = device_hwctx->internal->cuda_dl; - char buf[64]; - int w, h; + CUDAScaleContext *s = ctx->priv; + CUcontext dummy, cuda_ctx = s->hwctx->cuda_ctx; + CudaFunctions *cu = s->hwctx->internal->cuda_dl; + char buf[128]; int ret; + const char *in_fmt_name = av_get_pix_fmt_name(s->in_fmt); + const char *out_fmt_name = av_get_pix_fmt_name(s->out_fmt); + const char *function_infix = ""; extern const unsigned char ff_vf_scale_cuda_ptx_data[]; @@ -283,23 +304,23 @@ static av_cold int cudascale_config_props(AVFilterLink *outlink) switch(s->interp_algo) { case INTERP_ALGO_NEAREST: - function_infix = "_Nearest"; + function_infix = "Nearest"; s->interp_use_linear = 0; s->interp_as_integer = 1; break; case INTERP_ALGO_BILINEAR: - function_infix = "_Bilinear"; + function_infix = "Bilinear"; s->interp_use_linear = 1; s->interp_as_integer = 1; break; case INTERP_ALGO_DEFAULT: case INTERP_ALGO_BICUBIC: - function_infix = "_Bicubic"; + function_infix = "Bicubic"; s->interp_use_linear = 0; s->interp_as_integer = 0; break; case INTERP_ALGO_LANCZOS: - function_infix = "_Lanczos"; + function_infix = "Lanczos"; s->interp_use_linear = 0; s->interp_as_integer = 0; break; @@ -308,51 +329,47 @@ static av_cold int cudascale_config_props(AVFilterLink *outlink) return AVERROR_BUG; } - s->hwctx = device_hwctx; - s->cu_stream = s->hwctx->stream; - ret = CHECK_CU(cu->cuCtxPushCurrent(cuda_ctx)); if (ret < 0) - goto fail; + return ret; - ret = ff_cuda_load_module(ctx, device_hwctx, &s->cu_module, + ret = ff_cuda_load_module(ctx, s->hwctx, &s->cu_module, ff_vf_scale_cuda_ptx_data, ff_vf_scale_cuda_ptx_len); if (ret < 0) goto fail; - snprintf(buf, 
sizeof(buf), "Subsample%s_uchar", function_infix); - CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, buf)); + snprintf(buf, sizeof(buf), "Subsample_%s_%s_%s", function_infix, in_fmt_name, out_fmt_name); + ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func, s->cu_module, buf)); + if (ret < 0) { + av_log(ctx, AV_LOG_FATAL, "Unsupported conversion: %s -> %s\n", in_fmt_name, out_fmt_name); + ret = AVERROR(ENOSYS); + goto fail; + } + + snprintf(buf, sizeof(buf), "Subsample_%s_%s_%s_uv", function_infix, in_fmt_name, out_fmt_name); + ret = CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uv, s->cu_module, buf)); if (ret < 0) goto fail; - snprintf(buf, sizeof(buf), "Subsample%s_uchar2", function_infix); - CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, buf)); - if (ret < 0) - goto fail; - - snprintf(buf, sizeof(buf), "Subsample%s_uchar4", function_infix); - CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_uchar4, s->cu_module, buf)); - if (ret < 0) - goto fail; - - snprintf(buf, sizeof(buf), "Subsample%s_ushort", function_infix); - CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, buf)); - if (ret < 0) - goto fail; - - snprintf(buf, sizeof(buf), "Subsample%s_ushort2", function_infix); - CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, buf)); - if (ret < 0) - goto fail; - - snprintf(buf, sizeof(buf), "Subsample%s_ushort4", function_infix); - CHECK_CU(cu->cuModuleGetFunction(&s->cu_func_ushort4, s->cu_module, buf)); - if (ret < 0) - goto fail; - - +fail: CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + return ret; +} + +static av_cold int cudascale_config_props(AVFilterLink *outlink) +{ + AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = outlink->src->inputs[0]; + CUDAScaleContext *s = ctx->priv; + AVHWFramesContext *frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data; + AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx; + int w, h; + int ret; + + s->hwctx = device_hwctx; + s->cu_stream = s->hwctx->stream; + if ((ret = ff_scale_eval_dimensions(s, s->w_expr, s->h_expr, inlink, outlink, @@ -373,9 +390,6 @@ static av_cold int cudascale_config_props(AVFilterLink *outlink) if (ret < 0) return ret; - av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d%s\n", - inlink->w, inlink->h, outlink->w, outlink->h, s->passthrough ? " (passthrough)" : ""); - if (inlink->sample_aspect_ratio.num) { outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w, outlink->w*inlink->h}, @@ -384,154 +398,118 @@ static av_cold int cudascale_config_props(AVFilterLink *outlink) outlink->sample_aspect_ratio = inlink->sample_aspect_ratio; } + av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s%s\n", + inlink->w, inlink->h, av_get_pix_fmt_name(s->in_fmt), + outlink->w, outlink->h, av_get_pix_fmt_name(s->out_fmt), + s->passthrough ? 
" (passthrough)" : ""); + + ret = cudascale_load_functions(ctx); + if (ret < 0) + return ret; + return 0; fail: return ret; } -static int call_resize_kernel(AVFilterContext *ctx, CUfunction func, int channels, - uint8_t *src_dptr, int src_width, int src_height, int src_pitch, - uint8_t *dst_dptr, int dst_width, int dst_height, int dst_pitch, - int pixel_size, int bit_depth) +static int call_resize_kernel(AVFilterContext *ctx, CUfunction func, + CUtexObject src_tex[4], int src_width, int src_height, + AVFrame *out_frame, int dst_width, int dst_height, int dst_pitch) { CUDAScaleContext *s = ctx->priv; CudaFunctions *cu = s->hwctx->internal->cuda_dl; - CUdeviceptr dst_devptr = (CUdeviceptr)dst_dptr; - CUtexObject tex = 0; - void *args_uchar[] = { &tex, &dst_devptr, &dst_width, &dst_height, &dst_pitch, - &src_width, &src_height, &bit_depth, &s->param }; - int ret; - CUDA_TEXTURE_DESC tex_desc = { - .filterMode = s->interp_use_linear ? - CU_TR_FILTER_MODE_LINEAR : - CU_TR_FILTER_MODE_POINT, - .flags = s->interp_as_integer ? CU_TRSF_READ_AS_INTEGER : 0, + CUdeviceptr dst_devptr[4] = { + (CUdeviceptr)out_frame->data[0], (CUdeviceptr)out_frame->data[1], + (CUdeviceptr)out_frame->data[2], (CUdeviceptr)out_frame->data[3] }; - CUDA_RESOURCE_DESC res_desc = { - .resType = CU_RESOURCE_TYPE_PITCH2D, - .res.pitch2D.format = pixel_size == 1 ? - CU_AD_FORMAT_UNSIGNED_INT8 : - CU_AD_FORMAT_UNSIGNED_INT16, - .res.pitch2D.numChannels = channels, - .res.pitch2D.width = src_width, - .res.pitch2D.height = src_height, - .res.pitch2D.pitchInBytes = src_pitch, - .res.pitch2D.devPtr = (CUdeviceptr)src_dptr, + void *args_uchar[] = { + &src_tex[0], &src_tex[1], &src_tex[2], &src_tex[3], + &dst_devptr[0], &dst_devptr[1], &dst_devptr[2], &dst_devptr[3], + &dst_width, &dst_height, &dst_pitch, + &src_width, &src_height, &s->param }; - // Handling of channels is done via vector-types in cuda, so their size is implicitly part of the pitch - // Same for pixel_size, which is represented via datatypes on the cuda side of things. 
- dst_pitch /= channels * pixel_size; - - ret = CHECK_CU(cu->cuTexObjectCreate(&tex, &res_desc, &tex_desc, NULL)); - if (ret < 0) - goto exit; - - ret = CHECK_CU(cu->cuLaunchKernel(func, - DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1, - BLOCKX, BLOCKY, 1, 0, s->cu_stream, args_uchar, NULL)); - -exit: - if (tex) - CHECK_CU(cu->cuTexObjectDestroy(tex)); - - return ret; + return CHECK_CU(cu->cuLaunchKernel(func, + DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1, + BLOCKX, BLOCKY, 1, 0, s->cu_stream, args_uchar, NULL)); } static int scalecuda_resize(AVFilterContext *ctx, AVFrame *out, AVFrame *in) { - AVHWFramesContext *in_frames_ctx = (AVHWFramesContext*)in->hw_frames_ctx->data; CUDAScaleContext *s = ctx->priv; + CudaFunctions *cu = s->hwctx->internal->cuda_dl; + CUcontext dummy, cuda_ctx = s->hwctx->cuda_ctx; + int i, ret; - switch (in_frames_ctx->sw_format) { - case AV_PIX_FMT_YUV420P: - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 1, 8); - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[1], in->width / 2, in->height / 2, in->linesize[1], - out->data[1], out->width / 2, out->height / 2, out->linesize[1], - 1, 8); - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[2], in->width / 2, in->height / 2, in->linesize[2], - out->data[2], out->width / 2, out->height / 2, out->linesize[2], - 1, 8); - break; - case AV_PIX_FMT_YUV444P: - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 1, 8); - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[1], in->width, in->height, in->linesize[1], - out->data[1], out->width, out->height, out->linesize[1], - 1, 8); - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[2], in->width, in->height, in->linesize[2], - out->data[2], out->width, out->height, out->linesize[2], - 1, 8); - break; - case AV_PIX_FMT_YUV444P16: - call_resize_kernel(ctx, s->cu_func_ushort, 1, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 2, 16); - call_resize_kernel(ctx, s->cu_func_ushort, 1, - in->data[1], in->width, in->height, in->linesize[1], - out->data[1], out->width, out->height, out->linesize[1], - 2, 16); - call_resize_kernel(ctx, s->cu_func_ushort, 1, - in->data[2], in->width, in->height, in->linesize[2], - out->data[2], out->width, out->height, out->linesize[2], - 2, 16); - break; - case AV_PIX_FMT_NV12: - call_resize_kernel(ctx, s->cu_func_uchar, 1, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 1, 8); - call_resize_kernel(ctx, s->cu_func_uchar2, 2, - in->data[1], in->width / 2, in->height / 2, in->linesize[1], - out->data[1], out->width / 2, out->height / 2, out->linesize[1], - 1, 8); - break; - case AV_PIX_FMT_P010LE: - call_resize_kernel(ctx, s->cu_func_ushort, 1, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 2, 10); - call_resize_kernel(ctx, s->cu_func_ushort2, 2, - in->data[1], in->width / 2, in->height / 2, in->linesize[1], - out->data[1], out->width / 2, out->height / 2, out->linesize[1], - 2, 10); - break; - case AV_PIX_FMT_P016LE: - call_resize_kernel(ctx, s->cu_func_ushort, 1, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 2, 16); 
- call_resize_kernel(ctx, s->cu_func_ushort2, 2, - in->data[1], in->width / 2, in->height / 2, in->linesize[1], - out->data[1], out->width / 2, out->height / 2, out->linesize[1], - 2, 16); - break; - case AV_PIX_FMT_0RGB32: - case AV_PIX_FMT_0BGR32: - call_resize_kernel(ctx, s->cu_func_uchar4, 4, - in->data[0], in->width, in->height, in->linesize[0], - out->data[0], out->width, out->height, out->linesize[0], - 1, 8); - break; - default: - return AVERROR_BUG; + CUtexObject tex[4] = { 0, 0, 0, 0 }; + + ret = CHECK_CU(cu->cuCtxPushCurrent(cuda_ctx)); + if (ret < 0) + return ret; + + for (i = 0; i < s->in_planes; i++) { + CUDA_TEXTURE_DESC tex_desc = { + .filterMode = s->interp_use_linear ? + CU_TR_FILTER_MODE_LINEAR : + CU_TR_FILTER_MODE_POINT, + .flags = s->interp_as_integer ? CU_TRSF_READ_AS_INTEGER : 0, + }; + + CUDA_RESOURCE_DESC res_desc = { + .resType = CU_RESOURCE_TYPE_PITCH2D, + .res.pitch2D.format = s->in_plane_depths[i] <= 8 ? + CU_AD_FORMAT_UNSIGNED_INT8 : + CU_AD_FORMAT_UNSIGNED_INT16, + .res.pitch2D.numChannels = s->in_plane_channels[i], + .res.pitch2D.pitchInBytes = in->linesize[i], + .res.pitch2D.devPtr = (CUdeviceptr)in->data[i], + }; + + if (i == 1 || i == 2) { + res_desc.res.pitch2D.width = AV_CEIL_RSHIFT(in->width, s->in_desc->log2_chroma_w); + res_desc.res.pitch2D.height = AV_CEIL_RSHIFT(in->height, s->in_desc->log2_chroma_h); + } else { + res_desc.res.pitch2D.width = in->width; + res_desc.res.pitch2D.height = in->height; + } + + ret = CHECK_CU(cu->cuTexObjectCreate(&tex[i], &res_desc, &tex_desc, NULL)); + if (ret < 0) + goto exit; } - return 0; + // scale primary plane(s). Usually Y (and A), or single plane of RGB frames. + ret = call_resize_kernel(ctx, s->cu_func, + tex, in->width, in->height, + out, out->width, out->height, out->linesize[0]); + if (ret < 0) + goto exit; + + if (s->out_planes > 1) { + // scale UV plane. Scale function sets both U and V plane, or singular interleaved plane. 
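        /* Editor's illustration, not part of the patch: for a hypothetical
         * 1920x1080 NV12 -> 1280x720 NV12 scale, log2_chroma_w/h are both 1,
         * so the s->cu_func_uv launch below covers
         * AV_CEIL_RSHIFT(1920,1) x AV_CEIL_RSHIFT(1080,1) = 960x540 source
         * chroma samples and writes 640x360 interleaved UV samples, with
         * out->linesize[1] passed as the destination pitch in bytes. */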
+ ret = call_resize_kernel(ctx, s->cu_func_uv, tex, + AV_CEIL_RSHIFT(in->width, s->in_desc->log2_chroma_w), + AV_CEIL_RSHIFT(in->height, s->in_desc->log2_chroma_h), + out, + AV_CEIL_RSHIFT(out->width, s->out_desc->log2_chroma_w), + AV_CEIL_RSHIFT(out->height, s->out_desc->log2_chroma_h), + out->linesize[1]); + if (ret < 0) + goto exit; + } + +exit: + for (i = 0; i < s->in_planes; i++) + if (tex[i]) + CHECK_CU(cu->cuTexObjectDestroy(tex[i])); + + CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + + return ret; } static int cudascale_scale(AVFilterContext *ctx, AVFrame *out, AVFrame *in) @@ -625,6 +603,7 @@ static const AVOption options[] = { { "bilinear", "bilinear", 0, AV_OPT_TYPE_CONST, { .i64 = INTERP_ALGO_BILINEAR }, 0, 0, FLAGS, "interp_algo" }, { "bicubic", "bicubic", 0, AV_OPT_TYPE_CONST, { .i64 = INTERP_ALGO_BICUBIC }, 0, 0, FLAGS, "interp_algo" }, { "lanczos", "lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = INTERP_ALGO_LANCZOS }, 0, 0, FLAGS, "interp_algo" }, + { "format", "Output video pixel format", OFFSET(format), AV_OPT_TYPE_PIXEL_FMT, { .i64 = AV_PIX_FMT_NONE }, INT_MIN, INT_MAX, .flags=FLAGS }, { "passthrough", "Do not process frames at all if parameters match", OFFSET(passthrough), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS }, { "param", "Algorithm-Specific parameter", OFFSET(param), AV_OPT_TYPE_FLOAT, { .dbl = SCALE_CUDA_PARAM_DEFAULT }, -FLT_MAX, FLT_MAX, FLAGS }, { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, FLAGS, "force_oar" }, diff --git a/libavfilter/vf_scale_cuda.cu b/libavfilter/vf_scale_cuda.cu index 7fda4b74a5..c9c6cafdb6 100644 --- a/libavfilter/vf_scale_cuda.cu +++ b/libavfilter/vf_scale_cuda.cu @@ -23,9 +23,929 @@ #include "cuda/vector_helpers.cuh" #include "vf_scale_cuda.h" +template +using subsample_function_t = T (*)(cudaTextureObject_t tex, int xo, int yo, + int dst_width, int dst_height, + int src_width, int src_height, + int bit_depth, float param); + +// --- CONVERSION LOGIC --- + +static const ushort mask_10bit = 0xFFC0; +static const ushort mask_16bit = 0xFFFF; + +static inline __device__ ushort conv_8to16(uchar in, ushort mask) +{ + return ((ushort)in | ((ushort)in << 8)) & mask; +} + +static inline __device__ uchar conv_16to8(ushort in) +{ + return in >> 8; +} + +static inline __device__ uchar conv_10to8(ushort in) +{ + return in >> 8; +} + +static inline __device__ ushort conv_10to16(ushort in) +{ + return in | (in >> 10); +} + +static inline __device__ ushort conv_16to10(ushort in) +{ + return in & mask_10bit; +} + +#define DEF_F(N, T) \ + template subsample_func_y, \ + subsample_function_t subsample_func_uv> \ + __device__ static inline void N(cudaTextureObject_t src_tex[4], T *dst[4], int xo, int yo, \ + int dst_width, int dst_height, int dst_pitch, \ + int src_width, int src_height, float param) + +#define SUB_F(m, plane) \ + subsample_func_##m(src_tex[plane], xo, yo, \ + dst_width, dst_height, \ + src_width, src_height, \ + in_bit_depth, param) + +// FFmpeg passes pitch in bytes, CUDA uses potentially larger types +#define FIXED_PITCH \ + (dst_pitch/sizeof(*dst[0])) + +#define DEFAULT_DST(n) \ + dst[n][yo*FIXED_PITCH+xo] + +// yuv420p->X + +struct Convert_yuv420p_yuv420p +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 
1); + DEFAULT_DST(2) = SUB_F(uv, 2); + } +}; + +struct Convert_yuv420p_nv12 +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef uchar out_T; + typedef uchar2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_uchar2( + SUB_F(uv, 1), + SUB_F(uv, 2) + ); + } +}; + +struct Convert_yuv420p_yuv444p +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + DEFAULT_DST(2) = SUB_F(uv, 2); + } +}; + +struct Convert_yuv420p_p010le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_10bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_ushort2( + conv_8to16(SUB_F(uv, 1), mask_10bit), + conv_8to16(SUB_F(uv, 2), mask_10bit) + ); + } +}; + +struct Convert_yuv420p_p016le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_16bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_ushort2( + conv_8to16(SUB_F(uv, 1), mask_16bit), + conv_8to16(SUB_F(uv, 2), mask_16bit) + ); + } +}; + +struct Convert_yuv420p_yuv444p16le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef ushort out_T; + typedef ushort out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_16bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = conv_8to16(SUB_F(uv, 1), mask_16bit); + DEFAULT_DST(2) = conv_8to16(SUB_F(uv, 2), mask_16bit); + } +}; + +// nv12->X + +struct Convert_nv12_yuv420p +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar2 in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = res.x; + DEFAULT_DST(2) = res.y; + } +}; + +struct Convert_nv12_nv12 +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar2 in_T_uv; + typedef uchar out_T; + typedef uchar2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + } +}; + +struct Convert_nv12_yuv444p +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar2 in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = res.x; + DEFAULT_DST(2) = res.y; + } +}; + +struct Convert_nv12_p010le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar2 in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_10bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = make_ushort2( + conv_8to16(res.x, mask_10bit), + conv_8to16(res.y, mask_10bit) + ); + } +}; + +struct Convert_nv12_p016le +{ + static const int in_bit_depth = 
8; + typedef uchar in_T; + typedef uchar2 in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_16bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = make_ushort2( + conv_8to16(res.x, mask_16bit), + conv_8to16(res.y, mask_16bit) + ); + } +}; + +struct Convert_nv12_yuv444p16le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar2 in_T_uv; + typedef ushort out_T; + typedef ushort out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_16bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = conv_8to16(res.x, mask_16bit); + DEFAULT_DST(2) = conv_8to16(res.y, mask_16bit); + } +}; + +// yuv444p->X + +struct Convert_yuv444p_yuv420p +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + DEFAULT_DST(2) = SUB_F(uv, 2); + } +}; + +struct Convert_yuv444p_nv12 +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef uchar out_T; + typedef uchar2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_uchar2( + SUB_F(uv, 1), + SUB_F(uv, 2) + ); + } +}; + +struct Convert_yuv444p_yuv444p +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + DEFAULT_DST(2) = SUB_F(uv, 2); + } +}; + +struct Convert_yuv444p_p010le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_10bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_ushort2( + conv_8to16(SUB_F(uv, 1), mask_10bit), + conv_8to16(SUB_F(uv, 2), mask_10bit) + ); + } +}; + +struct Convert_yuv444p_p016le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_16bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_ushort2( + conv_8to16(SUB_F(uv, 1), mask_16bit), + conv_8to16(SUB_F(uv, 2), mask_16bit) + ); + } +}; + +struct Convert_yuv444p_yuv444p16le +{ + static const int in_bit_depth = 8; + typedef uchar in_T; + typedef uchar in_T_uv; + typedef ushort out_T; + typedef ushort out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_8to16(SUB_F(y, 0), mask_16bit); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = conv_8to16(SUB_F(uv, 1), mask_16bit); + DEFAULT_DST(2) = conv_8to16(SUB_F(uv, 2), mask_16bit); + } +}; + +// p010le->X + +struct Convert_p010le_yuv420p +{ + static const int in_bit_depth = 10; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_10to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = conv_10to8(res.x); + DEFAULT_DST(2) = conv_10to8(res.y); + } +}; + 
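/* Editor's note, not part of the upstream patch: a worked example of the
 * fixed-point depth helpers (defined near the top of this file) that the
 * 8<->10<->16 bit conversions in these structs rely on. P010 keeps its 10
 * significant bits MSB-aligned in a 16-bit word, hence the masks:
 *   conv_8to16(0xAB, mask_16bit) == (0x00AB | 0xAB00) & 0xFFFF == 0xABAB
 *   conv_8to16(0xAB, mask_10bit) == 0xABAB & 0xFFC0            == 0xAB80
 *   conv_10to16(0xFFC0)          == 0xFFC0 | (0xFFC0 >> 10)    == 0xFFFF
 *   conv_16to10(0x1234)          == 0x1234 & 0xFFC0            == 0x1200
 *   conv_10to8(0xAB80)           == 0xAB80 >> 8                == 0xAB
 * Replicating/folding the high bits into the low bits (rather than a bare
 * shift) keeps full-scale values at full scale after the change in depth. */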
+struct Convert_p010le_nv12 +{ + static const int in_bit_depth = 10; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef uchar out_T; + typedef uchar2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_10to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = make_uchar2( + conv_10to8(res.x), + conv_10to8(res.y) + ); + } +}; + +struct Convert_p010le_yuv444p +{ + static const int in_bit_depth = 10; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_10to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = conv_10to8(res.x); + DEFAULT_DST(2) = conv_10to8(res.y); + } +}; + +struct Convert_p010le_p010le +{ + static const int in_bit_depth = 10; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + } +}; + +struct Convert_p010le_p016le +{ + static const int in_bit_depth = 10; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_10to16(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = make_ushort2( + conv_10to16(res.x), + conv_10to16(res.y) + ); + } +}; + +struct Convert_p010le_yuv444p16le +{ + static const int in_bit_depth = 10; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef ushort out_T; + typedef ushort out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_10to16(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = conv_10to16(res.x); + DEFAULT_DST(2) = conv_10to16(res.y); + } +}; + +// p016le->X + +struct Convert_p016le_yuv420p +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = conv_16to8(res.x); + DEFAULT_DST(2) = conv_16to8(res.y); + } +}; + +struct Convert_p016le_nv12 +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef uchar out_T; + typedef uchar2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = make_uchar2( + conv_16to8(res.x), + conv_16to8(res.y) + ); + } +}; + +struct Convert_p016le_yuv444p +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = conv_16to8(res.x); + DEFAULT_DST(2) = conv_16to8(res.y); + } +}; + +struct Convert_p016le_p010le +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to10(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = make_ushort2( + conv_16to10(res.x), + conv_16to10(res.y) + 
); + } +}; + +struct Convert_p016le_p016le +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + } +}; + +struct Convert_p016le_yuv444p16le +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort2 in_T_uv; + typedef ushort out_T; + typedef ushort out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + in_T_uv res = SUB_F(uv, 1); + DEFAULT_DST(1) = res.x; + DEFAULT_DST(2) = res.y; + } +}; + +// yuv444p16le->X + +struct Convert_yuv444p16le_yuv420p +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = conv_16to8(SUB_F(uv, 1)); + DEFAULT_DST(2) = conv_16to8(SUB_F(uv, 2)); + } +}; + +struct Convert_yuv444p16le_nv12 +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort in_T_uv; + typedef uchar out_T; + typedef uchar2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_uchar2( + conv_16to8(SUB_F(uv, 1)), + conv_16to8(SUB_F(uv, 2)) + ); + } +}; + +struct Convert_yuv444p16le_yuv444p +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort in_T_uv; + typedef uchar out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to8(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = conv_16to8(SUB_F(uv, 1)); + DEFAULT_DST(2) = conv_16to8(SUB_F(uv, 2)); + } +}; + +struct Convert_yuv444p16le_p010le +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = conv_16to10(SUB_F(y, 0)); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_ushort2( + conv_16to10(SUB_F(uv, 1)), + conv_16to10(SUB_F(uv, 2)) + ); + } +}; + +struct Convert_yuv444p16le_p016le +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort in_T_uv; + typedef ushort out_T; + typedef ushort2 out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = make_ushort2( + SUB_F(uv, 1), + SUB_F(uv, 2) + ); + } +}; + +struct Convert_yuv444p16le_yuv444p16le +{ + static const int in_bit_depth = 16; + typedef ushort in_T; + typedef ushort in_T_uv; + typedef ushort out_T; + typedef ushort out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + DEFAULT_DST(1) = SUB_F(uv, 1); + DEFAULT_DST(2) = SUB_F(uv, 2); + } +}; + +// bgr0->X + +struct Convert_bgr0_bgr0 +{ + static const int in_bit_depth = 8; + typedef uchar4 in_T; + typedef uchar in_T_uv; + typedef uchar4 out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + } +}; + +struct Convert_bgr0_rgb0 +{ + static const int in_bit_depth = 8; + typedef uchar4 in_T; + typedef uchar in_T_uv; + typedef uchar4 out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + uchar4 res = SUB_F(y, 0); + DEFAULT_DST(0) = make_uchar4( + 
res.z, + res.y, + res.x, + res.w + ); + } + + DEF_F(Convert_uv, out_T_uv) + { + } +}; + +// rgb0->X + +struct Convert_rgb0_bgr0 +{ + static const int in_bit_depth = 8; + typedef uchar4 in_T; + typedef uchar in_T_uv; + typedef uchar4 out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + uchar4 res = SUB_F(y, 0); + DEFAULT_DST(0) = make_uchar4( + res.z, + res.y, + res.x, + res.w + ); + } + + DEF_F(Convert_uv, out_T_uv) + { + } +}; + +struct Convert_rgb0_rgb0 +{ + static const int in_bit_depth = 8; + typedef uchar4 in_T; + typedef uchar in_T_uv; + typedef uchar4 out_T; + typedef uchar out_T_uv; + + DEF_F(Convert, out_T) + { + DEFAULT_DST(0) = SUB_F(y, 0); + } + + DEF_F(Convert_uv, out_T_uv) + { + } +}; + +// --- SCALING LOGIC --- + typedef float4 (*coeffs_function_t)(float, float); -__device__ inline float4 lanczos_coeffs(float x, float param) +__device__ static inline float4 lanczos_coeffs(float x, float param) { const float pi = 3.141592654f; @@ -47,7 +967,7 @@ __device__ inline float4 lanczos_coeffs(float x, float param) return res / (res.x + res.y + res.z + res.w); } -__device__ inline float4 bicubic_coeffs(float x, float param) +__device__ static inline float4 bicubic_coeffs(float x, float param) { const float A = param == SCALE_CUDA_PARAM_DEFAULT ? 0.0f : -param; @@ -61,7 +981,7 @@ __device__ inline float4 bicubic_coeffs(float x, float param) } template -__device__ inline V apply_coeffs(float4 coeffs, V c0, V c1, V c2, V c3) +__device__ static inline V apply_coeffs(float4 coeffs, V c0, V c1, V c2, V c3) { V res = c0 * coeffs.x; res += c1 * coeffs.y; @@ -72,186 +992,245 @@ __device__ inline V apply_coeffs(float4 coeffs, V c0, V c1, V c2, V c3) } template -__device__ inline void Subsample_Nearest(cudaTextureObject_t tex, - T *dst, - int dst_width, int dst_height, int dst_pitch, - int src_width, int src_height, - int bit_depth) +__device__ static inline T Subsample_Nearest(cudaTextureObject_t tex, + int xo, int yo, + int dst_width, int dst_height, + int src_width, int src_height, + int bit_depth, float param) { - int xo = blockIdx.x * blockDim.x + threadIdx.x; - int yo = blockIdx.y * blockDim.y + threadIdx.y; + float hscale = (float)src_width / (float)dst_width; + float vscale = (float)src_height / (float)dst_height; + float xi = (xo + 0.5f) * hscale; + float yi = (yo + 0.5f) * vscale; - if (yo < dst_height && xo < dst_width) - { - float hscale = (float)src_width / (float)dst_width; - float vscale = (float)src_height / (float)dst_height; - float xi = (xo + 0.5f) * hscale; - float yi = (yo + 0.5f) * vscale; - - dst[yo*dst_pitch+xo] = tex2D(tex, xi, yi); - } + return tex2D(tex, xi, yi); } template -__device__ inline void Subsample_Bilinear(cudaTextureObject_t tex, - T *dst, - int dst_width, int dst_height, int dst_pitch, - int src_width, int src_height, - int bit_depth) +__device__ static inline T Subsample_Bilinear(cudaTextureObject_t tex, + int xo, int yo, + int dst_width, int dst_height, + int src_width, int src_height, + int bit_depth, float param) { - int xo = blockIdx.x * blockDim.x + threadIdx.x; - int yo = blockIdx.y * blockDim.y + threadIdx.y; + float hscale = (float)src_width / (float)dst_width; + float vscale = (float)src_height / (float)dst_height; + float xi = (xo + 0.5f) * hscale; + float yi = (yo + 0.5f) * vscale; + // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv} + float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f); + float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f); + // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + 
{0,0.5,wh} + float dx = wh / (0.5f + wh); + float dy = wv / (0.5f + wv); - if (yo < dst_height && xo < dst_width) - { - float hscale = (float)src_width / (float)dst_width; - float vscale = (float)src_height / (float)dst_height; - float xi = (xo + 0.5f) * hscale; - float yi = (yo + 0.5f) * vscale; - // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv} - float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f); - float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f); - // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh} - float dx = wh / (0.5f + wh); - float dy = wv / (0.5f + wv); + intT r; + vec_set_scalar(r, 2); + r += tex2D(tex, xi - dx, yi - dy); + r += tex2D(tex, xi + dx, yi - dy); + r += tex2D(tex, xi - dx, yi + dy); + r += tex2D(tex, xi + dx, yi + dy); - intT r = { 0 }; - vec_set_scalar(r, 2); - r += tex2D(tex, xi - dx, yi - dy); - r += tex2D(tex, xi + dx, yi - dy); - r += tex2D(tex, xi - dx, yi + dy); - r += tex2D(tex, xi + dx, yi + dy); - vec_set(dst[yo*dst_pitch+xo], r >> 2); - } + T res; + vec_set(res, r >> 2); + + return res; } -template -__device__ inline void Subsample_Bicubic(coeffs_function_t coeffs_function, - cudaTextureObject_t tex, - T *dst, - int dst_width, int dst_height, int dst_pitch, - int src_width, int src_height, - int bit_depth, float param) +template +__device__ static inline T Subsample_Bicubic(cudaTextureObject_t tex, + int xo, int yo, + int dst_width, int dst_height, + int src_width, int src_height, + int bit_depth, float param) { - int xo = blockIdx.x * blockDim.x + threadIdx.x; - int yo = blockIdx.y * blockDim.y + threadIdx.y; + float hscale = (float)src_width / (float)dst_width; + float vscale = (float)src_height / (float)dst_height; + float xi = (xo + 0.5f) * hscale - 0.5f; + float yi = (yo + 0.5f) * vscale - 0.5f; + float px = floor(xi); + float py = floor(yi); + float fx = xi - px; + float fy = yi - py; - if (yo < dst_height && xo < dst_width) - { - float hscale = (float)src_width / (float)dst_width; - float vscale = (float)src_height / (float)dst_height; - float xi = (xo + 0.5f) * hscale - 0.5f; - float yi = (yo + 0.5f) * vscale - 0.5f; - float px = floor(xi); - float py = floor(yi); - float fx = xi - px; - float fy = yi - py; + float factor = bit_depth > 8 ? 0xFFFF : 0xFF; - float factor = bit_depth > 8 ? 
0xFFFF : 0xFF; - - float4 coeffsX = coeffs_function(fx, param); - float4 coeffsY = coeffs_function(fy, param); + float4 coeffsX = coeffs_function(fx, param); + float4 coeffsY = coeffs_function(fy, param); #define PIX(x, y) tex2D(tex, (x), (y)) - dst[yo * dst_pitch + xo] = from_floatN( - apply_coeffs(coeffsY, - apply_coeffs(coeffsX, PIX(px - 1, py - 1), PIX(px, py - 1), PIX(px + 1, py - 1), PIX(px + 2, py - 1)), - apply_coeffs(coeffsX, PIX(px - 1, py ), PIX(px, py ), PIX(px + 1, py ), PIX(px + 2, py )), - apply_coeffs(coeffsX, PIX(px - 1, py + 1), PIX(px, py + 1), PIX(px + 1, py + 1), PIX(px + 2, py + 1)), - apply_coeffs(coeffsX, PIX(px - 1, py + 2), PIX(px, py + 2), PIX(px + 1, py + 2), PIX(px + 2, py + 2)) - ) * factor - ); + return from_floatN( + apply_coeffs(coeffsY, + apply_coeffs(coeffsX, PIX(px - 1, py - 1), PIX(px, py - 1), PIX(px + 1, py - 1), PIX(px + 2, py - 1)), + apply_coeffs(coeffsX, PIX(px - 1, py ), PIX(px, py ), PIX(px + 1, py ), PIX(px + 2, py )), + apply_coeffs(coeffsX, PIX(px - 1, py + 1), PIX(px, py + 1), PIX(px + 1, py + 1), PIX(px + 2, py + 1)), + apply_coeffs(coeffsX, PIX(px - 1, py + 2), PIX(px, py + 2), PIX(px + 1, py + 2), PIX(px + 2, py + 2)) + ) * factor + ); #undef PIX - } } +/// --- FUNCTION EXPORTS --- + +#define KERNEL_ARGS(T) \ + cudaTextureObject_t src_tex_0, cudaTextureObject_t src_tex_1, \ + cudaTextureObject_t src_tex_2, cudaTextureObject_t src_tex_3, \ + T *dst_0, T *dst_1, T *dst_2, T *dst_3, \ + int dst_width, int dst_height, int dst_pitch, \ + int src_width, int src_height, float param + +#define SUBSAMPLE(Convert, T) \ + cudaTextureObject_t src_tex[4] = \ + { src_tex_0, src_tex_1, src_tex_2, src_tex_3 }; \ + T *dst[4] = { dst_0, dst_1, dst_2, dst_3 }; \ + int xo = blockIdx.x * blockDim.x + threadIdx.x; \ + int yo = blockIdx.y * blockDim.y + threadIdx.y; \ + if (yo >= dst_height || xo >= dst_width) return; \ + Convert( \ + src_tex, dst, xo, yo, \ + dst_width, dst_height, dst_pitch, \ + src_width, src_height, param); + extern "C" { -#define NEAREST_KERNEL(T) \ - __global__ void Subsample_Nearest_ ## T(cudaTextureObject_t src_tex, \ - T *dst, \ - int dst_width, int dst_height, int dst_pitch, \ - int src_width, int src_height, \ - int bit_depth) \ - { \ - Subsample_Nearest(src_tex, dst, \ - dst_width, dst_height, dst_pitch, \ - src_width, src_height, \ - bit_depth); \ +#define NEAREST_KERNEL(C, S) \ + __global__ void Subsample_Nearest_##C##S( \ + KERNEL_ARGS(Convert_##C::out_T##S)) \ + { \ + SUBSAMPLE((Convert_##C::Convert##S< \ + Subsample_Nearest, \ + Subsample_Nearest >), \ + Convert_##C::out_T##S) \ } -NEAREST_KERNEL(uchar) -NEAREST_KERNEL(uchar2) -NEAREST_KERNEL(uchar4) +#define NEAREST_KERNEL_RAW(C) \ + NEAREST_KERNEL(C,) \ + NEAREST_KERNEL(C,_uv) -NEAREST_KERNEL(ushort) -NEAREST_KERNEL(ushort2) -NEAREST_KERNEL(ushort4) +#define NEAREST_KERNELS(C) \ + NEAREST_KERNEL_RAW(yuv420p_ ## C) \ + NEAREST_KERNEL_RAW(nv12_ ## C) \ + NEAREST_KERNEL_RAW(yuv444p_ ## C) \ + NEAREST_KERNEL_RAW(p010le_ ## C) \ + NEAREST_KERNEL_RAW(p016le_ ## C) \ + NEAREST_KERNEL_RAW(yuv444p16le_ ## C) -#define BILINEAR_KERNEL(T) \ - __global__ void Subsample_Bilinear_ ## T(cudaTextureObject_t src_tex, \ - T *dst, \ - int dst_width, int dst_height, int dst_pitch, \ - int src_width, int src_height, \ - int bit_depth) \ - { \ - Subsample_Bilinear(src_tex, dst, \ - dst_width, dst_height, dst_pitch, \ - src_width, src_height, \ - bit_depth); \ +NEAREST_KERNELS(yuv420p) +NEAREST_KERNELS(nv12) +NEAREST_KERNELS(yuv444p) +NEAREST_KERNELS(p010le) +NEAREST_KERNELS(p016le) 
+NEAREST_KERNELS(yuv444p16le) + +NEAREST_KERNEL_RAW(bgr0_bgr0) +NEAREST_KERNEL_RAW(rgb0_rgb0) +NEAREST_KERNEL_RAW(bgr0_rgb0) +NEAREST_KERNEL_RAW(rgb0_bgr0) + + +#define BILINEAR_KERNEL(C, S) \ + __global__ void Subsample_Bilinear_##C##S( \ + KERNEL_ARGS(Convert_##C::out_T##S)) \ + { \ + SUBSAMPLE((Convert_##C::Convert##S< \ + Subsample_Bilinear, \ + Subsample_Bilinear >), \ + Convert_##C::out_T##S) \ } -BILINEAR_KERNEL(uchar) -BILINEAR_KERNEL(uchar2) -BILINEAR_KERNEL(uchar4) +#define BILINEAR_KERNEL_RAW(C) \ + BILINEAR_KERNEL(C,) \ + BILINEAR_KERNEL(C,_uv) -BILINEAR_KERNEL(ushort) -BILINEAR_KERNEL(ushort2) -BILINEAR_KERNEL(ushort4) +#define BILINEAR_KERNELS(C) \ + BILINEAR_KERNEL_RAW(yuv420p_ ## C) \ + BILINEAR_KERNEL_RAW(nv12_ ## C) \ + BILINEAR_KERNEL_RAW(yuv444p_ ## C) \ + BILINEAR_KERNEL_RAW(p010le_ ## C) \ + BILINEAR_KERNEL_RAW(p016le_ ## C) \ + BILINEAR_KERNEL_RAW(yuv444p16le_ ## C) -#define BICUBIC_KERNEL(T) \ - __global__ void Subsample_Bicubic_ ## T(cudaTextureObject_t src_tex, \ - T *dst, \ - int dst_width, int dst_height, int dst_pitch, \ - int src_width, int src_height, \ - int bit_depth, float param) \ - { \ - Subsample_Bicubic(&bicubic_coeffs, src_tex, dst, \ - dst_width, dst_height, dst_pitch, \ - src_width, src_height, \ - bit_depth, param); \ +BILINEAR_KERNELS(yuv420p) +BILINEAR_KERNELS(nv12) +BILINEAR_KERNELS(yuv444p) +BILINEAR_KERNELS(p010le) +BILINEAR_KERNELS(p016le) +BILINEAR_KERNELS(yuv444p16le) + +BILINEAR_KERNEL_RAW(bgr0_bgr0) +BILINEAR_KERNEL_RAW(rgb0_rgb0) +BILINEAR_KERNEL_RAW(bgr0_rgb0) +BILINEAR_KERNEL_RAW(rgb0_bgr0) + +#define BICUBIC_KERNEL(C, S) \ + __global__ void Subsample_Bicubic_##C##S( \ + KERNEL_ARGS(Convert_##C::out_T##S)) \ + { \ + SUBSAMPLE((Convert_##C::Convert##S< \ + Subsample_Bicubic, \ + Subsample_Bicubic >), \ + Convert_##C::out_T##S) \ } -BICUBIC_KERNEL(uchar) -BICUBIC_KERNEL(uchar2) -BICUBIC_KERNEL(uchar4) +#define BICUBIC_KERNEL_RAW(C) \ + BICUBIC_KERNEL(C,) \ + BICUBIC_KERNEL(C,_uv) -BICUBIC_KERNEL(ushort) -BICUBIC_KERNEL(ushort2) -BICUBIC_KERNEL(ushort4) +#define BICUBIC_KERNELS(C) \ + BICUBIC_KERNEL_RAW(yuv420p_ ## C) \ + BICUBIC_KERNEL_RAW(nv12_ ## C) \ + BICUBIC_KERNEL_RAW(yuv444p_ ## C) \ + BICUBIC_KERNEL_RAW(p010le_ ## C) \ + BICUBIC_KERNEL_RAW(p016le_ ## C) \ + BICUBIC_KERNEL_RAW(yuv444p16le_ ## C) + +BICUBIC_KERNELS(yuv420p) +BICUBIC_KERNELS(nv12) +BICUBIC_KERNELS(yuv444p) +BICUBIC_KERNELS(p010le) +BICUBIC_KERNELS(p016le) +BICUBIC_KERNELS(yuv444p16le) + +BICUBIC_KERNEL_RAW(bgr0_bgr0) +BICUBIC_KERNEL_RAW(rgb0_rgb0) +BICUBIC_KERNEL_RAW(bgr0_rgb0) +BICUBIC_KERNEL_RAW(rgb0_bgr0) -#define LANCZOS_KERNEL(T) \ - __global__ void Subsample_Lanczos_ ## T(cudaTextureObject_t src_tex, \ - T *dst, \ - int dst_width, int dst_height, int dst_pitch, \ - int src_width, int src_height, \ - int bit_depth, float param) \ - { \ - Subsample_Bicubic(&lanczos_coeffs, src_tex, dst, \ - dst_width, dst_height, dst_pitch, \ - src_width, src_height, \ - bit_depth, param); \ +#define LANCZOS_KERNEL(C, S) \ + __global__ void Subsample_Lanczos_##C##S( \ + KERNEL_ARGS(Convert_##C::out_T##S)) \ + { \ + SUBSAMPLE((Convert_##C::Convert##S< \ + Subsample_Bicubic, \ + Subsample_Bicubic >), \ + Convert_##C::out_T##S) \ } -LANCZOS_KERNEL(uchar) -LANCZOS_KERNEL(uchar2) -LANCZOS_KERNEL(uchar4) +#define LANCZOS_KERNEL_RAW(C) \ + LANCZOS_KERNEL(C,) \ + LANCZOS_KERNEL(C,_uv) -LANCZOS_KERNEL(ushort) -LANCZOS_KERNEL(ushort2) -LANCZOS_KERNEL(ushort4) +#define LANCZOS_KERNELS(C) \ + LANCZOS_KERNEL_RAW(yuv420p_ ## C) \ + LANCZOS_KERNEL_RAW(nv12_ ## C) \ + 
LANCZOS_KERNEL_RAW(yuv444p_ ## C) \ + LANCZOS_KERNEL_RAW(p010le_ ## C) \ + LANCZOS_KERNEL_RAW(p016le_ ## C) \ + LANCZOS_KERNEL_RAW(yuv444p16le_ ## C) + +LANCZOS_KERNELS(yuv420p) +LANCZOS_KERNELS(nv12) +LANCZOS_KERNELS(yuv444p) +LANCZOS_KERNELS(p010le) +LANCZOS_KERNELS(p016le) +LANCZOS_KERNELS(yuv444p16le) + +LANCZOS_KERNEL_RAW(bgr0_bgr0) +LANCZOS_KERNEL_RAW(rgb0_rgb0) +LANCZOS_KERNEL_RAW(bgr0_rgb0) +LANCZOS_KERNEL_RAW(rgb0_bgr0) }
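Editorial addendum, not part of the patch: the contract this change establishes between
vf_scale_cuda.c and vf_scale_cuda.cu is one pair of __global__ entry points per
(algorithm, input format, output format) combination, named "Subsample_<Algo>_<in>_<out>"
and "Subsample_<Algo>_<in>_<out>_uv". cudascale_load_functions() builds those names with
snprintf() and resolves them via cuModuleGetFunction(), and call_resize_kernel() launches
them over a DIV_UP(dst_width, BLOCKX) x DIV_UP(dst_height, BLOCKY) grid, one thread per
destination pixel. The sketch below shows the same lookup-and-launch naming pattern with
the plain CUDA driver API outside of FFmpeg; the PTX file name, the block size, and the
chosen conversion are placeholder assumptions for illustration only.

/* Minimal standalone sketch of the "one named kernel per conversion" pattern
 * introduced above. Assumptions (not from the patch): a scale_cuda.ptx file on
 * disk exporting Subsample_Bicubic_nv12_yuv420p[_uv], and a 32x16 block size.
 * Build against the CUDA driver API and link with -lcuda. */
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK(x) do { CUresult r = (x); if (r != CUDA_SUCCESS) { \
        fprintf(stderr, "%s failed: %d\n", #x, (int)r); exit(1); } } while (0)
#define DIV_UP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
    CUdevice dev;
    CUcontext ctx;
    CUmodule mod;
    CUfunction func, func_uv;
    char name[128], name_uv[128];

    CHECK(cuInit(0));
    CHECK(cuDeviceGet(&dev, 0));
    CHECK(cuCtxCreate(&ctx, 0, dev));
    CHECK(cuModuleLoad(&mod, "scale_cuda.ptx"));   /* hypothetical PTX path */

    /* Same naming scheme as cudascale_load_functions(): the primary-plane
     * kernel, then the chroma variant with the "_uv" suffix. */
    snprintf(name, sizeof(name), "Subsample_%s_%s_%s", "Bicubic", "nv12", "yuv420p");
    CHECK(cuModuleGetFunction(&func, mod, name));
    snprintf(name_uv, sizeof(name_uv), "%s_uv", name);
    CHECK(cuModuleGetFunction(&func_uv, mod, name_uv));

    /* call_resize_kernel() launches one thread per destination pixel; a real
     * launch would also bind the source planes as CUtexObject textures and
     * pass the destination device pointers, which is omitted here. */
    int dst_w = 1280, dst_h = 720, blockx = 32, blocky = 16;
    printf("would launch %s over a %dx%d grid of %dx%d blocks\n", name,
           DIV_UP(dst_w, blockx), DIV_UP(dst_h, blocky), blockx, blocky);

    CHECK(cuCtxDestroy(ctx));
    return 0;
}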