vf_libplacebo: switch to newer libplacebo helpers

Support for mapping/unmapping hardware frames has been added into
libplacebo itself, so we can scrap this code in favor of using the new
functions. This has the additional benefit of being forwards-compatible
as support for more complicated frame-related state management is added
to libplacebo (e.g. mapping Dolby Vision metadata).

It's worth pointing out that, technically, this would also allow
`vf_libplacebo` to accept, practically unmodified, other frame types
(e.g. vaapi or drm), or even software input formats. (Although we still
need a vulkan *device* to be available.)

To keep things simple, though, retain the current restriction to vulkan
frames. It's possible we could rethink this in a future commit, but for
now I don't want to introduce any more potentially breaking changes.
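
In practical terms, the new helpers collapse the per-frame plumbing into a map/render/unmap sequence. Below is a minimal, illustrative sketch of that flow, not the committed filter code: the render_one() helper and its parameters are invented for the example, and it assumes a working pl_gpu / pl_renderer and Vulkan-backed AVFrames.

#include <libavutil/frame.h>
#include <libavutil/error.h>
#include <libplacebo/renderer.h>
#include <libplacebo/utils/libav.h>

static int render_one(pl_gpu gpu, pl_renderer rr, AVFrame *in, AVFrame *out)
{
    struct pl_frame image, target;
    int err = 0, ok;

    /* One call per frame replaces the old per-plane pl_vulkan_wrap() /
     * pl_vulkan_release() bookkeeping. Both maps are attempted (no
     * short-circuit), mirroring the error handling in the change below. */
    ok  = pl_map_avframe(gpu, &image, NULL, in);
    ok &= pl_map_avframe(gpu, &target, NULL, out);
    if (!ok) {
        err = AVERROR_EXTERNAL;
        goto done;
    }

    pl_render_image(rr, &image, &target, &pl_render_default_params);

done:
    pl_unmap_avframe(gpu, &image);
    pl_unmap_avframe(gpu, &target);
    return err;
}

In the committed change, the same map and unmap calls simply bracket the filter's existing scaler setup, film grain handling and pl_render_image() call, as the diff below shows.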
Authored by Niklas Haas on 2021-12-14 14:43:33 +01:00; committed by Lynne
parent df46d7cb49
commit 5317a6366f
2 changed files with 13 additions and 80 deletions

configure

@@ -6566,7 +6566,7 @@ enabled libopus && {
         require_pkg_config libopus opus opus_multistream.h opus_multistream_surround_encoder_create
     }
 }
-enabled libplacebo && require_pkg_config libplacebo "libplacebo >= 4.173.0" libplacebo/vulkan.h pl_vulkan_create
+enabled libplacebo && require_pkg_config libplacebo "libplacebo >= 4.184.0" libplacebo/vulkan.h pl_vulkan_create
 enabled libpulse && require_pkg_config libpulse libpulse pulse/pulseaudio.h pa_context_new
 enabled librabbitmq && require_pkg_config librabbitmq "librabbitmq >= 0.7.1" amqp.h amqp_new_connection
 enabled librav1e && require_pkg_config librav1e "rav1e >= 0.4.0" rav1e.h rav1e_context_new

libavfilter/vf_libplacebo.c

@@ -275,64 +275,18 @@ static void libplacebo_uninit(AVFilterContext *avctx)
     s->gpu = NULL;
 }
-static int wrap_vkframe(pl_gpu gpu, const AVFrame *frame, int plane, pl_tex *tex)
-{
-    AVVkFrame *vkf = (AVVkFrame *) frame->data[0];
-    const AVHWFramesContext *hwfc = (AVHWFramesContext *) frame->hw_frames_ctx->data;
-    const AVVulkanFramesContext *vkfc = hwfc->hwctx;
-    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(hwfc->sw_format);
-    const VkFormat *vk_fmt = av_vkfmt_from_pixfmt(hwfc->sw_format);
-    const int chroma = plane == 1 || plane == 2;
-    *tex = pl_vulkan_wrap(gpu, pl_vulkan_wrap_params(
-        .image = vkf->img[plane],
-        .format = vk_fmt[plane],
-        .width = AV_CEIL_RSHIFT(frame->width, chroma ? desc->log2_chroma_w : 0),
-        .height = AV_CEIL_RSHIFT(frame->height, chroma ? desc->log2_chroma_h : 0),
-        .usage = vkfc->usage,
-    ));
-    if (!*tex)
-        return AVERROR(ENOMEM);
-    pl_vulkan_release(gpu, *tex, vkf->layout[plane], (pl_vulkan_sem) {
-        .sem = vkf->sem[plane],
-        .value = vkf->sem_value[plane]
-    });
-    return 0;
-}
-static int unwrap_vkframe(pl_gpu gpu, AVFrame *frame, int plane, pl_tex *tex)
-{
-    AVVkFrame *vkf = (AVVkFrame *) frame->data[0];
-    int ok = pl_vulkan_hold_raw(gpu, *tex, &vkf->layout[plane],
-                                (pl_vulkan_sem) { vkf->sem[plane], vkf->sem_value[plane] + 1 });
-    vkf->access[plane] = 0;
-    vkf->sem_value[plane] += !!ok;
-    return ok ? 0 : AVERROR_EXTERNAL;
-}
-static void set_sample_depth(struct pl_frame *out_frame, const AVFrame *frame)
-{
-    const AVHWFramesContext *hwfc = (AVHWFramesContext *) frame->hw_frames_ctx->data;
-    pl_fmt fmt = out_frame->planes[0].texture->params.format;
-    struct pl_bit_encoding *bits = &out_frame->repr.bits;
-    bits->sample_depth = fmt->component_depth[0];
-    switch (hwfc->sw_format) {
-    case AV_PIX_FMT_P010: bits->bit_shift = 6; break;
-    default: break;
-    }
-}
 static int process_frames(AVFilterContext *avctx, AVFrame *out, AVFrame *in)
 {
-    int err = 0;
+    int err = 0, ok;
     LibplaceboContext *s = avctx->priv;
     struct pl_render_params params;
     struct pl_frame image, target;
-    pl_frame_from_avframe(&image, in);
-    pl_frame_from_avframe(&target, out);
+    ok = pl_map_avframe(s->gpu, &image, NULL, in);
+    ok &= pl_map_avframe(s->gpu, &target, NULL, out);
+    if (!ok) {
+        err = AVERROR_EXTERNAL;
+        goto fail;
+    }
     if (!s->apply_filmgrain)
         image.film_grain.type = PL_FILM_GRAIN_NONE;
@@ -411,38 +365,17 @@ static int process_frames(AVFilterContext *avctx, AVFrame *out, AVFrame *in)
     RET(find_scaler(avctx, &params.upscaler, s->upscaler));
     RET(find_scaler(avctx, &params.downscaler, s->downscaler));
-    /* Ideally, we would persistently wrap all of these AVVkFrames into pl_tex
-     * objects, but for now we'll just create and destroy a wrapper per frame.
-     * Note that doing it this way is suboptimal, since it results in the
-     * creation and destruction of a VkSampler and VkFramebuffer per frame.
-     *
-     * FIXME: Can we do better? */
-    for (int i = 0; i < image.num_planes; i++)
-        RET(wrap_vkframe(s->gpu, in, i, &image.planes[i].texture));
-    for (int i = 0; i < target.num_planes; i++)
-        RET(wrap_vkframe(s->gpu, out, i, &target.planes[i].texture));
-    /* Since we're mapping vkframes manually, the pl_frame helpers don't know
-     * about the mismatch between the sample format and the color depth. */
-    set_sample_depth(&image, in);
-    set_sample_depth(&target, out);
     pl_render_image(s->renderer, &image, &target, &params);
-    for (int i = 0; i < image.num_planes; i++)
-        RET(unwrap_vkframe(s->gpu, in, i, &image.planes[i].texture));
-    for (int i = 0; i < target.num_planes; i++)
-        RET(unwrap_vkframe(s->gpu, out, i, &target.planes[i].texture));
+    pl_unmap_avframe(s->gpu, &image);
+    pl_unmap_avframe(s->gpu, &target);
     /* Flush the command queues for performance */
     pl_gpu_flush(s->gpu);
     return 0;
+    /* fall through */
 fail:
-    for (int i = 0; i < image.num_planes; i++)
-        pl_tex_destroy(s->gpu, &image.planes[i].texture);
-    for (int i = 0; i < target.num_planes; i++)
-        pl_tex_destroy(s->gpu, &target.planes[i].texture);
+    pl_unmap_avframe(s->gpu, &image);
+    pl_unmap_avframe(s->gpu, &target);
     return err;
 }