hwcontext_vulkan: add support for allocating all planes in a single allocation

VAAPI on Intel can import external frames, but the planes of the
external frames must be in the same DRM object. A new option
"contiguous_planes" is added to the device. This option tells the
device to allocate all planes in a single memory allocation. When the
device is derived from VAAPI, this flag is enabled by default.
A new "flags" field is also added to AVVulkanFramesContext. Users can
use it to force-enable or force-disable this behaviour.
A new field "offset" is added to AVVkFrame. It describes each plane's
offset from the memory currently bound to its VkImage.

Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
Further-modifications-by: Lynne <dev@lynne.ee>
Wenbin Chen, 2021-12-07 17:05:51 +08:00, committed by Lynne
parent f3c9847c27
commit bd6ef73399
2 changed files with 103 additions and 4 deletions
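
For context, a minimal sketch of how a caller could opt into the new behaviour at device creation time; the helper name is hypothetical, but av_hwdevice_ctx_create() and the "contiguous_planes" option are the ones this patch handles:

#include <libavutil/hwcontext.h>
#include <libavutil/dict.h>

/* Hypothetical helper: create a Vulkan device with all image planes
 * placed in a single VkDeviceMemory allocation. Passing "0" instead
 * forces per-plane allocations; leaving the option unset keeps the
 * autodetected default (-1, enabled on Intel devices). */
static int create_contiguous_vulkan_device(AVBufferRef **dev)
{
    AVDictionary *opts = NULL;
    int err;

    av_dict_set(&opts, "contiguous_planes", "1", 0);
    err = av_hwdevice_ctx_create(dev, AV_HWDEVICE_TYPE_VULKAN,
                                 NULL, opts, 0);
    av_dict_free(&opts);
    return err;
}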

libavutil/hwcontext_vulkan.c

@@ -103,8 +103,14 @@ typedef struct VulkanDevicePriv {
/* Settings */
int use_linear_images;
/* Option to allocate all image planes in a single allocation */
int contiguous_planes;
/* Nvidia */
int dev_is_nvidia;
/* Intel */
int dev_is_intel;
} VulkanDevicePriv;
typedef struct VulkanFramesPriv {
@@ -1374,6 +1380,12 @@ static int vulkan_device_create_internal(AVHWDeviceContext *ctx,
if (opt_d)
p->use_linear_images = strtol(opt_d->value, NULL, 10);
opt_d = av_dict_get(opts, "contiguous_planes", NULL, 0);
if (opt_d)
p->contiguous_planes = strtol(opt_d->value, NULL, 10);
else
p->contiguous_planes = -1;
hwctx->enabled_dev_extensions = dev_info.ppEnabledExtensionNames;
hwctx->nb_enabled_dev_extensions = dev_info.enabledExtensionCount;
@@ -1424,6 +1436,7 @@ static int vulkan_device_init(AVHWDeviceContext *ctx)
p->hprops.minImportedHostPointerAlignment);
p->dev_is_nvidia = (p->props.properties.vendorID == 0x10de);
p->dev_is_intel = (p->props.properties.vendorID == 0x8086);
vk->GetPhysicalDeviceQueueFamilyProperties(hwctx->phys_dev, &queue_num, NULL);
if (!queue_num) {
@@ -1742,9 +1755,14 @@ static int alloc_bind_mem(AVHWFramesContext *hwfc, AVVkFrame *f,
AVHWDeviceContext *ctx = hwfc->device_ctx;
VulkanDevicePriv *p = ctx->internal->priv;
FFVulkanFunctions *vk = &p->vkfn;
AVVulkanFramesContext *hwfctx = hwfc->hwctx;
const int planes = av_pix_fmt_count_planes(hwfc->sw_format);
VkBindImageMemoryInfo bind_info[AV_NUM_DATA_POINTERS] = { { 0 } };
VkMemoryRequirements cont_memory_requirements = { 0 };
int cont_mem_size_list[AV_NUM_DATA_POINTERS] = { 0 };
int cont_mem_size = 0;
AVVulkanDeviceContext *hwctx = ctx->hwctx;
for (int i = 0; i < planes; i++) {
@@ -1771,6 +1789,27 @@ static int alloc_bind_mem(AVHWFramesContext *hwfc, AVVkFrame *f,
req.memoryRequirements.size = FFALIGN(req.memoryRequirements.size,
p->props.properties.limits.minMemoryMapAlignment);
if (hwfctx->flags & AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY) {
if (ded_req.requiresDedicatedAllocation) {
av_log(hwfc, AV_LOG_ERROR, "Cannot allocate all planes in a single allocation, "
"device requires dedicated image allocation!\n");
return AVERROR(EINVAL);
} else if (!i) {
cont_memory_requirements = req.memoryRequirements;
} else if (cont_memory_requirements.memoryTypeBits !=
req.memoryRequirements.memoryTypeBits) {
av_log(hwfc, AV_LOG_ERROR, "The memory requirements differ between plane 0 "
"and %i, cannot allocate in a single region!\n",
i);
return AVERROR(EINVAL);
}
cont_mem_size_list[i] = FFALIGN(req.memoryRequirements.size,
req.memoryRequirements.alignment);
cont_mem_size += cont_mem_size_list[i];
continue;
}
/* In case the implementation prefers/requires dedicated allocation */
use_ded_mem = ded_req.prefersDedicatedAllocation |
ded_req.requiresDedicatedAllocation;
@@ -1792,6 +1831,29 @@ static int alloc_bind_mem(AVHWFramesContext *hwfc, AVVkFrame *f,
bind_info[i].memory = f->mem[i];
}
if (hwfctx->flags & AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY) {
cont_memory_requirements.size = cont_mem_size;
/* Allocate memory */
if ((err = alloc_mem(ctx, &cont_memory_requirements,
f->tiling == VK_IMAGE_TILING_LINEAR ?
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
(void *)(((uint8_t *)alloc_pnext)),
&f->flags, &f->mem[0])))
return err;
f->size[0] = cont_memory_requirements.size;
for (int i = 0; i < planes; i++) {
bind_info[i].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_info[i].image = f->img[i];
bind_info[i].memory = f->mem[0];
bind_info[i].memoryOffset = !i ? 0 : bind_info[i - 1].memoryOffset +
                                     cont_mem_size_list[i - 1];
f->offset[i] = bind_info[i].memoryOffset;
}
}
/* Bind the allocated memory to the images */
ret = vk->BindImageMemory2(hwctx->act_dev, planes, bind_info);
if (ret != VK_SUCCESS) {
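
The memoryOffset computation in the binding loop above is just a prefix sum over the aligned per-plane sizes; a standalone sketch of the arithmetic (hypothetical helper, not part of the patch):

#include <stddef.h>

/* Hypothetical helper mirroring the binding loop above: each plane
 * starts where the previous one ends within the single allocation. */
static void plane_offsets(const int *aligned_sizes, ptrdiff_t *offsets,
                          int planes)
{
    for (int i = 0; i < planes; i++)
        offsets[i] = i ? offsets[i - 1] + aligned_sizes[i - 1] : 0;
}

/* e.g. aligned sizes { 4096, 1024, 1024 } -> offsets { 0, 4096, 5120 } */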
@@ -2154,6 +2216,12 @@ static int vulkan_frames_init(AVHWFramesContext *hwfc)
if (!hwctx->usage)
hwctx->usage = FF_VK_DEFAULT_USAGE_FLAGS;
if (!(hwctx->flags & AV_VK_FRAME_FLAG_NONE)) {
if (p->contiguous_planes == 1 ||
((p->contiguous_planes == -1) && p->dev_is_intel))
hwctx->flags |= AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY;
}
err = create_exec_ctx(hwfc, &fp->conv_ctx,
dev_hwctx->queue_family_comp_index,
dev_hwctx->nb_comp_queues);
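
In effect: "contiguous_planes=1" always sets AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY, "contiguous_planes=0" never does, and the unset default (-1) enables it only on Intel devices; a caller who sets AV_VK_FRAME_FLAG_NONE on the frames context opts out of this autodetection entirely.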
@@ -3074,6 +3142,7 @@ static int vulkan_map_to_drm(AVHWFramesContext *hwfc, AVFrame *dst,
FFVulkanFunctions *vk = &p->vkfn;
VulkanFramesPriv *fp = hwfc->internal->priv;
AVVulkanDeviceContext *hwctx = hwfc->device_ctx->hwctx;
AVVulkanFramesContext *hwfctx = hwfc->hwctx;
const int planes = av_pix_fmt_count_planes(hwfc->sw_format);
VkImageDrmFormatModifierPropertiesEXT drm_mod = {
.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
@@ -3142,8 +3211,11 @@ static int vulkan_map_to_drm(AVHWFramesContext *hwfc, AVFrame *dst,
continue;
vk->GetImageSubresourceLayout(hwctx->act_dev, f->img[i], &sub, &layout);
drm_desc->layers[i].planes[0].offset = layout.offset;
drm_desc->layers[i].planes[0].pitch = layout.rowPitch;
if (hwfctx->flags & AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY)
drm_desc->layers[i].planes[0].offset += f->offset[i];
}
dst->width = src->width;
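
The net effect on export: with contiguous planes every layer references one shared object, so the plane's binding offset must be folded into its layer offset, as the hunk above does. A hypothetical sanity check over a frame already mapped to AV_PIX_FMT_DRM_PRIME:

#include <libavutil/frame.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/log.h>

/* Hypothetical sketch: print each layer's plane offset from a mapped
 * DRM_PRIME frame; for contiguous Vulkan frames these offsets already
 * include f->offset[i]. */
static void dump_drm_offsets(const AVFrame *drm)
{
    const AVDRMFrameDescriptor *desc =
        (const AVDRMFrameDescriptor *)drm->data[0];

    for (int i = 0; i < desc->nb_layers; i++)
        av_log(NULL, AV_LOG_INFO, "layer %d: offset %td\n",
               i, desc->layers[i].planes[0].offset);
}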

libavutil/hwcontext_vulkan.h

@@ -137,6 +137,20 @@ typedef struct AVVulkanDeviceContext {
int nb_decode_queues;
} AVVulkanDeviceContext;
/**
* Defines the behaviour of frame allocation.
*/
typedef enum AVVkFrameFlags {
/* Unless this flag is set, autodetected flags will be OR'd based on the
* device and tiling during av_hwframe_ctx_init(). */
AV_VK_FRAME_FLAG_NONE = (1ULL << 0),
/* Image planes will be allocated in a single VkDeviceMemory, rather
* than as per-plane VkDeviceMemory allocations. Required for exporting
* to VAAPI on Intel devices. */
AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY = (1ULL << 1),
} AVVkFrameFlags;
/**
* Allocated as AVHWFramesContext.hwctx, used to set pool-specific options
*/
@@ -165,6 +179,13 @@ typedef struct AVVulkanFramesContext {
* extensions are present in enabled_dev_extensions.
*/
void *alloc_pnext[AV_NUM_DATA_POINTERS];
/**
* A combination of AVVkFrameFlags. Unless AV_VK_FRAME_FLAG_NONE is set,
* autodetected flags will be OR'd based on the device and tiling during
* av_hwframe_ctx_init().
*/
AVVkFrameFlags flags;
} AVVulkanFramesContext;
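
A sketch of forcing the behaviour per frames context through this new field, before av_hwframe_ctx_init() runs its autodetection (the helper name is hypothetical):

#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_vulkan.h>

/* Hypothetical sketch: request contiguous plane allocation on an
 * as-yet-uninitialized frames context. To instead suppress the
 * device-based autodetection altogether, set AV_VK_FRAME_FLAG_NONE. */
static void force_contiguous_planes(AVHWFramesContext *frames_ctx)
{
    AVVulkanFramesContext *vkfc = frames_ctx->hwctx;

    vkfc->flags |= AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY;
}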
/*
@@ -192,8 +213,9 @@ typedef struct AVVkFrame {
VkImageTiling tiling;
/**
* Memory backing the images. Could be less than the amount of planes,
* in which case the offset value will indicate the binding offset of
* each plane in the memory.
*/
VkDeviceMemory mem[AV_NUM_DATA_POINTERS];
size_t size[AV_NUM_DATA_POINTERS];
@@ -230,6 +252,11 @@ typedef struct AVVkFrame {
* Internal data.
*/
struct AVVkFrameInternal *internal;
/**
* Describes the binding offset of each plane to the VkDeviceMemory.
*/
ptrdiff_t offset[AV_NUM_DATA_POINTERS];
} AVVkFrame;
/**