libavfilter: Unify Execution Modes in DNN Filters

This commit unifies the async and sync execution modes from the DNN filters'
perspective. As of this commit, the Native backend supports only
synchronous execution.

The user can now switch between async and sync execution with the
'async' option in backend_configs: a value of 1 selects async mode
and 0 selects sync mode.
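
For example, synchronous execution could be requested on the OpenVINO
backend with a filtergraph option string along these lines (illustrative
only; the model path and tensor names are placeholders):

    dnn_detect=dnn_backend=openvino:model=model.xml:input=data:output=detection_out:backend_configs=async=0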

This commit affects the following filters:
1. vf_dnn_classify
2. vf_dnn_detect
3. vf_dnn_processing
4. vf_sr
5. vf_derain

This commit also updates the vf_dnn_detect and vf_dnn_classify filters
to send only the input frame to the DNN backends, passing NULL as the
output frame instead of the input frame.
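
The resulting in-filter call pattern can be sketched as below (a simplified,
hypothetical helper, not the actual filter code; it assumes the internal
headers libavfilter/avfilter.h and libavfilter/dnn_filter_common.h, and the
DnnDetectContext type from vf_dnn_detect.c): the backend gets only the input
frame, and the analyzed frame is retrieved again through ff_dnn_get_result()
with the detection/classification results attached as side data.

/* Simplified sketch of the detect/classify call pattern after this commit.
 * Illustrative only; error handling is reduced to the essentials. */
static int detect_one_frame_sketch(AVFilterContext *filter_ctx, AVFrame *in)
{
    DnnDetectContext *ctx = filter_ctx->priv;
    AVFilterLink *outlink = filter_ctx->outputs[0];
    DNNAsyncStatusType async_state;

    /* only the input frame is sent; out_frame is NULL for analytics filters */
    if (ff_dnn_execute_model(&ctx->dnnctx, in, NULL) != DNN_SUCCESS)
        return AVERROR(EIO);

    /* drain whatever is ready; results live in in_frame's side data */
    do {
        AVFrame *in_frame = NULL;
        AVFrame *out_frame = NULL;
        async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
        if (async_state == DAST_SUCCESS) {
            int ret = ff_filter_frame(outlink, in_frame);
            if (ret < 0)
                return ret;
        }
    } while (async_state == DAST_SUCCESS);

    return 0;
}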

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
Authored by Shubhanshu Saxena on 2021-08-26 02:40:45 +05:30; committed by Guo Yejun
parent d39580ac11
commit 60b4d07cf6
17 changed files with 158 additions and 161 deletions

@ -38,7 +38,7 @@ int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func
return AVERROR(EINVAL);
}
if (!exec_params->out_frame) {
if (!exec_params->out_frame && func_type == DFT_PROCESS_FRAME) {
av_log(ctx, AV_LOG_ERROR, "out frame is NULL when execute model.\n");
return AVERROR(EINVAL);
}
@ -138,7 +138,7 @@ DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_
return DNN_SUCCESS;
}
DNNAsyncStatusType ff_dnn_get_async_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
{
TaskItem *task = ff_queue_peek_front(task_queue);

@ -29,7 +29,8 @@
#include "libavutil/thread.h"
#define DNN_BACKEND_COMMON_OPTIONS \
{ "nireq", "number of request", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
{ "nireq", "number of request", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS }, \
{ "async", "use DNN async inference", OFFSET(options.async), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
// one task for one function call from dnn interface
typedef struct TaskItem {
@ -135,7 +136,7 @@ DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_
* @retval DAST_NOT_READY if inference not completed yet.
* @retval DAST_SUCCESS if result successfully extracted
*/
DNNAsyncStatusType ff_dnn_get_async_result_common(Queue *task_queue, AVFrame **in, AVFrame **out);
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out);
/**
* Allocate input and output frames and fill the Task

@ -34,6 +34,7 @@
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
static const AVOption dnn_native_options[] = {
{ "conv2d_threads", "threads num for conv2d layer", OFFSET(options.conv2d_threads), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
{ "async", "use DNN async inference", OFFSET(options.async), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
{ NULL },
};
@ -189,6 +190,11 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType f
goto fail;
native_model->model = model;
if (native_model->ctx.options.async) {
av_log(&native_model->ctx, AV_LOG_WARNING, "Async not supported. Rolling back to sync\n");
native_model->ctx.options.async = 0;
}
#if !HAVE_PTHREAD_CANCEL
if (native_model->ctx.options.conv2d_threads > 1){
av_log(&native_model->ctx, AV_LOG_WARNING, "'conv2d_threads' option was set but it is not supported "
@ -212,6 +218,11 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType f
goto fail;
}
native_model->task_queue = ff_queue_create();
if (!native_model->task_queue) {
goto fail;
}
native_model->inference_queue = ff_queue_create();
if (!native_model->inference_queue) {
goto fail;
@ -425,17 +436,30 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBasePara
{
NativeModel *native_model = model->model;
NativeContext *ctx = &native_model->ctx;
TaskItem task;
TaskItem *task;
if (ff_check_exec_params(ctx, DNN_NATIVE, model->func_type, exec_params) != 0) {
return DNN_ERROR;
}
if (ff_dnn_fill_task(&task, exec_params, native_model, 0, 1) != DNN_SUCCESS) {
task = av_malloc(sizeof(*task));
if (!task) {
av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
return DNN_ERROR;
}
if (extract_inference_from_task(&task, native_model->inference_queue) != DNN_SUCCESS) {
if (ff_dnn_fill_task(task, exec_params, native_model, ctx->options.async, 1) != DNN_SUCCESS) {
av_freep(&task);
return DNN_ERROR;
}
if (ff_queue_push_back(native_model->task_queue, task) < 0) {
av_freep(&task);
av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
return DNN_ERROR;
}
if (extract_inference_from_task(task, native_model->inference_queue) != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
return DNN_ERROR;
}
@ -443,6 +467,26 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBasePara
return execute_model_native(native_model->inference_queue);
}
DNNReturnType ff_dnn_flush_native(const DNNModel *model)
{
NativeModel *native_model = model->model;
if (ff_queue_size(native_model->inference_queue) == 0) {
// no pending task need to flush
return DNN_SUCCESS;
}
// for now, use sync mode with flush operation
// Switch to async when it is supported
return execute_model_native(native_model->inference_queue);
}
DNNAsyncStatusType ff_dnn_get_result_native(const DNNModel *model, AVFrame **in, AVFrame **out)
{
NativeModel *native_model = model->model;
return ff_dnn_get_result_common(native_model->task_queue, in, out);
}
int32_t ff_calculate_operand_dims_count(const DnnOperand *oprd)
{
int32_t result = 1;
@ -497,6 +541,15 @@ void ff_dnn_free_model_native(DNNModel **model)
av_freep(&item);
}
ff_queue_destroy(native_model->inference_queue);
while (ff_queue_size(native_model->task_queue) != 0) {
TaskItem *item = ff_queue_pop_front(native_model->task_queue);
av_frame_free(&item->in_frame);
av_frame_free(&item->out_frame);
av_freep(&item);
}
ff_queue_destroy(native_model->task_queue);
av_freep(&native_model);
}
av_freep(model);

@ -111,6 +111,7 @@ typedef struct InputParams{
} InputParams;
typedef struct NativeOptions{
uint8_t async;
uint32_t conv2d_threads;
} NativeOptions;
@ -127,6 +128,7 @@ typedef struct NativeModel{
int32_t layers_num;
DnnOperand *operands;
int32_t operands_num;
Queue *task_queue;
Queue *inference_queue;
} NativeModel;
@ -134,6 +136,10 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType f
DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params);
DNNAsyncStatusType ff_dnn_get_result_native(const DNNModel *model, AVFrame **in, AVFrame **out);
DNNReturnType ff_dnn_flush_native(const DNNModel *model);
void ff_dnn_free_model_native(DNNModel **model);
// NOTE: User must check for error (return value <= 0) to handle

@ -39,6 +39,7 @@
typedef struct OVOptions{
char *device_type;
int nireq;
uint8_t async;
int batch_size;
int input_resizable;
} OVOptions;
@ -271,14 +272,14 @@ static void infer_completion_callback(void *args)
av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
return;
}
ov_model->model->detect_post_proc(task->out_frame, &output, 1, ov_model->model->filter_ctx);
ov_model->model->detect_post_proc(task->in_frame, &output, 1, ov_model->model->filter_ctx);
break;
case DFT_ANALYTICS_CLASSIFY:
if (!ov_model->model->classify_post_proc) {
av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
return;
}
ov_model->model->classify_post_proc(task->out_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
ov_model->model->classify_post_proc(task->in_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
break;
default:
av_assert0(!"should not reach here");
@ -758,55 +759,6 @@ err:
}
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
{
OVModel *ov_model = model->model;
OVContext *ctx = &ov_model->ctx;
TaskItem task;
OVRequestItem *request;
if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
return DNN_ERROR;
}
if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
// Once we add async support for tensorflow backend and native backend,
// we'll combine the two sync/async functions in dnn_interface.h to
// simplify the code in filter, and async will be an option within backends.
// so, do not support now, and classify filter will not call this function.
return DNN_ERROR;
}
if (ctx->options.batch_size > 1) {
avpriv_report_missing_feature(ctx, "batch mode for sync execution");
return DNN_ERROR;
}
if (!ov_model->exe_network) {
if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
return DNN_ERROR;
}
}
if (ff_dnn_fill_task(&task, exec_params, ov_model, 0, 1) != DNN_SUCCESS) {
return DNN_ERROR;
}
if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
return DNN_ERROR;
}
request = ff_safe_queue_pop_front(ov_model->request_queue);
if (!request) {
av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
return DNN_ERROR;
}
return execute_model_ov(request, ov_model->inference_queue);
}
DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
{
OVModel *ov_model = model->model;
OVContext *ctx = &ov_model->ctx;
@ -831,7 +783,8 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBasePa
return DNN_ERROR;
}
if (ff_dnn_fill_task(task, exec_params, ov_model, 1, 1) != DNN_SUCCESS) {
if (ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1) != DNN_SUCCESS) {
av_freep(&task);
return DNN_ERROR;
}
@ -846,26 +799,48 @@ DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBasePa
return DNN_ERROR;
}
while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
if (ctx->options.async) {
while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
request = ff_safe_queue_pop_front(ov_model->request_queue);
if (!request) {
av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
return DNN_ERROR;
}
ret = execute_model_ov(request, ov_model->inference_queue);
if (ret != DNN_SUCCESS) {
return ret;
}
}
return DNN_SUCCESS;
}
else {
if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
// Classification filter has not been completely
// tested with the sync mode. So, do not support now.
avpriv_report_missing_feature(ctx, "classify for sync execution");
return DNN_ERROR;
}
if (ctx->options.batch_size > 1) {
avpriv_report_missing_feature(ctx, "batch mode for sync execution");
return DNN_ERROR;
}
request = ff_safe_queue_pop_front(ov_model->request_queue);
if (!request) {
av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
return DNN_ERROR;
}
ret = execute_model_ov(request, ov_model->inference_queue);
if (ret != DNN_SUCCESS) {
return ret;
}
return execute_model_ov(request, ov_model->inference_queue);
}
return DNN_SUCCESS;
}
DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
{
OVModel *ov_model = model->model;
return ff_dnn_get_async_result_common(ov_model->task_queue, in, out);
return ff_dnn_get_result_common(ov_model->task_queue, in, out);
}
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)

@ -32,8 +32,7 @@
DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params);
DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBaseParams *exec_params);
DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out);
DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out);
DNNReturnType ff_dnn_flush_ov(const DNNModel *model);
void ff_dnn_free_model_ov(DNNModel **model);

@ -42,6 +42,7 @@
typedef struct TFOptions{
char *sess_config;
uint8_t async;
uint32_t nireq;
} TFOptions;
@ -1061,7 +1062,7 @@ static void infer_completion_callback(void *args) {
av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
return;
}
tf_model->model->detect_post_proc(task->out_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
break;
default:
av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
@ -1121,34 +1122,6 @@ err:
DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
{
TFModel *tf_model = model->model;
TFContext *ctx = &tf_model->ctx;
TaskItem task;
TFRequestItem *request;
if (ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params) != 0) {
return DNN_ERROR;
}
if (ff_dnn_fill_task(&task, exec_params, tf_model, 0, 1) != DNN_SUCCESS) {
return DNN_ERROR;
}
if (extract_inference_from_task(&task, tf_model->inference_queue) != DNN_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
return DNN_ERROR;
}
request = ff_safe_queue_pop_front(tf_model->request_queue);
if (!request) {
av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
return DNN_ERROR;
}
return execute_model_tf(request, tf_model->inference_queue);
}
DNNReturnType ff_dnn_execute_model_async_tf(const DNNModel *model, DNNExecBaseParams *exec_params) {
TFModel *tf_model = model->model;
TFContext *ctx = &tf_model->ctx;
TaskItem *task;
@ -1164,7 +1137,7 @@ DNNReturnType ff_dnn_execute_model_async_tf(const DNNModel *model, DNNExecBasePa
return DNN_ERROR;
}
if (ff_dnn_fill_task(task, exec_params, tf_model, 1, 1) != DNN_SUCCESS) {
if (ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1) != DNN_SUCCESS) {
av_freep(&task);
return DNN_ERROR;
}
@ -1188,10 +1161,10 @@ DNNReturnType ff_dnn_execute_model_async_tf(const DNNModel *model, DNNExecBasePa
return execute_model_tf(request, tf_model->inference_queue);
}
DNNAsyncStatusType ff_dnn_get_async_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
{
TFModel *tf_model = model->model;
return ff_dnn_get_async_result_common(tf_model->task_queue, in, out);
return ff_dnn_get_result_common(tf_model->task_queue, in, out);
}
DNNReturnType ff_dnn_flush_tf(const DNNModel *model)

@ -32,8 +32,7 @@
DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params);
DNNReturnType ff_dnn_execute_model_async_tf(const DNNModel *model, DNNExecBaseParams *exec_params);
DNNAsyncStatusType ff_dnn_get_async_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out);
DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out);
DNNReturnType ff_dnn_flush_tf(const DNNModel *model);
void ff_dnn_free_model_tf(DNNModel **model);

@ -42,14 +42,15 @@ DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
case DNN_NATIVE:
dnn_module->load_model = &ff_dnn_load_model_native;
dnn_module->execute_model = &ff_dnn_execute_model_native;
dnn_module->get_result = &ff_dnn_get_result_native;
dnn_module->flush = &ff_dnn_flush_native;
dnn_module->free_model = &ff_dnn_free_model_native;
break;
case DNN_TF:
#if (CONFIG_LIBTENSORFLOW == 1)
dnn_module->load_model = &ff_dnn_load_model_tf;
dnn_module->execute_model = &ff_dnn_execute_model_tf;
dnn_module->execute_model_async = &ff_dnn_execute_model_async_tf;
dnn_module->get_async_result = &ff_dnn_get_async_result_tf;
dnn_module->get_result = &ff_dnn_get_result_tf;
dnn_module->flush = &ff_dnn_flush_tf;
dnn_module->free_model = &ff_dnn_free_model_tf;
#else
@ -61,8 +62,7 @@ DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
#if (CONFIG_LIBOPENVINO == 1)
dnn_module->load_model = &ff_dnn_load_model_ov;
dnn_module->execute_model = &ff_dnn_execute_model_ov;
dnn_module->execute_model_async = &ff_dnn_execute_model_async_ov;
dnn_module->get_async_result = &ff_dnn_get_async_result_ov;
dnn_module->get_result = &ff_dnn_get_result_ov;
dnn_module->flush = &ff_dnn_flush_ov;
dnn_module->free_model = &ff_dnn_free_model_ov;
#else

@ -84,11 +84,6 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
return AVERROR(EINVAL);
}
if (!ctx->dnn_module->execute_model_async && ctx->async) {
ctx->async = 0;
av_log(filter_ctx, AV_LOG_WARNING, "this backend does not support async execution, roll back to sync.\n");
}
#if !HAVE_PTHREAD_CANCEL
if (ctx->async) {
ctx->async = 0;
@ -141,18 +136,6 @@ DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *
return (ctx->dnn_module->execute_model)(ctx->model, &exec_params);
}
DNNReturnType ff_dnn_execute_model_async(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
{
DNNExecBaseParams exec_params = {
.input_name = ctx->model_inputname,
.output_names = (const char **)ctx->model_outputnames,
.nb_output = ctx->nb_outputs,
.in_frame = in_frame,
.out_frame = out_frame,
};
return (ctx->dnn_module->execute_model_async)(ctx->model, &exec_params);
}
DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target)
{
DNNExecClassificationParams class_params = {
@ -165,12 +148,12 @@ DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_f
},
.target = target,
};
return (ctx->dnn_module->execute_model_async)(ctx->model, &class_params.base);
return (ctx->dnn_module->execute_model)(ctx->model, &class_params.base);
}
DNNAsyncStatusType ff_dnn_get_async_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
{
return (ctx->dnn_module->get_async_result)(ctx->model, in_frame, out_frame);
return (ctx->dnn_module->get_result)(ctx->model, in_frame, out_frame);
}
DNNReturnType ff_dnn_flush(DnnContext *ctx)

@ -56,9 +56,8 @@ int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc);
DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input);
DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height);
DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
DNNReturnType ff_dnn_execute_model_async(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target);
DNNAsyncStatusType ff_dnn_get_async_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame);
DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame);
DNNReturnType ff_dnn_flush(DnnContext *ctx);
void ff_dnn_uninit(DnnContext *ctx);

@ -114,10 +114,8 @@ typedef struct DNNModule{
DNNModel *(*load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
// Executes model with specified input and output. Returns DNN_ERROR otherwise.
DNNReturnType (*execute_model)(const DNNModel *model, DNNExecBaseParams *exec_params);
// Executes model with specified input and output asynchronously. Returns DNN_ERROR otherwise.
DNNReturnType (*execute_model_async)(const DNNModel *model, DNNExecBaseParams *exec_params);
// Retrieve inference result.
DNNAsyncStatusType (*get_async_result)(const DNNModel *model, AVFrame **in, AVFrame **out);
DNNAsyncStatusType (*get_result)(const DNNModel *model, AVFrame **in, AVFrame **out);
// Flush all the pending tasks.
DNNReturnType (*flush)(const DNNModel *model);
// Frees memory allocated for model.

@ -68,6 +68,7 @@ static int query_formats(AVFilterContext *ctx)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DNNAsyncStatusType async_state = 0;
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
DRContext *dr_context = ctx->priv;
@ -88,6 +89,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
av_frame_free(&in);
return AVERROR(EIO);
}
do {
async_state = ff_dnn_get_result(&dr_context->dnnctx, &in, &out);
} while (async_state == DAST_NOT_READY);
if (async_state != DAST_SUCCESS)
return AVERROR(EINVAL);
av_frame_free(&in);

@ -224,14 +224,13 @@ static int dnn_classify_flush_frame(AVFilterLink *outlink, int64_t pts, int64_t
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
av_assert0(in_frame == out_frame);
ret = ff_filter_frame(outlink, out_frame);
async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
if (async_state == DAST_SUCCESS) {
ret = ff_filter_frame(outlink, in_frame);
if (ret < 0)
return ret;
if (out_pts)
*out_pts = out_frame->pts + pts;
*out_pts = in_frame->pts + pts;
}
av_usleep(5000);
} while (async_state >= DAST_NOT_READY);
@ -258,7 +257,7 @@ static int dnn_classify_activate(AVFilterContext *filter_ctx)
if (ret < 0)
return ret;
if (ret > 0) {
if (ff_dnn_execute_model_classification(&ctx->dnnctx, in, in, ctx->target) != DNN_SUCCESS) {
if (ff_dnn_execute_model_classification(&ctx->dnnctx, in, NULL, ctx->target) != DNN_SUCCESS) {
return AVERROR(EIO);
}
}
@ -268,10 +267,9 @@ static int dnn_classify_activate(AVFilterContext *filter_ctx)
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
av_assert0(in_frame == out_frame);
ret = ff_filter_frame(outlink, out_frame);
async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
if (async_state == DAST_SUCCESS) {
ret = ff_filter_frame(outlink, in_frame);
if (ret < 0)
return ret;
got_frame = 1;

@ -424,14 +424,13 @@ static int dnn_detect_flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *o
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
av_assert0(in_frame == out_frame);
ret = ff_filter_frame(outlink, out_frame);
async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
if (async_state == DAST_SUCCESS) {
ret = ff_filter_frame(outlink, in_frame);
if (ret < 0)
return ret;
if (out_pts)
*out_pts = out_frame->pts + pts;
*out_pts = in_frame->pts + pts;
}
av_usleep(5000);
} while (async_state >= DAST_NOT_READY);
@ -458,7 +457,7 @@ static int dnn_detect_activate_async(AVFilterContext *filter_ctx)
if (ret < 0)
return ret;
if (ret > 0) {
if (ff_dnn_execute_model_async(&ctx->dnnctx, in, in) != DNN_SUCCESS) {
if (ff_dnn_execute_model(&ctx->dnnctx, in, NULL) != DNN_SUCCESS) {
return AVERROR(EIO);
}
}
@ -468,10 +467,9 @@ static int dnn_detect_activate_async(AVFilterContext *filter_ctx)
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
av_assert0(in_frame == out_frame);
ret = ff_filter_frame(outlink, out_frame);
async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
if (async_state == DAST_SUCCESS) {
ret = ff_filter_frame(outlink, in_frame);
if (ret < 0)
return ret;
got_frame = 1;
@ -496,7 +494,7 @@ static int dnn_detect_activate_async(AVFilterContext *filter_ctx)
return 0;
}
static int dnn_detect_activate(AVFilterContext *filter_ctx)
static av_unused int dnn_detect_activate(AVFilterContext *filter_ctx)
{
DnnDetectContext *ctx = filter_ctx->priv;
@ -537,5 +535,5 @@ const AVFilter ff_vf_dnn_detect = {
FILTER_INPUTS(dnn_detect_inputs),
FILTER_OUTPUTS(dnn_detect_outputs),
.priv_class = &dnn_detect_class,
.activate = dnn_detect_activate,
.activate = dnn_detect_activate_async,
};

@ -328,7 +328,7 @@ static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_pts)
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
if (isPlanarYUV(in_frame->format))
copy_uv_planes(ctx, out_frame, in_frame);
@ -370,7 +370,7 @@ static int activate_async(AVFilterContext *filter_ctx)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
if (ff_dnn_execute_model_async(&ctx->dnnctx, in, out) != DNN_SUCCESS) {
if (ff_dnn_execute_model(&ctx->dnnctx, in, out) != DNN_SUCCESS) {
return AVERROR(EIO);
}
}
@ -380,7 +380,7 @@ static int activate_async(AVFilterContext *filter_ctx)
do {
AVFrame *in_frame = NULL;
AVFrame *out_frame = NULL;
async_state = ff_dnn_get_async_result(&ctx->dnnctx, &in_frame, &out_frame);
async_state = ff_dnn_get_result(&ctx->dnnctx, &in_frame, &out_frame);
if (out_frame) {
if (isPlanarYUV(in_frame->format))
copy_uv_planes(ctx, out_frame, in_frame);
@ -410,7 +410,7 @@ static int activate_async(AVFilterContext *filter_ctx)
return 0;
}
static int activate(AVFilterContext *filter_ctx)
static av_unused int activate(AVFilterContext *filter_ctx)
{
DnnProcessingContext *ctx = filter_ctx->priv;
@ -454,5 +454,5 @@ const AVFilter ff_vf_dnn_processing = {
FILTER_INPUTS(dnn_processing_inputs),
FILTER_OUTPUTS(dnn_processing_outputs),
.priv_class = &dnn_processing_class,
.activate = activate,
.activate = activate_async,
};

@ -119,6 +119,7 @@ static int config_output(AVFilterLink *outlink)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DNNAsyncStatusType async_state = 0;
AVFilterContext *context = inlink->dst;
SRContext *ctx = context->priv;
AVFilterLink *outlink = context->outputs[0];
@ -148,6 +149,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return AVERROR(EIO);
}
do {
async_state = ff_dnn_get_result(&ctx->dnnctx, &in, &out);
} while (async_state == DAST_NOT_READY);
if (async_state != DAST_SUCCESS)
return AVERROR(EINVAL);
if (ctx->sws_uv_scale) {
sws_scale(ctx->sws_uv_scale, (const uint8_t **)(in->data + 1), in->linesize + 1,
0, ctx->sws_uv_height, out->data + 1, out->linesize + 1);