Adds TensorFlow backend for dnn inference module.

Signed-off-by: Pedro Arthur <bygrandao@gmail.com>
Sergey Lavrushkin 2018-06-03 20:22:50 +03:00 committed by Pedro Arthur
parent ddf6ff9dc6
commit d8c0bbb0aa
9 changed files with 3262 additions and 9 deletions


@@ -9,10 +9,9 @@ version <next>:
 - aderivative and aintegral audio filters
 - pal75bars and pal100bars video filter sources
 - support mbedTLS based TLS
-- DNN inference interface
-- Reimplemented SRCNN filter using DNN inference interface
 - adeclick filter
 - adeclip filter
+- libtensorflow backend for DNN based filters like srcnn


 version 4.0:

configure

@@ -259,6 +259,8 @@ External library support:
   --enable-libspeex        enable Speex de/encoding via libspeex [no]
   --enable-libsrt          enable Haivision SRT protocol via libsrt [no]
   --enable-libssh          enable SFTP protocol via libssh [no]
+  --enable-libtensorflow   enable TensorFlow as a DNN module backend
+                           for DNN based filters like srcnn [no]
   --enable-libtesseract    enable Tesseract, needed for ocr filter [no]
   --enable-libtheora       enable Theora encoding via libtheora [no]
   --enable-libtls          enable LibreSSL (via libtls), needed for https support

@@ -1713,6 +1715,7 @@ EXTERNAL_LIBRARY_LIST="
     libspeex
     libsrt
     libssh
+    libtensorflow
     libtesseract
     libtheora
     libtwolame

@@ -2244,6 +2247,7 @@ CONFIG_EXTRA="
     cbs_mpeg2
     cbs_vp9
     dirac_parse
+    dnn
     dvprofile
     exif
     faandct

@@ -2507,6 +2511,7 @@ cbs_mpeg2_select="cbs"
 cbs_vp9_select="cbs"
 dct_select="rdft"
 dirac_parse_select="golomb"
+dnn_suggest="libtensorflow"
 error_resilience_select="me_cmp"
 faandct_deps="faan"
 faandct_select="fdctdsp"

@@ -3396,6 +3401,7 @@ spectrumsynth_filter_select="fft"
 spp_filter_deps="gpl avcodec"
 spp_filter_select="fft idctdsp fdctdsp me_cmp pixblockdsp"
 srcnn_filter_deps="avformat"
+srcnn_filter_select="dnn"
 stereo3d_filter_deps="gpl"
 subtitles_filter_deps="avformat avcodec libass"
 super2xsai_filter_deps="gpl"

@@ -6055,6 +6061,7 @@ enabled libsoxr && require libsoxr soxr.h soxr_create -lsoxr
 enabled libssh && require_pkg_config libssh libssh libssh/sftp.h sftp_init
 enabled libspeex && require_pkg_config libspeex speex speex/speex.h speex_decoder_init
 enabled libsrt && require_pkg_config libsrt "srt >= 1.2.0" srt/srt.h srt_socket
+enabled libtensorflow && require libtensorflow tensorflow/c/c_api.h TF_Version -ltensorflow
 enabled libtesseract && require_pkg_config libtesseract tesseract tesseract/capi.h TessBaseAPICreate
 enabled libtheora && require libtheora theora/theoraenc.h th_info_init -ltheoraenc -ltheoradec -logg
 enabled libtls && require_pkg_config libtls libtls tls.h tls_configure


@@ -26,6 +26,8 @@ OBJS-$(HAVE_THREADS) += pthread.o
 
 # subsystems
 OBJS-$(CONFIG_QSVVPP) += qsvvpp.o
+DNN-OBJS-$(CONFIG_LIBTENSORFLOW) += dnn_backend_tf.o
+OBJS-$(CONFIG_DNN) += dnn_interface.o dnn_backend_native.o $(DNN-OBJS-yes)
 
 # audio filters
 OBJS-$(CONFIG_ABENCH_FILTER) += f_bench.o

@@ -336,7 +338,7 @@ OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
 OBJS-$(CONFIG_SOBEL_FILTER) += vf_convolution.o
 OBJS-$(CONFIG_SPLIT_FILTER) += split.o
 OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
-OBJS-$(CONFIG_SRCNN_FILTER) += vf_srcnn.o dnn_interface.o dnn_backend_native.o
+OBJS-$(CONFIG_SRCNN_FILTER) += vf_srcnn.o
 OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o framesync.o
 OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
 OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync.o


@@ -0,0 +1,309 @@
/*
* Copyright (c) 2018 Sergey Lavrushkin
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
 * @file
 * DNN tensorflow backend implementation.
 */

#include "dnn_backend_tf.h"
#include "dnn_srcnn.h"
#include "libavformat/avio.h"

#include <tensorflow/c/c_api.h>

typedef struct TFModel{
    TF_Graph* graph;
    TF_Session* session;
    TF_Status* status;
    TF_Output input, output;
    TF_Tensor* input_tensor;
    TF_Tensor* output_tensor;
    const DNNData* input_data;
    const DNNData* output_data;
} TFModel;

static void free_buffer(void* data, size_t length)
{
    av_freep(&data);
}
static TF_Buffer* read_graph(const char* model_filename)
{
    TF_Buffer* graph_buf;
    unsigned char* graph_data = NULL;
    AVIOContext* model_file_context;
    long size, bytes_read;

    if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
        return NULL;
    }
    size = avio_size(model_file_context);

    graph_data = av_malloc(size);
    if (!graph_data){
        avio_closep(&model_file_context);
        return NULL;
    }
    bytes_read = avio_read(model_file_context, graph_data, size);
    avio_closep(&model_file_context);
    if (bytes_read != size){
        av_freep(&graph_data);
        return NULL;
    }

    graph_buf = TF_NewBuffer();
    graph_buf->data = (void*)graph_data;
    graph_buf->length = size;
    graph_buf->data_deallocator = free_buffer;

    return graph_buf;
}
static DNNReturnType set_input_output_tf(void* model, const DNNData* input, const DNNData* output)
{
    TFModel* tf_model = (TFModel*)model;
    int64_t input_dims[] = {1, input->height, input->width, input->channels};
    int64_t output_dims[] = {1, output->height, output->width, output->channels};
    TF_SessionOptions* sess_opts;
    const TF_Operation* init_op = TF_GraphOperationByName(tf_model->graph, "init");

    // Input operation should be named 'x'
    tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, "x");
    if (!tf_model->input.oper){
        return DNN_ERROR;
    }
    tf_model->input.index = 0;
    if (tf_model->input_tensor){
        TF_DeleteTensor(tf_model->input_tensor);
    }
    tf_model->input_tensor = TF_AllocateTensor(TF_FLOAT, input_dims, 4,
                                               input_dims[1] * input_dims[2] * input_dims[3] * sizeof(float));
    if (!tf_model->input_tensor){
        return DNN_ERROR;
    }

    // Output operation should be named 'y'
    tf_model->output.oper = TF_GraphOperationByName(tf_model->graph, "y");
    if (!tf_model->output.oper){
        return DNN_ERROR;
    }
    tf_model->output.index = 0;
    if (tf_model->output_tensor){
        TF_DeleteTensor(tf_model->output_tensor);
    }
    tf_model->output_tensor = TF_AllocateTensor(TF_FLOAT, output_dims, 4,
                                                output_dims[1] * output_dims[2] * output_dims[3] * sizeof(float));
    if (!tf_model->output_tensor){
        return DNN_ERROR;
    }

    tf_model->input_data = input;
    tf_model->output_data = output;

    if (tf_model->session){
        TF_CloseSession(tf_model->session, tf_model->status);
        TF_DeleteSession(tf_model->session, tf_model->status);
    }

    sess_opts = TF_NewSessionOptions();
    tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
    TF_DeleteSessionOptions(sess_opts);
    if (TF_GetCode(tf_model->status) != TF_OK)
    {
        return DNN_ERROR;
    }

    // Run initialization operation with name "init" if it is present in graph
    if (init_op){
        TF_SessionRun(tf_model->session, NULL,
                      NULL, NULL, 0,
                      NULL, NULL, 0,
                      &init_op, 1, NULL, tf_model->status);
        if (TF_GetCode(tf_model->status) != TF_OK)
        {
            return DNN_ERROR;
        }
    }

    return DNN_SUCCESS;
}
DNNModel* ff_dnn_load_model_tf(const char* model_filename)
{
    DNNModel* model = NULL;
    TFModel* tf_model = NULL;
    TF_Buffer* graph_def;
    TF_ImportGraphDefOptions* graph_opts;

    model = av_malloc(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    tf_model = av_malloc(sizeof(TFModel));
    if (!tf_model){
        av_freep(&model);
        return NULL;
    }
    tf_model->session = NULL;
    tf_model->input_tensor = NULL;
    tf_model->output_tensor = NULL;

    graph_def = read_graph(model_filename);
    if (!graph_def){
        av_freep(&tf_model);
        av_freep(&model);
        return NULL;
    }
    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();
    graph_opts = TF_NewImportGraphDefOptions();
    TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
    TF_DeleteImportGraphDefOptions(graph_opts);
    TF_DeleteBuffer(graph_def);
    if (TF_GetCode(tf_model->status) != TF_OK){
        TF_DeleteGraph(tf_model->graph);
        TF_DeleteStatus(tf_model->status);
        av_freep(&tf_model);
        av_freep(&model);
        return NULL;
    }

    model->model = (void*)tf_model;
    model->set_input_output = &set_input_output_tf;

    return model;
}
DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
{
    DNNModel* model = NULL;
    TFModel* tf_model = NULL;
    TF_Buffer* graph_def;
    unsigned char* graph_data = NULL;
    TF_ImportGraphDefOptions* graph_opts;

    graph_def = TF_NewBuffer();
    switch (model_type){
    case DNN_SRCNN:
        graph_data = av_malloc(srcnn_tf_size);
        if (!graph_data){
            TF_DeleteBuffer(graph_def);
            return NULL;
        }
        memcpy(graph_data, srcnn_tf_model, srcnn_tf_size);
        graph_def->data = (void*)graph_data;
        graph_def->length = srcnn_tf_size;
        graph_def->data_deallocator = free_buffer;
        break;
    default:
        TF_DeleteBuffer(graph_def);
        return NULL;
    }

    model = av_malloc(sizeof(DNNModel));
    if (!model){
        TF_DeleteBuffer(graph_def);
        return NULL;
    }

    tf_model = av_malloc(sizeof(TFModel));
    if (!tf_model){
        TF_DeleteBuffer(graph_def);
        av_freep(&model);
        return NULL;
    }
    tf_model->session = NULL;
    tf_model->input_tensor = NULL;
    tf_model->output_tensor = NULL;

    tf_model->graph = TF_NewGraph();
    tf_model->status = TF_NewStatus();
    graph_opts = TF_NewImportGraphDefOptions();
    TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
    TF_DeleteImportGraphDefOptions(graph_opts);
    TF_DeleteBuffer(graph_def);
    if (TF_GetCode(tf_model->status) != TF_OK){
        TF_DeleteGraph(tf_model->graph);
        TF_DeleteStatus(tf_model->status);
        av_freep(&tf_model);
        av_freep(&model);
        return NULL;
    }

    model->model = (void*)tf_model;
    model->set_input_output = &set_input_output_tf;

    return model;
}
DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model)
{
    TFModel* tf_model = (TFModel*)model->model;

    memcpy(TF_TensorData(tf_model->input_tensor), tf_model->input_data->data,
           tf_model->input_data->height * tf_model->input_data->width *
           tf_model->input_data->channels * sizeof(float));

    TF_SessionRun(tf_model->session, NULL,
                  &tf_model->input, &tf_model->input_tensor, 1,
                  &tf_model->output, &tf_model->output_tensor, 1,
                  NULL, 0, NULL, tf_model->status);

    if (TF_GetCode(tf_model->status) != TF_OK){
        return DNN_ERROR;
    }
    else{
        memcpy(tf_model->output_data->data, TF_TensorData(tf_model->output_tensor),
               tf_model->output_data->height * tf_model->output_data->width *
               tf_model->output_data->channels * sizeof(float));

        return DNN_SUCCESS;
    }
}
void ff_dnn_free_model_tf(DNNModel** model)
{
    TFModel* tf_model;

    if (*model){
        tf_model = (TFModel*)(*model)->model;
        if (tf_model->graph){
            TF_DeleteGraph(tf_model->graph);
        }
        if (tf_model->session){
            TF_CloseSession(tf_model->session, tf_model->status);
            TF_DeleteSession(tf_model->session, tf_model->status);
        }
        if (tf_model->status){
            TF_DeleteStatus(tf_model->status);
        }
        if (tf_model->input_tensor){
            TF_DeleteTensor(tf_model->input_tensor);
        }
        if (tf_model->output_tensor){
            TF_DeleteTensor(tf_model->output_tensor);
        }
        av_freep(&tf_model);
        av_freep(model);
    }
}
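
A note on data layout: set_input_output_tf() allocates TF_FLOAT tensors with dimensions {1, height, width, channels}, and ff_dnn_execute_model_tf() copies DNNData.data into the input tensor byte for byte, so callers must supply packed, channel-interleaved (NHWC) floats. The helper below is an illustration only; the function name is hypothetical and not part of this commit.

// Illustration: the element ordering a caller must use for DNNData.data so
// that the plain memcpy in ff_dnn_execute_model_tf() yields a valid NHWC
// tensor of shape {1, height, width, channels}.
static void write_nhwc(float* data, int width, int channels,
                       int x, int y, int c, float value)
{
    data[(y * width + x) * channels + c] = value;
}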


@@ -0,0 +1,40 @@
/*
* Copyright (c) 2018 Sergey Lavrushkin
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
 * @file
 * DNN inference functions interface for TensorFlow backend.
 */

#ifndef AVFILTER_DNN_BACKEND_TF_H
#define AVFILTER_DNN_BACKEND_TF_H

#include "dnn_interface.h"

DNNModel* ff_dnn_load_model_tf(const char* model_filename);

DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type);

DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model);

void ff_dnn_free_model_tf(DNNModel** model);

#endif
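
For context, here is a minimal sketch (not part of the commit) of how a caller could drive this backend directly through the functions declared above. The function name run_srcnn_tf is hypothetical; the DNNData layout (a float* data pointer plus width/height/channels) and the set_input_output callback are taken from dnn_interface.h as changed in this commit, and error handling is reduced to the bare minimum.

#include "dnn_backend_tf.h"
#include "libavutil/mem.h"

// Runs the built-in SRCNN graph on a single float luma plane.
// SRCNN operates at the already-upscaled resolution, so input and
// output planes have the same dimensions here.
static int run_srcnn_tf(float* plane, int width, int height)
{
    DNNData input  = { .data = plane, .width = width, .height = height, .channels = 1 };
    DNNData output = { .data = av_malloc(width * height * sizeof(float)),
                       .width = width, .height = height, .channels = 1 };
    DNNModel* model;
    DNNReturnType ret;

    if (!output.data)
        return -1;

    // Load the default SRCNN graph embedded in dnn_srcnn.h.
    model = ff_dnn_load_default_model_tf(DNN_SRCNN);
    if (!model){
        av_freep(&output.data);
        return -1;
    }

    // Bind the buffers: allocates the TF tensors and (re)creates the session.
    if (model->set_input_output(model->model, &input, &output) != DNN_SUCCESS){
        ff_dnn_free_model_tf(&model);
        av_freep(&output.data);
        return -1;
    }

    // Copies input.data in, runs the graph, copies the result into output.data.
    ret = ff_dnn_execute_model_tf(model);

    ff_dnn_free_model_tf(&model);
    av_freep(&output.data);
    return ret == DNN_SUCCESS ? 0 : -1;
}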


@@ -25,6 +25,7 @@
 
 #include "dnn_interface.h"
 #include "dnn_backend_native.h"
+#include "dnn_backend_tf.h"
 #include "libavutil/mem.h"
 
 DNNModule* ff_get_dnn_module(DNNBackendType backend_type)
@@ -42,6 +43,17 @@ DNNModule* ff_get_dnn_module(DNNBackendType backend_type)
         dnn_module->load_default_model = &ff_dnn_load_default_model_native;
         dnn_module->execute_model = &ff_dnn_execute_model_native;
         dnn_module->free_model = &ff_dnn_free_model_native;
+        break;
+    case DNN_TF:
+    #if (CONFIG_LIBTENSORFLOW == 1)
+        dnn_module->load_model = &ff_dnn_load_model_tf;
+        dnn_module->load_default_model = &ff_dnn_load_default_model_tf;
+        dnn_module->execute_model = &ff_dnn_execute_model_tf;
+        dnn_module->free_model = &ff_dnn_free_model_tf;
+    #else
+        av_freep(&dnn_module);
+        return NULL;
+    #endif
     }
 
     return dnn_module;


@@ -28,7 +28,7 @@
 
 typedef enum {DNN_SUCCESS, DNN_ERROR} DNNReturnType;
 
-typedef enum {DNN_NATIVE} DNNBackendType;
+typedef enum {DNN_NATIVE, DNN_TF} DNNBackendType;
 
 typedef enum {DNN_SRCNN} DNNDefaultModel;
@@ -37,7 +37,6 @@ typedef struct DNNData{
    int width, height, channels;
} DNNData;

typedef struct DNNModel{
    // Stores model that can be different for different backends.
    void* model;

File diff suppressed because it is too large

@@ -41,7 +41,6 @@ typedef struct SRCNNContext {
    DNNData input_output;
} SRCNNContext;

#define OFFSET(x) offsetof(SRCNNContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM

static const AVOption srcnn_options[] = {
@@ -55,10 +54,19 @@ static av_cold int init(AVFilterContext* context)
 {
     SRCNNContext* srcnn_context = context->priv;
-    srcnn_context->dnn_module = ff_get_dnn_module(DNN_NATIVE);
+    srcnn_context->dnn_module = ff_get_dnn_module(DNN_TF);
     if (!srcnn_context->dnn_module){
-        av_log(context, AV_LOG_ERROR, "could not create dnn module\n");
-        return AVERROR(ENOMEM);
+        srcnn_context->dnn_module = ff_get_dnn_module(DNN_NATIVE);
+        if (!srcnn_context->dnn_module){
+            av_log(context, AV_LOG_ERROR, "could not create dnn module\n");
+            return AVERROR(ENOMEM);
+        }
+        else{
+            av_log(context, AV_LOG_INFO, "using native backend for DNN inference\n");
+        }
     }
+    else{
+        av_log(context, AV_LOG_INFO, "using tensorflow backend for DNN inference\n");
+    }
 
     if (!srcnn_context->model_filename){
         av_log(context, AV_LOG_INFO, "model file for network was not specified, using default network for x2 upsampling