ffmpeg/libavfilter/dnn/dnn_interface.c

/*
* Copyright (c) 2018 Sergey Lavrushkin
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
 * @file
 * Implements DNN module initialization with the specified backend.
 */
#include "../dnn_interface.h"
#include "dnn_backend_native.h"
#include "dnn_backend_tf.h"
#include "dnn_backend_openvino.h"
#include "libavutil/mem.h"
DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
{
    DNNModule *dnn_module;

    dnn_module = av_mallocz(sizeof(DNNModule));
    if (!dnn_module)
        return NULL;

    switch (backend_type) {
    case DNN_NATIVE:
        dnn_module->load_model = &ff_dnn_load_model_native;
        dnn_module->execute_model = &ff_dnn_execute_model_native;
        dnn_module->get_result = &ff_dnn_get_result_native;
        dnn_module->flush = &ff_dnn_flush_native;
        dnn_module->free_model = &ff_dnn_free_model_native;
        break;
    case DNN_TF:
#if (CONFIG_LIBTENSORFLOW == 1)
        dnn_module->load_model = &ff_dnn_load_model_tf;
        dnn_module->execute_model = &ff_dnn_execute_model_tf;
        dnn_module->get_result = &ff_dnn_get_result_tf;
        dnn_module->flush = &ff_dnn_flush_tf;
        dnn_module->free_model = &ff_dnn_free_model_tf;
#else
        av_freep(&dnn_module);
        return NULL;
#endif
        break;
    case DNN_OV:
#if (CONFIG_LIBOPENVINO == 1)
        dnn_module->load_model = &ff_dnn_load_model_ov;
        dnn_module->execute_model = &ff_dnn_execute_model_ov;
        dnn_module->get_result = &ff_dnn_get_result_ov;
        dnn_module->flush = &ff_dnn_flush_ov;
        dnn_module->free_model = &ff_dnn_free_model_ov;
#else
        av_freep(&dnn_module);
        return NULL;
#endif
        break;
    default:
        av_log(NULL, AV_LOG_ERROR, "Module backend_type is not native, tensorflow or openvino\n");
        av_freep(&dnn_module);
        return NULL;
    }

    return dnn_module;
}
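
/*
 * Minimal usage sketch (illustrative only): the exact DNNModule callback
 * signatures are declared in ../dnn_interface.h and vary between FFmpeg
 * versions, so only the allocation/ownership contract shown above is
 * assumed here.
 *
 *     DNNModule *dnn_module = ff_get_dnn_module(DNN_OV);
 *     if (!dnn_module)
 *         return AVERROR(ENOSYS); // backend disabled at configure time,
 *                                 // or the allocation failed
 *     // ... drive dnn_module->load_model / execute_model / get_result ...
 *     av_freep(&dnn_module);
 */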