dnn_backend_native_layer_mathbinary: add sub support

more math binary operations will be added here

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
Guo, Yejun 2020-03-20 20:55:38 +08:00
parent 2114c42418
commit ffa1561608
7 changed files with 219 additions and 4 deletions


@@ -5,6 +5,7 @@ OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_depth2space.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_maximum.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_mathbinary.o
DNN-OBJS-$(CONFIG_LIBTENSORFLOW) += dnn/dnn_backend_tf.o


@@ -41,6 +41,7 @@ typedef enum {
    DLT_DEPTH_TO_SPACE = 2,
    DLT_MIRROR_PAD = 3,
    DLT_MAXIMUM = 4,
    DLT_MATH_BINARY = 5,
    DLT_COUNT
} DNNLayerType;


@@ -0,0 +1,113 @@
/*
* Copyright (c) 2020
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNN native backend implementation.
*/
#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_mathbinary.h"

int dnn_load_layer_math_binary(Layer *layer, AVIOContext *model_file_context, int file_size)
{
    DnnLayerMathBinaryParams *params;
    int dnn_size = 0;
    int input_index = 0;

    params = av_malloc(sizeof(*params));
    if (!params)
        return 0;

    params->bin_op = (int32_t)avio_rl32(model_file_context);
    dnn_size += 4;

    params->input0_broadcast = (int32_t)avio_rl32(model_file_context);
    dnn_size += 4;
    if (params->input0_broadcast) {
        params->v = av_int2float(avio_rl32(model_file_context));
    } else {
        layer->input_operand_indexes[input_index] = (int32_t)avio_rl32(model_file_context);
        input_index++;
    }
    dnn_size += 4;

    params->input1_broadcast = (int32_t)avio_rl32(model_file_context);
    dnn_size += 4;
    if (params->input1_broadcast) {
        params->v = av_int2float(avio_rl32(model_file_context));
    } else {
        layer->input_operand_indexes[input_index] = (int32_t)avio_rl32(model_file_context);
        input_index++;
    }
    dnn_size += 4;

    layer->output_operand_index = (int32_t)avio_rl32(model_file_context);
    dnn_size += 4;
    layer->params = params;

    return dnn_size;
}

int dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_operand_indexes,
                                  int32_t output_operand_index, const void *parameters)
{
    const DnnOperand *input = &operands[input_operand_indexes[0]];
    DnnOperand *output = &operands[output_operand_index];
    const DnnLayerMathBinaryParams *params = (const DnnLayerMathBinaryParams *)parameters;
    int dims_count;
    const float *src;
    float *dst;

    for (int i = 0; i < 4; ++i)
        output->dims[i] = input->dims[i];

    output->data_type = input->data_type;
    output->length = calculate_operand_data_length(output);
    output->data = av_realloc(output->data, output->length);
    if (!output->data)
        return DNN_ERROR;

    dims_count = calculate_operand_dims_count(output);
    src = input->data;
    dst = output->data;

    switch (params->bin_op) {
    case DMBO_SUB:
        if (params->input0_broadcast) {
            for (int i = 0; i < dims_count; ++i) {
                dst[i] = params->v - src[i];
            }
        } else if (params->input1_broadcast) {
            for (int i = 0; i < dims_count; ++i) {
                dst[i] = src[i] - params->v;
            }
        } else {
            const DnnOperand *input1 = &operands[input_operand_indexes[1]];
            const float *src1 = input1->data;
            for (int i = 0; i < dims_count; ++i) {
                dst[i] = src[i] - src1[i];
            }
        }
        return 0;
    default:
        return -1;
    }
}
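
For reference, here is a minimal sketch (not part of the patch) of the field sequence that
dnn_load_layer_math_binary() consumes for one math_binary layer, written with Python's struct
module. The operand indexes (1 and 2) are made-up values, and it is assumed that the layer type
code (op2code['MathBinary'] == 5) written first by the converter is read by the generic native
model loader before this per-layer load function runs.

import struct

record  = struct.pack('<I', 0)    # bin_op: 0 == DMBO_SUB
record += struct.pack('<I', 0)    # input0_broadcast = 0, so the next field is an operand index
record += struct.pack('<I', 1)    # input operand index for input 0 (hypothetical)
record += struct.pack('<I', 1)    # input1_broadcast = 1, so the next field is the scalar v
record += struct.pack('<f', 0.5)  # params->v, stored as a little-endian float32
record += struct.pack('<I', 2)    # output operand index (hypothetical)
assert len(record) == 24          # matches the dnn_size accumulated by the loader above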


@@ -0,0 +1,49 @@
/*
* Copyright (c) 2020
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNN inference functions interface for native backend.
*/
#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_MATHBINARY_H
#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_MATHBINARY_H

#include "libavformat/avio.h"
#include "dnn_backend_native.h"

typedef enum {
    DMBO_SUB = 0,
    DMBO_COUNT
} DNNMathBinaryOperation;

typedef struct DnnLayerMathBinaryParams{
    DNNMathBinaryOperation bin_op;
    int input0_broadcast;
    int input1_broadcast;
    float v;
} DnnLayerMathBinaryParams;

int dnn_load_layer_math_binary(Layer *layer, AVIOContext *model_file_context, int file_size);
int dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_operand_indexes,
                                  int32_t output_operand_index, const void *parameters);

#endif


@@ -24,6 +24,7 @@
#include "dnn_backend_native_layer_conv2d.h"
#include "dnn_backend_native_layer_depth2space.h"
#include "dnn_backend_native_layer_maximum.h"
#include "dnn_backend_native_layer_mathbinary.h"
LayerFunc layer_funcs[DLT_COUNT] = {
    {NULL, NULL},
@@ -31,4 +32,5 @@ LayerFunc layer_funcs[DLT_COUNT] = {
    {dnn_execute_layer_depth2space, dnn_load_layer_depth2space},
    {dnn_execute_layer_pad, dnn_load_layer_pad},
    {dnn_execute_layer_maximum, dnn_load_layer_maximum},
    {dnn_execute_layer_math_binary, dnn_load_layer_math_binary},
};


@@ -70,7 +70,8 @@ class TFConverter:
        self.converted_nodes = set()
        self.conv2d_scope_names = set()
        self.conv2d_scopename_inputname_dict = {}
-        self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4}
+        self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4, 'MathBinary':5}
+        self.mathbin2code = {'Sub':0}
        self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
        self.name_operand_dict = {}
@@ -113,6 +114,8 @@ class TFConverter:
        # if activation is None, and BiasAdd.next is the last op which is Identity
        if conv2d_scope_name + '/BiasAdd' in self.edges:
            anode = self.edges[conv2d_scope_name + '/BiasAdd'][0]
            if anode.op not in self.conv_activations:
                anode = None
        else:
            anode = None
        return knode, bnode, dnode, anode
@@ -252,14 +255,47 @@ class TFConverter:
        np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)

    def dump_sub_to_file(self, node, f):
        assert(node.op == 'Sub')
        self.layer_number = self.layer_number + 1
        self.converted_nodes.add(node.name)
        i0_node = self.name_node_dict[node.input[0]]
        i1_node = self.name_node_dict[node.input[1]]
        np.array([self.op2code['MathBinary'], self.mathbin2code[node.op]], dtype=np.uint32).tofile(f)
        if i0_node.op == 'Const':
            scalar = i0_node.attr['value'].tensor.float_val[0]
            assert(i0_node.name.find('sub/x') != -1)
            np.array([1], dtype=np.uint32).tofile(f)
            np.array([scalar], dtype=np.float32).tofile(f)
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
        elif i1_node.op == 'Const':
            scalar = i1_node.attr['value'].tensor.float_val[0]
            assert(i1_node.name.find('sub/y') != -1)
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
            np.array([1], dtype=np.uint32).tofile(f)
            np.array([scalar], dtype=np.float32).tofile(f)
        else:
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
            np.array([0], dtype=np.uint32).tofile(f)
            input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
            np.array([input_operand_index], dtype=np.uint32).tofile(f)
        output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
        np.array([output_operand_index], dtype=np.uint32).tofile(f)

    def dump_layers_to_file(self, f):
        for node in self.nodes:
            if node.name in self.converted_nodes:
                continue
            # conv2d with dilation generates very complex nodes, so handle it in special
-            scope_name = TFConverter.get_scope_name(node.name)
-            if scope_name in self.conv2d_scope_names:
+            if self.in_conv2d_scope(node.name):
                if node.op == 'Conv2D':
                    self.dump_complex_conv2d_to_file(node, f)
                continue
@@ -272,6 +308,8 @@
                self.dump_mirrorpad_to_file(node, f)
            elif node.op == 'Maximum':
                self.dump_maximum_to_file(node, f)
            elif node.op == 'Sub':
                self.dump_sub_to_file(node, f)

    def dump_operands_to_file(self, f):
@@ -352,6 +390,17 @@
        return name[0:index]

    def in_conv2d_scope(self, name):
        inner_scope = TFConverter.get_scope_name(name)
        if inner_scope == "":
            return False
        for scope in self.conv2d_scope_names:
            index = inner_scope.find(scope)
            if index == 0:
                return True
        return False

    def generate_conv2d_scope_info(self):
        # mostly, conv2d is a sub block in graph, get the scope name
        for node in self.nodes:
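
As a rough usage note (an assumed example, not taken from the patch): for a TensorFlow graph that
computes y = x - 0.5, input[1] of the Sub node is a Const, so dump_sub_to_file() takes the
i1_node.op == 'Const' branch and writes input0_broadcast = 0 plus the operand index of x, followed
by input1_broadcast = 1 plus the scalar 0.5; the native backend's DMBO_SUB case then computes
dst[i] = src[i] - 0.5. The three converter branches line up with the three loops in
dnn_execute_layer_math_binary(), which are roughly equivalent to this NumPy sketch:

import numpy as np

src  = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # input operand 0
src1 = np.array([0.5, 1.0, 1.5], dtype=np.float32)   # input operand 1 (two-tensor case)
v = np.float32(0.5)                                   # broadcast scalar params->v

dst_input0_broadcast = v - src      # Const is input 0: dst[i] = v - src[i]
dst_input1_broadcast = src - v      # Const is input 1: dst[i] = src[i] - v
dst_no_broadcast     = src - src1   # no Const input:   dst[i] = src[i] - src1[i]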


@@ -23,4 +23,4 @@ str = 'FFMPEGDNNNATIVE'
major = 1
# increase minor when we don't have to re-convert the model file
-minor = 0
+minor = 1