dnn/native: add native support for avg_pool

Pooling strides in the channel dimension are not supported yet.

Signed-off-by: Ting Fu <ting.fu@intel.com>
Reviewed-by: Guo, Yejun <yejun.guo@intel.com>
Ting Fu 2020-08-10 00:33:13 +08:00 committed by Guo, Yejun
parent 40597add98
commit 91efc41a69
7 changed files with 223 additions and 3 deletions

libavfilter/dnn/Makefile

@@ -1,6 +1,7 @@
OBJS-$(CONFIG_DNN) += dnn/dnn_interface.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layers.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_avgpool.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_pad.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_conv2d.o
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_depth2space.o

libavfilter/dnn/dnn_backend_native.h

@@ -43,10 +43,12 @@ typedef enum {
DLT_MAXIMUM = 4,
DLT_MATH_BINARY = 5,
DLT_MATH_UNARY = 6,
DLT_AVG_POOL = 7,
DLT_COUNT
} DNNLayerType;
typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_OUTPUT} DNNOperandType;
typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNPaddingParam;
typedef struct Layer{
DNNLayerType type;

libavfilter/dnn/dnn_backend_native_layer_avgpool.c (new file)

@@ -0,0 +1,141 @@
/*
* Copyright (c) 2020
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNN native backend implementation.
*/
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_avgpool.h"
int dnn_load_layer_avg_pool(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num)
{
AvgPoolParams *avgpool_params;
int dnn_size = 0;
avgpool_params = av_malloc(sizeof(*avgpool_params));
if (!avgpool_params)
return 0;
avgpool_params->strides = (int32_t)avio_rl32(model_file_context);
avgpool_params->padding_method = (int32_t)avio_rl32(model_file_context);
avgpool_params->kernel_size = (int32_t)avio_rl32(model_file_context);
dnn_size += 12;
if (dnn_size > file_size || avgpool_params->kernel_size <= 0 || avgpool_params->strides <= 0) {
av_freep(&avgpool_params);
return 0;
}
layer->params = avgpool_params;
layer->input_operand_indexes[0] = (int32_t)avio_rl32(model_file_context);
layer->output_operand_index = (int32_t)avio_rl32(model_file_context);
dnn_size += 8;
if (layer->input_operand_indexes[0] >= operands_num || layer->output_operand_index >= operands_num) {
return 0;
}
return dnn_size;
}
int dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters)
{
float *output;
int height_end, width_end, height_radius, width_radius, output_height, output_width, kernel_area;
int32_t input_operand_index = input_operand_indexes[0];
int number = operands[input_operand_index].dims[0];
int height = operands[input_operand_index].dims[1];
int width = operands[input_operand_index].dims[2];
int channel = operands[input_operand_index].dims[3];
const float *input = operands[input_operand_index].data;
const AvgPoolParams *avgpool_params = (const AvgPoolParams *)parameters;
int kernel_strides = avgpool_params->strides;
int src_linesize = width * channel;
DnnOperand *output_operand = &operands[output_operand_index];
/**
 * When padding_method == SAME, TensorFlow only pads half of the required zero
 * pixels (rounded down) before the first line of the input, and the rest after
 * the last line.
 * E.g.: assuming input height = 1080 and strides = 11, the remainder = 1080 % 11 = 2;
 * with ksize = 5 it fills (5 - 2) >> 1 = 1 line before the first line of the input image
 * and 5 - 2 - 1 = 2 lines after the last line;
 * with ksize = 7 it fills (7 - 2) >> 1 = 2 lines before the first line of the input image
 * and 7 - 2 - 2 = 3 lines after the last line.
 */
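/* This is exactly what the SAME branch below computes:
 *   total_pad  = FFMAX(kernel_size - ((size - 1) % strides + 1), 0)
 *   pad_before = total_pad >> 1   (stored in height_radius / width_radius)
 *   pad_after  = total_pad - pad_before  (implicit: out-of-range pixels read as 0)
 */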
if (avgpool_params->padding_method == SAME) {
height_end = height;
width_end = width;
height_radius = avgpool_params->kernel_size - ((height - 1) % kernel_strides + 1);
width_radius = avgpool_params->kernel_size - ((width - 1) % kernel_strides + 1);
height_radius = height_radius < 0 ? 0 : height_radius >> 1;
width_radius = width_radius < 0 ? 0 : width_radius >> 1;
output_height = ceil(height / (kernel_strides * 1.0));
output_width = ceil(width / (kernel_strides * 1.0));
} else {
av_assert0(avgpool_params->padding_method == VALID);
height_end = height - avgpool_params->kernel_size + 1;
width_end = width - avgpool_params->kernel_size + 1;
height_radius = 0;
width_radius = 0;
output_height = ceil((height - avgpool_params->kernel_size + 1) / (kernel_strides * 1.0));
output_width = ceil((width - avgpool_params->kernel_size + 1) / (kernel_strides * 1.0));
}
output_operand->dims[0] = number;
output_operand->dims[1] = output_height;
output_operand->dims[2] = output_width;
// pooling in the channel dimension is not supported yet
output_operand->dims[3] = channel;
output_operand->data_type = operands[input_operand_index].data_type;
output_operand->length = calculate_operand_data_length(output_operand);
output_operand->data = av_realloc(output_operand->data, output_operand->length);
if (!output_operand->data)
return -1;
output = output_operand->data;
for (int y = 0; y < height_end; y += kernel_strides) {
for (int x = 0; x < width_end; x += kernel_strides) {
for (int n_channel = 0; n_channel < channel; ++n_channel) {
output[n_channel] = 0.0;
kernel_area = 0;
for (int kernel_y = 0; kernel_y < avgpool_params->kernel_size; ++kernel_y) {
for (int kernel_x = 0; kernel_x < avgpool_params->kernel_size; ++kernel_x) {
float input_pel;
int y_pos = y + (kernel_y - height_radius);
int x_pos = x + (kernel_x - width_radius);
if (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) {
input_pel = 0.0;
} else {
kernel_area++;
input_pel = input[y_pos * src_linesize + x_pos * channel + n_channel];
}
output[n_channel] += input_pel;
}
}
output[n_channel] /= kernel_area;
}
output += channel;
}
}
return 0;
}

libavfilter/dnn/dnn_backend_native_layer_avgpool.h (new file)

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2020
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNN inference functions interface for native backend.
*/
#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_AVGPOOL_H
#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_AVGPOOL_H
#include "dnn_backend_native.h"
typedef struct AvgPoolParams{
int32_t strides, kernel_size;
DNNPaddingParam padding_method;
} AvgPoolParams;
int dnn_load_layer_avg_pool(Layer *layer, AVIOContext *model_file_context, int file_size, int operands_num);
int dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_operand_indexes,
int32_t output_operand_index, const void *parameters);
#endif
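
For readers who want to exercise the new layer in isolation, here is a minimal, hypothetical smoke test built on top of this header. It is not part of the patch: the function name avg_pool_smoke_test is made up, while the DnnOperand fields (dims, data, data_type) and DNN_FLOAT come from the existing dnn_backend_native.h / dnn_interface.h.

    /* Illustrative sketch only, not part of the commit. */
    #include "libavutil/mem.h"
    #include "dnn_backend_native_layer_avgpool.h"

    static int avg_pool_smoke_test(void)
    {
        DnnOperand operands[2] = { 0 };            /* [0] = input, [1] = output */
        int32_t input_indexes[1] = { 0 };
        float input_data[3 * 3] = { 1, 2, 3,
                                    4, 5, 6,
                                    7, 8, 9 };     /* a 1x3x3x1 NHWC tensor */
        AvgPoolParams params = { .strides = 2, .kernel_size = 2, .padding_method = VALID };
        int ret;

        operands[0].dims[0] = 1;                   /* batch   */
        operands[0].dims[1] = 3;                   /* height  */
        operands[0].dims[2] = 3;                   /* width   */
        operands[0].dims[3] = 1;                   /* channel */
        operands[0].data      = input_data;
        operands[0].data_type = DNN_FLOAT;

        /* the layer fills in operands[1]'s dims, length and data itself */
        ret = dnn_execute_layer_avg_pool(operands, input_indexes, 1, &params);
        /* VALID padding, ksize 2, stride 2 -> one output value: (1+2+4+5)/4 = 3 */
        av_freep(&operands[1].data);
        return ret;
    }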

libavfilter/dnn/dnn_backend_native_layer_conv2d.h

@@ -24,12 +24,11 @@
#include "dnn_backend_native.h"
typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
typedef struct ConvolutionalParams{
int32_t input_num, output_num, kernel_size;
DNNActivationFunc activation;
DNNConvPaddingParam padding_method;
DNNPaddingParam padding_method;
int32_t dilation;
int32_t has_bias;
float *kernel;

libavfilter/dnn/dnn_backend_native_layers.c

@@ -26,6 +26,7 @@
#include "dnn_backend_native_layer_maximum.h"
#include "dnn_backend_native_layer_mathbinary.h"
#include "dnn_backend_native_layer_mathunary.h"
#include "dnn_backend_native_layer_avgpool.h"
LayerFunc layer_funcs[DLT_COUNT] = {
{NULL, NULL},
@@ -35,4 +36,5 @@ LayerFunc layer_funcs[DLT_COUNT] = {
{dnn_execute_layer_maximum, dnn_load_layer_maximum},
{dnn_execute_layer_math_binary, dnn_load_layer_math_binary},
{dnn_execute_layer_math_unary, dnn_load_layer_math_unary},
{dnn_execute_layer_avg_pool, dnn_load_layer_avg_pool},
};
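
The table is indexed by DNNLayerType, so the new row must land on slot DLT_AVG_POOL (7), right after the math_unary entry. As a rough sketch of how the backend dispatches through it at execution time; the pf_exec member name is assumed from dnn_backend_native.h, and the helper itself is illustrative, not part of the patch:

    static int native_execute_layer(DnnOperand *operands, const Layer *layer)
    {
        /* layer->type was read from the model file; for avg_pool it is DLT_AVG_POOL,
         * so this call lands on dnn_execute_layer_avg_pool registered above. */
        return layer_funcs[layer->type].pf_exec(operands,
                                                layer->input_operand_indexes,
                                                layer->output_operand_index,
                                                layer->params);
    }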

tools/python/convert_from_tf.py

@@ -67,10 +67,12 @@ class TFConverter:
self.edges = {}
self.conv_activations = {'Relu':0, 'Tanh':1, 'Sigmoid':2, 'None':3, 'LeakyRelu':4}
self.conv_paddings = {'VALID':0, 'SAME':1}
self.pool_paddings = {'VALID':0, 'SAME':1}
self.converted_nodes = set()
self.conv2d_scope_names = set()
self.conv2d_scopename_inputname_dict = {}
self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4, 'MathBinary':5, 'MathUnary':6}
self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4,
'MathBinary':5, 'MathUnary':6, 'AvgPool':7}
self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4}
self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
@@ -300,6 +302,37 @@ class TFConverter:
np.array([output_operand_index],dtype=np.uint32).tofile(f)
def dump_avg_pool_to_file(self, node, f):
assert(node.op == 'AvgPool')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
strides = node.attr['strides']
# TensorFlow does not support pooling strides in the batch dimension, and the
# current native NN does not support pooling strides in the channel dimension, so assert() them here.
assert(strides.list.i[1]==strides.list.i[2])
assert(strides.list.i[0]==1)
assert(strides.list.i[3]==1)
strides = strides.list.i[1]
filter_node = node.attr['ksize']
input_name = node.input[0]
# TensorFlow does not support pooling ksize in the batch or channel dimension.
assert(filter_node.list.i[0]==1)
assert(filter_node.list.i[3]==1)
filter_height = filter_node.list.i[1]
filter_width = filter_node.list.i[2]
padding = node.attr['padding'].s.decode("utf-8")
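# Note: the write order after the op code (strides, padding, kernel size) must match
# the avio_rl32() reads in dnn_load_layer_avg_pool(); only filter_height is stored
# because the native layer assumes a square kernel.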
np.array([self.op2code[node.op], strides, self.pool_paddings[padding], filter_height],
dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index],dtype=np.uint32).tofile(f)
def dump_layers_to_file(self, f):
for node in self.nodes:
if node.name in self.converted_nodes:
@@ -313,6 +346,8 @@ class TFConverter:
if node.op == 'Conv2D':
self.dump_simple_conv2d_to_file(node, f)
if node.op == 'AvgPool':
self.dump_avg_pool_to_file(node, f)
elif node.op == 'DepthToSpace':
self.dump_depth2space_to_file(node, f)
elif node.op == 'MirrorPad':