//
// Utils.cpp
// MNN
//
// Created by MNN on 2019/07/26.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "Utils.hpp"
#include <map>
#include "MNN_generated.h"
#include "core/TensorUtils.hpp"
#include "core/MNNMemoryUtils.h"
#include "core/Backend.hpp"
#include "core/Execution.hpp"
#include "core/ConvolutionCommon.hpp"
namespace MNN {
namespace Express {
// Create `outputSize` output tensors owned by this Inside, backed by host memory.
Expr::Inside::Inside(int outputSize) {
    mOutputInfos.resize(outputSize);
    mOutputTensors.resize(outputSize);
    for (int i = 0; i < outputSize; ++i) {
        mOutputTensors[i] = new Tensor;
        TensorUtils::getDescribe(mOutputTensors[i])->memoryType = Tensor::InsideDescribe::MEMORY_HOST;
    }
}
// Wrap an externally owned tensor as the single output; mOwnTensor is cleared
// so the destructor will not delete it.
Expr::Inside::Inside(Tensor* tensor) {
    mOutputInfos.resize(1);
    mOutputTensors.resize(1);
    mOutputTensors[0] = tensor;
    Utils::copyTensorToInfo(&mOutputInfos[0], tensor);
    mOutputInfos[0].syncSize();
    mOutputInfos[0].tensorArrayAttr = TensorUtils::getDescribe(tensor)->tensorArrayAttr;
    mOwnTensor = false;
}
Expr::Inside::~Inside() {
    // Only delete output tensors that this Inside allocated itself.
    if (mOwnTensor) {
        for (auto t : mOutputTensors) {
            delete t;
        }
    }
    if (nullptr != mHostTensor) {
        delete mHostTensor;
    }
}
// Map one enum value onto another; falls through to the next statement when
// the value does not match.
#define CONVERT(src, dst, f) \
    if (f == src) return dst;
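// As an illustration, the first use below,
//     CONVERT(NCHW, MNN_DATA_FORMAT_NCHW, format);
// expands to
//     if (format == NCHW) return MNN_DATA_FORMAT_NCHW;
// so a chain of CONVERT lines behaves like a switch with a fall-through default.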
int Utils::convertFormat(Dimensionformat format) {
    CONVERT(NCHW, MNN_DATA_FORMAT_NCHW, format);
    CONVERT(NHWC, MNN_DATA_FORMAT_NHWC, format);
    CONVERT(NC4HW4, MNN_DATA_FORMAT_NC4HW4, format);
    return MNN_DATA_FORMAT_UNKNOWN;
}
DataType Utils::convertDataType(halide_type_t type) {
    if (type.code == halide_type_float) {
        return DataType_DT_FLOAT;
    }
    if (type.code == halide_type_uint && type.bits == 8) {
        return DataType_DT_UINT8;
    }
    if (type.code == halide_type_int && type.bits == 8) {
        return DataType_DT_INT8;
    }
    if (type.code == halide_type_int && type.bits == 32) {
        return DataType_DT_INT32;
    }
    return DataType_DT_INVALID;
}
halide_type_t Utils::revertDataType(DataType dataType) {
    CONVERT(DataType_DT_FLOAT, halide_type_of<float>(), dataType);
    CONVERT(DataType_DT_INT32, halide_type_of<int32_t>(), dataType);
    // Note: DT_INT64 is narrowed to a 32-bit integer type here.
    CONVERT(DataType_DT_INT64, halide_type_of<int32_t>(), dataType);
    CONVERT(DataType_DT_UINT8, halide_type_of<uint8_t>(), dataType);
    CONVERT(DataType_DT_INT8, halide_type_of<int8_t>(), dataType);
    return halide_type_of<float>();
}
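// A minimal usage sketch (caller code, not part of this file) of the two
// conversions above; a DT_INT64 round trip would narrow to int32:
//
//     DataType dt = Utils::convertDataType(halide_type_of<float>()); // DataType_DT_FLOAT
//     halide_type_t ht = Utils::revertDataType(dt);                  // halide_type_of<float>()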
Express::Dimensionformat Utils::revertFormat(int format) {
    CONVERT(MNN_DATA_FORMAT_NCHW, Express::NCHW, format);
    CONVERT(MNN_DATA_FORMAT_NHWC, Express::NHWC, format);
    CONVERT(MNN_DATA_FORMAT_NC4HW4, Express::NC4HW4, format);
    return NCHW;
}
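// Likewise for formats (a hypothetical round trip, not part of this file):
//
//     int f = Utils::convertFormat(NHWC);     // MNN_DATA_FORMAT_NHWC
//     auto back = Utils::revertFormat(f);     // Express::NHWC
//
// Unmatched values fall back to MNN_DATA_FORMAT_UNKNOWN and NCHW respectively.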
void Utils::copyInfoToTensor(Tensor* dest, const Variable::Info* source) {
    if (nullptr == source) {
        dest->buffer().dimensions = 0;
        return;
    }
    for (int i = 0; i < source->dim.size(); ++i) {
        dest->setLength(i, source->dim[i]);
    }
    dest->buffer().dimensions = (int)source->dim.size();
    dest->buffer().type = source->type;
    TensorUtils::getDescribe(dest)->dimensionFormat = (MNN_DATA_FORMAT)Utils::convertFormat(source->order);
    TensorUtils::setLinearLayout(dest);
}
void Utils::copyTensorToInfo(Variable::Info* shape, const Tensor* tensor) {
    shape->type = tensor->getType();
    shape->dim = tensor->shape();
    shape->size = tensor->elementSize();
    shape->order = Utils::revertFormat(TensorUtils::getDescribe(tensor)->dimensionFormat);
}
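// Sketch (assumed caller code, not part of this file): the two helpers above
// are inverses over shape metadata, so cloning a tensor's layout might look like
//
//     Variable::Info info;
//     Utils::copyTensorToInfo(&info, srcTensor);   // srcTensor is a hypothetical Tensor*
//     Utils::copyInfoToTensor(dstTensor, &info);   // dstTensor is a hypothetical Tensor*
//
// Only shape, element type and dimension format are copied; no buffer data moves.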
bool Utils::allocMemoryForHostTensor(Tensor* dest) {
    // Already allocated: treat as success.
    if (nullptr != dest->buffer().host) {
        return true;
    }
    // Only tensors marked as host memory may be allocated here.
    if (TensorUtils::getDescribe(dest)->memoryType != Tensor::InsideDescribe::MEMORY_HOST) {
        return false;
    }
    auto size = dest->size();
    if (0 >= size) {
        return false;
    }
    dest->buffer().host = (uint8_t*)MNNMemoryAllocAlign(size, MNN_MEMORY_ALIGN_DEFAULT);
    return dest->buffer().host != nullptr;
}
bool Utils::releaseMemoryForHostTensor(Tensor* dest) {
    // Nothing to free: treat as success.
    if (nullptr == dest->buffer().host) {
        return true;
    }
    if (TensorUtils::getDescribe(dest)->memoryType != Tensor::InsideDescribe::MEMORY_HOST) {
        return false;
    }
    MNNMemoryFreeAlign(dest->buffer().host);
    dest->buffer().host = nullptr;
    return true;
}
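// A hedged lifecycle sketch (hypothetical caller, not in this file): host
// memory is only managed for tensors marked MEMORY_HOST, and both helpers are
// safe to call on already-allocated / already-freed buffers:
//
//     if (Utils::allocMemoryForHostTensor(t)) {   // t is a hypothetical Tensor*
//         // ... fill t->buffer().host ...
//         Utils::releaseMemoryForHostTensor(t);
//     }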
} // namespace Express
} // namespace MNN