// MNN/demo/exec/expressDemo.cpp
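//
// Usage (illustrative; matches the argument parsing in main below):
//   ./expressDemo.out model.mnn [forwardType] [testTime]
// forwardType is the integer value of an MNNForwardType (0 = MNN_FORWARD_CPU)
// and testTime is the number of timed inference iterations (default 10).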

#include <MNN/expr/Expr.hpp>
#include <MNN/expr/ExprCreator.hpp>
#include <MNN/expr/Executor.hpp>
#include <string>
#include <map>
#include <fstream>
#include <sstream>
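// MNN_OPEN_TIME_TRACE must be defined before including AutoTime.hpp so that
// the AUTOTIME macro below expands to a real scoped timer.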
#define MNN_OPEN_TIME_TRACE
#include <MNN/AutoTime.hpp>
using namespace MNN::Express;
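// Number of 4-element channel blocks needed for x channels, i.e. ceil(x / 4);
// MNN's NC4HW4 layout packs channels in groups of four.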
#define UP_DIV(x) (((x)+3)/4)
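// Builds a k x k SAME-padded convolution (zero weight and bias) over a square
// ic-channel feature map and returns its (input, output) pair for timing.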
static std::pair<VARP, VARP> _makeConvolution(int k, int ic, int oc, int size) {
    auto input = _Input({1, ic, size, size}, NC4HW4);
    return std::make_pair(input, _Conv(0.0f, 0.0f, input, {ic, oc}, {k, k}, SAME));
}
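// Expresses an (e x l) * (l x h) GEMM directly as _MatMul against a constant
// zero-filled weight matrix; only the timing matters here, not the values.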
static std::pair<VARP, VARP> _makeGEMMByMatMul(int e, int l, int h) {
    auto a = _Input({e, l});
    std::vector<float> weight(l * h);
    auto b = _Const(weight.data(), {l, h});
    auto c = _MatMul(a, b);
    return std::make_pair(a, c);
}
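// Expresses the same GEMM as a 1x1 convolution: l and h are rounded up to
// multiples of 4 to fit NC4HW4 channel blocks, and e becomes the spatial width.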
static std::pair<VARP, VARP> _makeGEMMByConvolution(int e, int l, int h) {
    auto icC4 = UP_DIV(l);
    auto ocC4 = UP_DIV(h);
    auto input = _Input({1, icC4 * 4, 1, e});
    return std::make_pair(input, _Conv(0.0f, 0.0f, input, {icC4 * 4, ocC4 * 4}, {1, 1}));
}
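// Times convolutions over a set of {kernel, ic, oc, size} configurations.
// writeMap marks the input dirty and readMap triggers recomputation, so each
// of the ten iterations runs the convolution once.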
static void _testConvolution() {
    std::vector<std::vector<int>> size = {
        {2, 3, 16, 224},
        {3, 3, 16, 224},
        {5, 3, 16, 224},
        {2, 16, 4, 224},
        {3, 16, 4, 224},
        {5, 16, 4, 224},
        {2, 16, 16, 224},
        {3, 16, 16, 224},
        {5, 16, 16, 224},
        {2, 64, 64, 112},
        {3, 64, 64, 112},
        {5, 64, 64, 112},
        {2, 512, 512, 4},
        {3, 512, 512, 4},
        {5, 512, 512, 4},
        {2, 512, 512, 16},
        {3, 512, 512, 16},
        {5, 512, 512, 16},
        {2, 512, 512, 32},
        {3, 512, 512, 32},
        {5, 512, 512, 32},
    };
    // Warm up with one large case so the CPU clock settles before timing.
    auto conv = _makeGEMMByConvolution(1024, 1024, 1024);
    for (int v = 0; v < 10; ++v) {
        conv.first->writeMap<float>();
        conv.first->unMap();
        conv.second->readMap<float>();
        conv.second->unMap();
    }
    for (size_t i = 0; i < size.size(); ++i) {
        conv = _makeConvolution(size[i][0], size[i][1], size[i][2], size[i][3]);
        MNN_PRINT("%d, %d, %d, %d: ", size[i][0], size[i][1], size[i][2], size[i][3]);
        AUTOTIME;
        for (int v = 0; v < 10; ++v) {
            conv.first->writeMap<float>();
            conv.first->unMap();
            conv.second->readMap<float>();
            conv.second->unMap();
        }
    }
}
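// Compares the MatMul and 1x1-convolution formulations of GEMM across a range
// of sizes, printing each problem's multiply count (in 2^20 units) first.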
static void _testGEMM() {
    std::vector<std::vector<int>> size = {
        {64, 64, 64},
        {64, 64, 128},
        {128, 128, 128},
        {128, 128, 256},
        {256, 256, 256},
        {256, 256, 512},
        {512, 512, 512},
        {512, 512, 1024},
        {1024, 1024, 1024},
    };
    for (size_t i = 0; i < size.size(); ++i) {
        auto x = size[i][0];
        auto y = size[i][1];
        auto z = size[i][2];
        // Multiply count of the x*y*z GEMM, scaled to mega (2^20) operations.
        auto flops = (float)x * (float)y * (float)z / 1024.0f / 1024.0f;
        FUNC_PRINT_ALL(flops, f);
    }
    // Warm up with the largest case, as in _testConvolution.
    auto conv = _makeGEMMByConvolution(1024, 1024, 1024);
    for (int v = 0; v < 10; ++v) {
        conv.first->writeMap<float>();
        conv.first->unMap();
        conv.second->readMap<float>();
        conv.second->unMap();
    }
    for (size_t i = 0; i < size.size(); ++i) {
        conv = _makeGEMMByConvolution(size[i][0], size[i][1], size[i][2]);
        AUTOTIME;
        for (int v = 0; v < 10; ++v) {
            conv.first->writeMap<float>();
            conv.first->unMap();
            conv.second->readMap<float>();
            conv.second->unMap();
        }
    }
    for (size_t i = 0; i < size.size(); ++i) {
        conv = _makeGEMMByMatMul(size[i][0], size[i][1], size[i][2]);
        AUTOTIME;
        for (int v = 0; v < 10; ++v) {
            conv.first->writeMap<float>();
            conv.first->unMap();
            conv.second->readMap<float>();
            conv.second->unMap();
        }
    }
}
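// Loads a model with the Express API, times inference, then feeds input_0.txt
// into the first input and writes the first output to output.txt.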
int main(int argc, const char* argv[]) {
    if (argc < 2) {
        MNN_ERROR("Usage: ./expressDemo.out model_path [forwardType] [testTime]\n");
        return 0;
    }
    auto modelFileName = argv[1];
    FUNC_PRINT_ALL(modelFileName, s);
    auto exe = Executor::getGlobalExecutor();
    MNN::BackendConfig config;
    config.precision = MNN::BackendConfig::Precision_Low;
    MNNForwardType forwardType = MNN_FORWARD_CPU;
    if (argc >= 3) {
        forwardType = (MNNForwardType)atoi(argv[2]);
    }
    // Run on the chosen backend with 4 threads.
    exe->setGlobalExecutorConfig(forwardType, config, 4);
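    // Variable::loadMap loads the .mnn model as a name -> VARP map;
    // getInputAndOutput splits it into its input and output variables.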
    auto model       = Variable::loadMap(modelFileName);
    auto inputOutput = Variable::getInputAndOutput(model);
    auto inputs      = inputOutput.first;
    auto outputs     = inputOutput.second;
    int testTime = 10;
    if (argc >= 4) {
        testTime = atoi(argv[3]);
    }
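    // Round-trip the loaded output graph back to disk for inspection.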
    Variable::save(Variable::mapToSequence(outputs), "temp.mnn");
    auto input  = inputs.begin()->second;
    auto output = outputs.begin()->second;
    //input->resize({1, 224, 224, 3});
    auto inputInfo = input->getInfo();
    if (nullptr == inputInfo) {
        return 0;
    }
    {
        AUTOTIME;
        input     = _ChangeInputFormat(input, NCHW);
        inputInfo = input->getInfo();
        // Check the output's info before dereferencing it, and convert packed
        // NC4HW4 output back to NCHW so it can be read linearly.
        auto rawOutputInfo = output->getInfo();
        if (nullptr != rawOutputInfo && rawOutputInfo->order == NC4HW4) {
            output = _Convert(output, NCHW);
        }
    }
    auto outputInfo = output->getInfo();
    if (nullptr == outputInfo) {
        MNN_ERROR("Output not valid\n");
        return 0;
    }
    auto size = outputInfo->size;
    exe->gc(Executor::FULL);
    // Test speed
    if (testTime > 0) {
        // Let the CPU frequency ramp up before the timed loop.
        for (int i = 0; i < 3; ++i) {
            input->writeMap<float>();
            input->unMap();
            output->readMap<float>();
            output->unMap();
        }
        AUTOTIME;
        for (int i = 0; i < testTime; ++i) {
            input->writeMap<float>();
            input->unMap();
            output->readMap<float>();
            output->unMap();
        }
    }
    {
        // Fill the input from input_0.txt, one whitespace-separated value each.
        auto size     = inputInfo->size;
        auto inputPtr = input->writeMap<float>();
        std::ifstream inputIs("input_0.txt");
        for (int i = 0; i < size; ++i) {
            inputIs >> inputPtr[i];
        }
        input->unMap();
    }
    {
        auto outputPtr = output->readMap<float>();
        if (nullptr == outputPtr) {
            MNN_ERROR("Output not valid, read error\n");
            return 0;
        }
        std::ofstream outputOs("output.txt");
        for (int i = 0; i < size; ++i) {
            outputOs << outputPtr[i] << "\n";
        }
        output->unMap();
    }
    return 0;
}