- dynamic computation graph (beta; see the sketch below)
  - add support (/express)
  - add tests
  - add benchmarks (/benchmark/exprModels)
- Python
  - MNN engine and tools published to pip, available on Windows/macOS/Linux
- Engine/Converter
  - add per-op benchmarking support
  - refactor optimizer by separating steps
- CPU
  - add support for Conv3D, Pool3D, ELU, ReverseSequence
  - fix ArgMax, Permute, Scale, BinaryOp, Slice, SliceTf
- OpenCL
  - add half transform on CPU
  - add broadcast support for binary
  - optimize Conv2D, Reshape, Eltwise, Gemm, etc.
- OpenGL
  - add sub and real div support for binary
  - add support for unary
  - optimize Conv2D, Reshape
- Vulkan
  - add max support for eltwise
- Metal
  - fix missing metallib problem
- Train/Quantization
  - refactor training code with express

2019-09-26 21:02:07 +08:00

- build:
  - unify schema building in core and converter
  - add more build scripts for Android
  - add Linux build script for Python
- ops impl:
  - add floor mod support in binary
  - use eltwise impl in add/max/sub/mul binary for optimization
  - remove fake double support in cast
  - fix 5D support for concat
  - add adjX and adjY support for batch matmul
  - optimize conv2d back-prop filter
  - add pad mode support for conv3d
  - fix bug in conv2d & conv depthwise with very small feature maps
  - optimize binary without broadcast
  - add data type support for gather
  - add gather ND support
  - use uint8 data type in gather v2
  - add transpose support for matmul
  - add matrix band part
  - add dim != 4 support for padding, reshape & tensor convert
  - add pad type support for pool3d
  - make ops based on TensorFlow Lite quantization optional
  - add all & any support for reduction
  - use type in parameter as output type in reduction
  - add int support for unary
  - add variable weight support for conv2d
  - fix conv2d depthwise weight initialization
  - fix type support for transpose
  - fix grad output count for reduce grad and reshape grad
  - fix priorbox & detection output
  - fix Metal softmax error
- python:
  - add runSessionWithCallBackInfo interface
  - add max node limit (1400) for visualization tool
  - fix save error in Python 3
  - align default dim
- convert:
  - add extra design for optimization
  - add more post-converting optimizers
  - add Caffe v1 weight blob support
  - add cast, unary, conv transpose support for ONNX models
  - optimize batchnorm, conv with variable weights, prelu, reshape, slice, upsample for ONNX models
  - add cos/sin/atan/tan support for unary for TensorFlow models
  - add any/all support for reduction for TensorFlow models
  - add elu, conv3d, pool3d support for TensorFlow models
  - optimize argmax, batchnorm, concat, batch to space, conv with variable weights, prelu, slice for TensorFlow models
- others:
  - fix size computer lock
  - fix thread pool deadlock
  - add express & parameters in express
  - rewrite blitter chooser without static map
  - add tests for expr

2019-10-29 13:37:26 +08:00
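The demo below exercises the dynamic graph (Express) API these notes describe. As a primer, here is a minimal sketch of the idiom the whole file relies on: variables are created eagerly, the graph grows as expressions are composed, and computation only runs when a result is read back. It uses only calls that appear in the demo itself; the function name and data values are illustrative.

#include <cstring>
#include "Expr.hpp"
#include "ExprCreator.hpp"
using namespace MNN::Express;

// Build a tiny graph dynamically and evaluate it through readMap.
void exprSketch() {
    auto x = _Input({2, 2});                // placeholder input variable
    float data[] = {1.f, 2.f, 3.f, 4.f};
    ::memcpy(x->writeMap<float>(), data, sizeof(data));
    x->unMap();
    auto w = _Const(data, {2, 2});          // constant weight
    auto y = _MatMul(x, w);                 // graph grows as expressions compose
    auto out = y->readMap<float>();         // triggers evaluation
    MNN_PRINT("y[0][0] = %f\n", out[0]);
    y->unMap();
}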
#include "Expr.hpp"
#include "ExprCreator.hpp"
#include "Optimizer.hpp"
#include <string>
#include <map>
#include <vector>
#include <fstream>
#include <sstream>
#define MNN_OPEN_TIME_TRACE // enables the AUTOTIME scope timer from AutoTime.hpp
#include "AutoTime.hpp"

using namespace MNN::Express;

// Round a channel count up to the number of 4-channel groups (NC4HW4 packing).
#define UP_DIV(x) (((x) + 3) / 4)

// Return the (input, output) variable pair for a k x k convolution:
// one batch, ic input channels, oc output channels, a size x size feature map.
static std::pair<VARP, VARP> _makeConvolution(int k, int ic, int oc, int size) {
    auto input = _Input({1, ic, size, size}, NC4HW4);
    // Weights and bias are filled with the constant 0.0f; the values are
    // irrelevant for timing.
    return std::make_pair(input, _Conv(0.0f, 0.0f, input, {ic, oc}, {k, k}, SAME));
}
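// Every _make* helper in this file returns the same kind of pair:
// (input variable, graph output variable). One benchmark iteration is then
// writeMap (mark the input dirty) -> unMap -> readMap (force the graph to
// recompute) -> unMap, which is exactly the loop body used by the tests below.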
// GEMM C(e x h) = A(e x l) * B(l x h), expressed directly with _MatMul.
static std::pair<VARP, VARP> _makeGEMMByMatMul(int e, int l, int h) {
    auto a = _Input({e, l});
    std::vector<float> weight(l * h); // zero-initialized; values don't affect timing
    auto b = _Const(weight.data(), {l, h});
    auto c = _MatMul(a, b);
    return std::make_pair(a, c);
}
// The same GEMM expressed as a 1x1 convolution (see the note below).
static std::pair<VARP, VARP> _makeGEMMByConvolution(int e, int l, int h) {
    auto icC4 = UP_DIV(l);
    auto ocC4 = UP_DIV(h);

    auto input = _Input({1, icC4 * 4, 1, e});
    return std::make_pair(input, _Conv(0.0f, 0.0f, input, {icC4 * 4, ocC4 * 4}, {1, 1}));
}
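// Why the convolution formulation works: a 1x1 convolution over an input of
// shape (1, l, 1, e) computes, at each of the e spatial positions, the dot
// product of the l input channels with each of the h filters -- exactly
// C = A * B for A (e x l) and B (l x h). UP_DIV pads l and h up to multiples
// of 4 to match the packed layout, at the cost of a little extra arithmetic
// when they are not already multiples of 4.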
static void _testConvolution() {
    // {kernel, input channels, output channels, spatial size}
    std::vector<std::vector<int>> size = {
        {2, 3, 16, 224},   {3, 3, 16, 224},   {5, 3, 16, 224},
        {2, 16, 4, 224},   {3, 16, 4, 224},   {5, 16, 4, 224},
        {2, 16, 16, 224},  {3, 16, 16, 224},  {5, 16, 16, 224},
        {2, 64, 64, 112},  {3, 64, 64, 112},  {5, 64, 64, 112},
        {2, 512, 512, 4},  {3, 512, 512, 4},  {5, 512, 512, 4},
        {2, 512, 512, 16}, {3, 512, 512, 16}, {5, 512, 512, 16},
        {2, 512, 512, 32}, {3, 512, 512, 32}, {5, 512, 512, 32},
    };

    // Untimed warm-up pass on a large GEMM-like convolution.
    auto conv = _makeGEMMByConvolution(1024, 1024, 1024);
    for (int v = 0; v < 10; ++v) {
        conv.first->writeMap<float>();
        conv.first->unMap();
        conv.second->readMap<float>();
        conv.second->unMap();
    }

    for (size_t i = 0; i < size.size(); ++i) {
        conv = _makeConvolution(size[i][0], size[i][1], size[i][2], size[i][3]);
        MNN_PRINT("%d, %d, %d, %d: ", size[i][0], size[i][1], size[i][2], size[i][3]);
        AUTOTIME; // scope timer: prints elapsed time when this iteration ends
        for (int v = 0; v < 10; ++v) {
            conv.first->writeMap<float>();
            conv.first->unMap();
            conv.second->readMap<float>();
            conv.second->unMap();
        }
    }
}
static void _testGEMM() {
    // {e, l, h} for C(e x h) = A(e x l) * B(l x h)
    std::vector<std::vector<int>> size = {
        {64, 64, 64},     {64, 64, 128},
        {128, 128, 128},  {128, 128, 256},
        {256, 256, 256},  {256, 256, 512},
        {512, 512, 512},  {512, 512, 1024},
        {1024, 1024, 1024},
    };
    for (size_t i = 0; i < size.size(); ++i) {
        auto x = size[i][0];
        auto y = size[i][1];
        auto z = size[i][2];
        // Work per GEMM in millions of multiply-adds (x*y*z / 2^20).
        auto flops = (float)x * (float)y * (float)z / 1024.0f / 1024.0f;
        FUNC_PRINT_ALL(flops, f);
    }

    // Untimed warm-up pass.
    auto conv = _makeGEMMByConvolution(1024, 1024, 1024);
    for (int v = 0; v < 10; ++v) {
        conv.first->writeMap<float>();
        conv.first->unMap();
        conv.second->readMap<float>();
        conv.second->unMap();
    }

    // Time the convolution-based GEMM for each size.
    for (size_t i = 0; i < size.size(); ++i) {
        conv = _makeGEMMByConvolution(size[i][0], size[i][1], size[i][2]);
        AUTOTIME;
        for (int v = 0; v < 10; ++v) {
            conv.first->writeMap<float>();
            conv.first->unMap();
            conv.second->readMap<float>();
            conv.second->unMap();
        }
    }

    // Time the _MatMul-based GEMM for the same sizes.
    for (size_t i = 0; i < size.size(); ++i) {
        conv = _makeGEMMByMatMul(size[i][0], size[i][1], size[i][2]);
        AUTOTIME;
        for (int v = 0; v < 10; ++v) {
            conv.first->writeMap<float>();
            conv.first->unMap();
            conv.second->readMap<float>();
            conv.second->unMap();
        }
    }
}
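// _testGEMM times the same matrix sizes through two different graphs (the
// 1x1-convolution formulation and the direct _MatMul formulation); which one
// is faster depends on the backend's packing and kernel selection.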
int main(int argc, const char* argv[]) {
    if (argc < 2) {
        MNN_ERROR("Usage: ./expressDemo.out model_path type testTime\n");
        return 0;
    }
    auto modelFileName = argv[1];
    FUNC_PRINT_ALL(modelFileName, s);
    auto device = Optimizer::CPU;
    if (argc >= 3) {
        device = (Optimizer::Device)atoi(argv[2]);
    }
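    // Example invocation (the model file name is hypothetical):
    //   ./expressDemo.out mobilenet.mnn 0 100
    // argv[2] is cast to Optimizer::Device and argv[3] sets testTime (parsed below).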
    auto model = Variable::loadMap(modelFileName);          // name -> variable map
    auto inputOutput = Variable::getInputAndOutput(model);  // split into inputs and outputs
    auto optimizer = Optimizer::create(device);
    auto inputs = inputOutput.first;
    auto outputs = inputOutput.second;
    if (nullptr == optimizer) {
        MNN_ERROR("Can't find optimizer for %d\n", device);
        return 0;
    }
    int testTime = 10;
    if (argc >= 4) {
        testTime = atoi(argv[3]);
    }
    optimizer->onExecute(Variable::mapToSequence(outputs));      // optimize the graph in place
    Variable::save(Variable::mapToSequence(outputs), "temp.mnn"); // dump the optimized graph
    // Benchmark only the first input and the first output.
    auto input = inputs.begin()->second;
    auto output = outputs.begin()->second;
    //input->resize({1, 224, 224, 3});
    auto inputInfo = input->getInfo();
    if (nullptr == inputInfo) {
        return 0;
    }
    {
        AUTOTIME;
        // Feed data in plain NCHW order; unpack NC4HW4 results before reading.
        input = _ChangeInputFormat(input, NCHW);
        inputInfo = input->getInfo();
        if (output->getInfo()->order == NC4HW4) {
            output = _Convert(output, NCHW);
        }
    }
    auto outputInfo = output->getInfo();
    if (nullptr == outputInfo) {
        MNN_ERROR("Output not valid\n");
        return 0;
    }
    auto size = outputInfo->size;

    // Test speed.
    if (testTime > 0) {
        // A few untimed iterations first, to let the CPU frequency ramp up.
        for (int i = 0; i < 3; ++i) {
            input->writeMap<float>();
            input->unMap();
            output->readMap<float>();
            output->unMap();
        }
        AUTOTIME;
        for (int i = 0; i < testTime; ++i) {
            input->writeMap<float>();
            input->unMap();
            output->readMap<float>();
            output->unMap();
        }
    }

    // Test correctness: read input values from input_0.txt ...
    {
        auto size = inputInfo->size;
        auto inputPtr = input->writeMap<float>();
        std::ifstream inputIs("input_0.txt"); // whitespace-separated floats
        for (int i = 0; i < size; ++i) {
            inputIs >> inputPtr[i];
        }
        input->unMap();
    }

    // ... and write the computed outputs to output.txt, one value per line.
    {
        auto outputPtr = output->readMap<float>();
        if (nullptr == outputPtr) {
            MNN_ERROR("Output not valid, read error\n");
            return 0;
        }
        std::ofstream outputOs("output.txt");
        for (int i = 0; i < size; ++i) {
            outputOs << outputPtr[i] << "\n";
        }
        output->unMap();
    }

    return 0;
}
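// A possible sanity check (a sketch, not part of the original demo): reload
// the optimized graph that main() saved to temp.mnn and confirm it still
// exposes inputs and outputs. Only calls already used above appear here; the
// function name is illustrative.
void checkSavedGraph() {
    auto model = Variable::loadMap("temp.mnn");
    auto io = Variable::getInputAndOutput(model);
    MNN_PRINT("reloaded: %d inputs, %d outputs\n", (int)io.first.size(), (int)io.second.size());
}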