//
//  timeProfile.cpp
//  MNN
//
//  Created by MNN on 2019/01/22.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#define MNN_OPEN_TIME_TRACE

#include <stdlib.h>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <MNN/AutoTime.hpp>
#include <MNN/Interpreter.hpp>
#include <MNN/MNNDefine.h>
#include "core/Macro.h"
#include "Profiler.hpp"
#include <MNN/Tensor.hpp>
#include "revertMNNModel.hpp"
#define MNN_PRINT_TIME_BY_NAME
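// MNN_OPEN_TIME_TRACE is defined above, before <MNN/AutoTime.hpp>, so the AUTOTIME macro
// used in main() actually measures and prints; MNN_PRINT_TIME_BY_NAME gates the
// per-operator-name report printed after the timing loop.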
using namespace MNN;

int main(int argc, const char* argv[]) {
    std::string cmd = argv[0];
    std::string pwd = "./";
    auto rslash = cmd.rfind("/");
    if (rslash != std::string::npos) {
        pwd = cmd.substr(0, rslash + 1);
    }
    // read args
    if (argc < 2) {
        MNN_ERROR("Usage: %s model.mnn [runLoops] [forwardType] [inputDims] [threadNumber] ...\n", argv[0]);
        return 0;
    }
    const char* fileName = argv[1];
    int runTime = 100;
    if (argc > 2) {
        runTime = ::atoi(argv[2]);
    }
    auto type = MNN_FORWARD_CPU;
    if (argc > 3) {
        type = (MNNForwardType)atoi(argv[3]);
        printf("Use extra forward type: %d\n", type);
    }
    // input dims, e.g. "1x3x224x224" in argv[4], split on 'x'
    std::vector<int> inputDims;
    if (argc > 4) {
        std::string inputShape(argv[4]);
        const char* delim = "x";
        std::ptrdiff_t p1 = 0, p2;
        while (1) {
            p2 = inputShape.find(delim, p1);
            if (p2 != std::string::npos) {
                inputDims.push_back(atoi(inputShape.substr(p1, p2 - p1).c_str()));
                p1 = p2 + 1;
            } else {
                inputDims.push_back(atoi(inputShape.substr(p1).c_str()));
                break;
            }
        }
    }
    for (auto dim : inputDims) {
        MNN_PRINT("%d ", dim);
    }
    MNN_PRINT("\n");
    int threadNumber = 4;
    if (argc > 5) {
        threadNumber = ::atoi(argv[5]);
        MNN_PRINT("Set ThreadNumber = %d\n", threadNumber);
    }

    // optional sparsity ratio, read from argv[7] and consumed by Revert::initialize() below
    float sparsity = 0.0f;
    if (argc >= 8) {
        sparsity = atof(argv[7]);
    }
    // revert MNN model if necessary
    auto revertor = std::unique_ptr<Revert>(new Revert(fileName));
    revertor->initialize(sparsity);
    auto modelBuffer = revertor->getBuffer();
    auto bufferSize = revertor->getBufferSize();
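    // Revert (revertMNNModel.hpp) appears to rebuild usable weight data in memory for models
    // that ship without real weights, as the benchmark models do; the sparsity value
    // presumably controls the fraction of regenerated weights that are zero.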
    // create net
    MNN_PRINT("Open Model %s\n", fileName);
    auto net = std::shared_ptr<Interpreter>(Interpreter::createFromBuffer(modelBuffer, bufferSize));
    if (nullptr == net) {
        return 0;
    }
    revertor.reset();
    net->setSessionMode(Interpreter::Session_Debug);
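    // Session_Debug appears to be required here so that runSessionWithCallBackInfo can hand
    // complete per-operator info to the profiler callbacks below; releasing revertor right
    // after createFromBuffer also indicates the interpreter keeps its own copy of the buffer.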
    // create session
    MNN::ScheduleConfig config;
    config.type = type;
    config.numThread = threadNumber;
    MNN::Session* session = NULL;
    session = net->createSession(config);
    auto inputTensor = net->getSessionInput(session, NULL);
    if (!inputDims.empty()) {
        net->resizeTensor(inputTensor, inputDims);
        net->resizeSession(session);
    }
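    // Passing NULL as the tensor name selects the default input; when the user supplied
    // explicit dimensions, the input tensor is resized and resizeSession() re-allocates the
    // session's buffers for the new shape.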
    auto allInput = net->getSessionInputAll(session);
    for (auto& iter : allInput) {
        auto inputTensor = iter.second;
        auto size = inputTensor->size();
        if (size <= 0) {
            continue;
        }
        MNN::Tensor tempTensor(inputTensor, inputTensor->getDimensionType());
        ::memset(tempTensor.host<void>(), 0, tempTensor.size());
        inputTensor->copyFromHostTensor(&tempTensor);
    }
    net->releaseModel();
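    // Every session input is zero-filled through a host-side staging tensor, since the
    // session tensor may live on a device (e.g. GPU) and cannot be written directly; the raw
    // model buffer is then released because only the built session is needed from here on.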
    std::shared_ptr<MNN::Tensor> inputTensorUser(MNN::Tensor::createHostTensorFromDevice(inputTensor, false));
    auto outputTensor = net->getSessionOutput(session, NULL);
    if (outputTensor->size() <= 0) {
        MNN_ERROR("Output not available\n");
        return 0;
    }
    std::shared_ptr<MNN::Tensor> outputTensorUser(MNN::Tensor::createHostTensorFromDevice(outputTensor, false));
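    // createHostTensorFromDevice(tensor, false) allocates a host tensor with the same shape
    // and layout without copying the current contents; the pair serves as staging buffers for
    // the upload/download performed on every timed iteration.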
    auto profiler = MNN::Profiler::getInstance();
    auto beginCallBack = [&](const std::vector<Tensor*>& inputs, const OperatorInfo* info) {
        profiler->start(info);
        return true;
    };
    auto afterCallBack = [&](const std::vector<Tensor*>& tensors, const OperatorInfo* info) {
        for (auto o : tensors) {
            o->wait(MNN::Tensor::MAP_TENSOR_READ, true);
        }
        profiler->end(info);
        return true;
    };
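    // The callbacks bracket every operator: start() opens a timer keyed by the op's info and
    // end() closes it. Waiting on each output tensor first should force asynchronous backends
    // (e.g. GPU) to finish the op, so its time is charged to the right entry instead of a
    // later synchronization point.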
    // AUTOTIME prints the wall time from here to the end of main()
    AUTOTIME;
    // just run
    for (int i = 0; i < runTime; ++i) {
        inputTensor->copyFromHostTensor(inputTensorUser.get());
        net->runSessionWithCallBackInfo(session, beginCallBack, afterCallBack);
        outputTensor->copyToHostTensor(outputTensorUser.get());
    }
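    // Each iteration uploads the input from the host staging tensor, runs the session with the
    // profiling callbacks attached, and downloads the output, so the host<->device copies count
    // towards the AUTOTIME total but not towards the per-op times.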
    // print the profiling report: per-op times by name and by type (runTime is used to average
    // over the loops), plus convolutions whose measured speed looks abnormally low
#ifdef MNN_PRINT_TIME_BY_NAME
    profiler->printTimeByName(runTime);
#endif
    profiler->printSlowOp("Convolution", 20, 0.03f);
    profiler->printTimeByType(runTime);
    return 0;
}