//
//  ModuleBasic.cpp
//  MNN
//
//  Created by MNN on 2021/10/15.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "MNN_generated.h"
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/Module.hpp>
#include <MNN/expr/ExprCreator.hpp>
#include <MNN/AutoTime.hpp>
#include "rapidjson/document.h"
#include <algorithm> // std::min_element / std::max_element in the benchmark loop
#include <fstream>
#include <map>
#include <sstream>
#include <cmath>
using namespace MNN::Express;
using namespace MNN;

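// Dump helpers: print a tensor as rows of its innermost dimension ("width"),
// one row per remaining index ("outside"). DUMP_CHAR_DATA widens byte types
// to int so they print as numbers rather than characters.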
#define DUMP_NUM_DATA(type)                                   \
    auto data = tensor->host<type>();                         \
    for (int z = 0; z < outside; ++z) {                       \
        for (int x = 0; x < width; ++x) {                     \
            outputOs << data[x + z * width] << "\t";          \
        }                                                     \
        outputOs << "\n";                                     \
    }

#define DUMP_CHAR_DATA(type)                                             \
    auto data = tensor->host<type>();                                    \
    for (int z = 0; z < outside; ++z) {                                  \
        for (int x = 0; x < width; ++x) {                                \
            outputOs << static_cast<int>(data[x + z * width]) << "\t";   \
        }                                                                \
        outputOs << "\n";                                                \
    }

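// Write one tensor to `file` as text and record the file name in `orderFile`
// so dumps can be replayed in execution order. The MNN_USE_SSE branch reads
// int8 tensors as uint8 and subtracts 128 to recover the signed values.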
static void dumpTensor2File(const Tensor* tensor, const char* file, std::ofstream& orderFile) {
    orderFile << file << std::endl;
    std::ofstream outputOs(file);
    auto type = tensor->getType();

    int dimension = tensor->buffer().dimensions;
    int width     = 1;
    if (dimension > 1) {
        width = tensor->length(dimension - 1);
    }

    const int outside = tensor->elementSize() / width;

    const auto dataType  = type.code;
    const auto dataBytes = type.bytes();

    if (dataType == halide_type_float) {
        DUMP_NUM_DATA(float);
    }
    if (dataType == halide_type_int && dataBytes == 4) {
        DUMP_NUM_DATA(int32_t);
    }
    if (dataType == halide_type_uint && dataBytes == 1) {
        DUMP_CHAR_DATA(uint8_t);
    }
    if (dataType == halide_type_int && dataBytes == 1) {
#ifdef MNN_USE_SSE
        auto data = tensor->host<uint8_t>();
        for (int z = 0; z < outside; ++z) {
            for (int x = 0; x < width; ++x) {
                outputOs << (static_cast<int>(data[x + z * width]) - 128) << "\t";
            }
            outputOs << "\n";
        }
#else
        DUMP_CHAR_DATA(int8_t);
#endif
    }
}
std::ofstream gOrderFile;
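// Register per-op debug callbacks on the global executor: before each op runs,
// its input tensors are copied to host and dumped under output/ with an
// "Input_" prefix; after it runs, its outputs are dumped likewise. '/' in op
// names is replaced by '_' to keep the dump file names valid.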
static void _initDebug() {
    gOrderFile.open("order.txt");
    MNN::TensorCallBackWithInfo beforeCallBack = [&](const std::vector<MNN::Tensor*>& ntensors, const OperatorInfo* info) {
        auto opName     = info->name();
        auto opCopyName = opName;
        for (int j = 0; j < opCopyName.size(); ++j) {
            if (opCopyName[j] == '/') {
                opCopyName[j] = '_';
            }
        }
        for (int i = 0; i < ntensors.size(); ++i) {
            auto ntensor      = ntensors[i];
            auto outDimType   = ntensor->getDimensionType();
            auto expectTensor = new MNN::Tensor(ntensor, outDimType);
            ntensor->copyToHostTensor(expectTensor);
            std::ostringstream outputFileName;
            outputFileName << "output/Input_" << opCopyName << "_" << i;
            dumpTensor2File(expectTensor, outputFileName.str().c_str(), gOrderFile);
            delete expectTensor;
        }
        return true;
    };
    MNN::TensorCallBackWithInfo callBack = [&](const std::vector<MNN::Tensor*>& ntensors, const OperatorInfo* info) {
        auto opName = info->name();
        for (int i = 0; i < ntensors.size(); ++i) {
            auto ntensor      = ntensors[i];
            auto outDimType   = ntensor->getDimensionType();
            auto expectTensor = new MNN::Tensor(ntensor, outDimType);
            ntensor->copyToHostTensor(expectTensor);

            auto tensor = expectTensor;

            std::ostringstream outputFileName;
            auto opCopyName = opName;
            for (int j = 0; j < opCopyName.size(); ++j) {
                if (opCopyName[j] == '/') {
                    opCopyName[j] = '_';
                }
            }
            if (tensor->dimensions() == 4) {
                MNN_PRINT("Dimensions: 4, W,H,C,B: %d X %d X %d X %d, OP name %s : %d\n",
                          tensor->width(), tensor->height(), tensor->channel(), tensor->batch(), opName.c_str(), i);
            } else {
                std::ostringstream oss;
                // Use a separate index `d` so the tensor index `i` above is not shadowed.
                for (int d = 0; d < tensor->dimensions(); d++) {
                    oss << (d ? " X " : "") << tensor->length(d);
                }
                MNN_PRINT("Dimensions: %d, %s, OP name %s : %d\n", tensor->dimensions(), oss.str().c_str(), opName.c_str(), i);
            }

            outputFileName << "output/" << opCopyName << "_" << i;
            dumpTensor2File(expectTensor, outputFileName.str().c_str(), gOrderFile);
            delete expectTensor;
        }
        return true;
    };
    Express::Executor::getGlobalExecutor()->setCallBack(std::move(beforeCallBack), std::move(callBack));
}

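// Compare one output against reference values read from
// ${directName}/${name}.txt, falling back to ${directName}/${order}.txt;
// the check is skipped when neither file exists. Fails on INF/NAN and when
// the max abs diff exceeds 1% of the reference's max abs value (floored at 1e-4).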
static bool compareOutput(VARP output, const std::string& directName, const std::string& name, Dimensionformat dataFormat, int order) {
    auto info = output->getInfo();
    auto ptr  = output->readMap<float>();
    if (nullptr == info || nullptr == ptr) {
        MNN_ERROR("TESTERROR ptr / info nullptr\n");
        return false;
    }
    std::ifstream outputOrigin;
    // First, look for a reference file keyed by the output's name
    {
        std::ostringstream outputFileOs;
        outputFileOs << directName << "/" << name << ".txt";
        outputOrigin.open(outputFileOs.str().c_str());
    }
    // Otherwise fall back to a file keyed by the output's order
    if (outputOrigin.fail()) {
        std::ostringstream outputFileOs;
        outputFileOs << directName << "/" << order << ".txt";
        outputOrigin.open(outputFileOs.str().c_str());
    }
    if (outputOrigin.fail()) {
        MNN_PRINT("Skip check %s\n", name.c_str());
        return true;
    }
    MNN_PRINT("%s: (", name.c_str());
    for (int i = 0; i < info->dim.size(); ++i) {
        MNN_PRINT("%d, ", info->dim[i]);
    }
    MNN_PRINT(")\n");
    auto targetValue = _Input({info->dim}, info->order, info->type);
    auto targetPtr   = targetValue->writeMap<float>();
    auto outputPtr   = output->readMap<float>();
#define MNN_IS_INF(x) (fabs(x) == INFINITY)
#define MNN_IS_NAN(x) ((x) != (x))

    for (int i = 0; i < info->size; ++i) {
        outputOrigin >> targetPtr[i];
        if (MNN_IS_INF(outputPtr[i]) || MNN_IS_NAN(outputPtr[i])) {
            MNN_ERROR("TESTERROR %s value error:%f\n", name.c_str(), outputPtr[i]);
            return false;
        }
    }
    auto absMax = _ReduceMax(_Abs(targetValue), {});
    absMax      = _Maximum(absMax, _Scalar<float>(0.0001f));
    auto diff        = _Abs(targetValue - output);
    auto diffAbsMax  = _ReduceMax(diff);
    auto absMaxV     = absMax->readMap<float>()[0];
    auto diffAbsMaxV = diffAbsMax->readMap<float>()[0];
    if (absMaxV * 0.01f < diffAbsMaxV || MNN_IS_NAN(absMaxV)) {
        MNN_ERROR("TESTERROR %s value error : absMaxV:%f - DiffMax %f\n", name.c_str(), absMaxV, diffAbsMaxV);
        return false;
    }
    return true;
}
int main(int argc, char *argv[]) {
    if (argc < 3) {
        MNN_ERROR("Usage: ./ModuleBasic.out ${test.mnn} ${Dir} [dump] [forwardType] [runLoops] [numberThread] [precision] [cacheFile]\n");
        return 0;
    }
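    // Illustrative invocation (model and directory names are hypothetical):
    //   ./ModuleBasic.out mobilenet.mnn testdir 0 0 10 4
    // i.e. no per-op dump, CPU forward type, 10 benchmark loops, 4 threads.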
    std::string modelName  = argv[1];
    std::string directName = argv[2];
    MNN_PRINT("Test %s from input info: %s\n", modelName.c_str(), directName.c_str());
    rapidjson::Document document;
    std::map<std::string, float> inputInfo;
    std::map<std::string, std::vector<int>> inputShape;
    std::vector<std::string> inputNames;
    std::vector<std::string> outputNames;
    bool checkOutput = false;
    int dump         = 0;
    if (argc > 3) {
        dump = atoi(argv[3]);
        if (dump > 0) {
            _initDebug();
        }
    }
    bool shapeMutable = true;
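    // Read ${directName}/input.json. An illustrative layout (all names hypothetical):
    //   {
    //     "inputs":  [{"name": "data", "shape": [1, 3, 224, 224], "value": 0.5}],
    //     "outputs": ["prob"],
    //     "shapeMutable": true
    //   }
    // "value" fills the whole input with one constant; without it the data is
    // read from ${directName}/${name}.txt (see LOAD_DATA below).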
    {
        std::ostringstream jsonNameOs;
        jsonNameOs << directName << "/input.json";
        std::ifstream fileNames(jsonNameOs.str().c_str());
        std::ostringstream output;
        output << fileNames.rdbuf();
        auto outputStr = output.str();
        document.Parse(outputStr.c_str());
        if (document.HasParseError()) {
            MNN_ERROR("Invalid json\n");
            return 0;
        }
        if (document.HasMember("inputs")) {
            auto inputsInfo = document["inputs"].GetArray();
            for (auto iter = inputsInfo.begin(); iter != inputsInfo.end(); iter++) {
                auto obj         = iter->GetObject();
                std::string name = obj["name"].GetString();
                inputNames.emplace_back(name);
                MNN_PRINT("%s\n", name.c_str());
                if (obj.HasMember("value")) {
                    float value = obj["value"].GetFloat();
                    inputInfo.insert(std::make_pair(name, value));
                }
                if (obj.HasMember("shape")) {
                    auto dims = obj["shape"].GetArray();
                    std::vector<int> shapes;
                    for (auto dimIter = dims.begin(); dimIter != dims.end(); dimIter++) {
                        shapes.emplace_back(dimIter->GetInt());
                    }
                    inputShape.insert(std::make_pair(name, shapes));
                }
            }
        }
        if (document.HasMember("outputs")) {
            checkOutput = true;
            auto array = document["outputs"].GetArray();
            for (auto iter = array.begin(); iter != array.end(); iter++) {
                std::string name = iter->GetString();
                MNN_PRINT("output: %s\n", name.c_str());
                outputNames.emplace_back(name);
            }
        }
        if (document.HasMember("shapeMutable")) {
            shapeMutable = document["shapeMutable"].GetBool();
        }
    }
    auto type = MNN_FORWARD_CPU;
    if (argc > 4) {
        type = (MNNForwardType)atoi(argv[4]);
        MNN_PRINT("Use extra forward type: %d\n", type);
    }

    // Default: single thread
    int modeNum = 1;
    if (argc > 6) {
        modeNum = ::atoi(argv[6]);
    }

    int precision = BackendConfig::Precision_Normal;
    if (argc > 7) {
        precision = atoi(argv[7]);
    }
    const char* cacheFileName = nullptr;
    if (argc > 8) {
        cacheFileName = argv[8];
    }
    // create session
    MNN::ScheduleConfig config;
    config.type = type;
    // modeNum is the gpuMode for GPU backends, or numThread for the CPU backend.
    config.numThread = modeNum;
    // If the requested type is unavailable, fail instead of falling back to another backend.
    config.backupType = type;
    BackendConfig backendConfig;
    // config.path.outputs.push_back("ResizeBilinear_2");
    // backendConfig.power = BackendConfig::Power_High;
    backendConfig.precision = static_cast<MNN::BackendConfig::PrecisionMode>(precision);
    // backendConfig.memory = BackendConfig::Memory_High;
    config.backendConfig = &backendConfig;

    MNN::Express::Module::Config mConfig;
    mConfig.shapeMutable = shapeMutable;
    std::shared_ptr<Executor::RuntimeManager> rtmgr(Executor::RuntimeManager::createRuntimeManager(config));
    if (nullptr != cacheFileName) {
        rtmgr->setCache(cacheFileName);
    }
    std::shared_ptr<Module> net(Module::load(inputNames, outputNames, modelName.c_str(), rtmgr, &mConfig));
    if (net == nullptr) {
        MNN_PRINT("Error: can't load module\n");
        return 0;
    }
    auto mInfo = net->getInfo();

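// LOAD_DATA fills `ptr` for the input named `inputName`: with the constant
// registered in inputInfo (input.json's "value") when present, otherwise by
// streaming numbers from ${directName}/${inputName}.txt. It assumes `info`,
// `ptr`, `inputName` and `directName` are in scope, and `continue` skips the
// current input when the file is missing.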
#define LOAD_DATA(TYPE)                                               \
    if (inputInfo.find(inputName) != inputInfo.end()) {               \
        auto value = inputInfo[inputName];                            \
        for (int i = 0; i < info->size; ++i) {                        \
            ptr[i] = value;                                           \
        }                                                             \
    } else {                                                          \
        std::ostringstream fileNameOs;                                \
        fileNameOs << directName << "/" << inputName << ".txt";       \
        auto fileName = fileNameOs.str();                             \
        std::ifstream inputOs(fileName.c_str());                      \
        if (inputOs.fail()) {                                         \
            MNN_ERROR("TESTERROR Can't open %s\n", fileName.c_str()); \
            continue;                                                 \
        }                                                             \
        for (int i = 0; i < info->size; ++i) {                        \
            inputOs >> ptr[i];                                        \
        }                                                             \
    }
    std::vector<VARP> inputs(mInfo->inputs.size());
    for (int i = 0; i < inputs.size(); ++i) {
        inputs[i] = _Input(mInfo->inputs[i].dim, mInfo->inputs[i].order, mInfo->inputs[i].type);
    }
    // Load inputs
    for (int i = 0; i < inputs.size(); ++i) {
        auto inputName = inputNames[i];
        // Resize when input.json overrides the shape
        auto shapeIter = inputShape.find(inputName);
        if (shapeIter != inputShape.end()) {
            auto s    = shapeIter->second;
            inputs[i] = _Input(s, mInfo->defaultFormat, mInfo->inputs[i].type);
        }
        auto info = inputs[i]->getInfo();
        if (info->type == halide_type_of<float>()) {
            auto ptr = inputs[i]->writeMap<float>();
            LOAD_DATA(float)
        } else {
            // Non-float input: load the data as float, then cast to the real type
            auto floatVar = _Input(info->dim, info->order, halide_type_of<float>());
            auto ptr      = floatVar->writeMap<float>();
            LOAD_DATA(float)
            auto temp = _Cast(floatVar, info->type);
            inputs[i]->input(temp);
        }
        inputs[i] = _Convert(inputs[i], mInfo->inputs[i].order);
    }
#undef LOAD_DATA
    bool modelError = false;
    // Module Branch
    auto outputs = net->onForward(inputs);
    // Normalize outputs: convert NC4HW4 results to the default layout and cast to float
    for (int i = 0; i < outputNames.size(); ++i) {
        auto name = outputNames[i];
        auto v    = outputs[i];
        auto info = v->getInfo();
        if (nullptr == info) {
            continue;
        }
        if (info->order == NC4HW4 && info->dim.size() > 1) {
            v = _Convert(v, mInfo->defaultFormat);
        }
        if (info->type.code != halide_type_float) {
            v = _Cast<float>(v);
        }
        v.fix(VARP::CONSTANT);
        outputs[i] = v;
    }

    if (checkOutput) {
        for (int i = 0; i < outputNames.size(); ++i) {
            auto output  = outputs[i];
            bool success = compareOutput(output, directName, outputNames[i], mInfo->defaultFormat, i);
            if (!success) {
                modelError = true;
                MNN_ERROR("Error for output %s\n", outputNames[i].c_str());
            }
        }
    }
    for (int i = 0; i < outputNames.size(); ++i) {
        auto name = outputNames[i];
        auto v    = outputs[i];
        auto info = v->getInfo();
        std::ostringstream fileNameOs;
        fileNameOs << "output/" << i << ".txt";
        auto fileName = fileNameOs.str();
        MNN_PRINT("Write %s output to %s\n", name.c_str(), fileName.c_str());
        std::ofstream _output(fileName.c_str());
        auto ptr = v->readMap<float>();
        // Write one value per line; use a separate index so the VARP `v` is not shadowed.
        for (int j = 0; j < info->size; ++j) {
            _output << ptr[j] << "\n";
        }
    }
    // Benchmark. For CPU, op time means calc duration; for other backends it means schedule duration.
    int runTime = 0;
    if (argc > 5) {
        runTime = ::atoi(argv[5]);
    }

    if (runTime > 0 && dump == 0) {
        int t = runTime;
        std::vector<float> times(t, 0.0f);
        for (int i = 0; i < t; ++i) {
            Timer _l;
            auto out = net->onForward(inputs);
            times[i] = _l.durationInUs() / 1000.0f;
        }
        auto minTime = std::min_element(times.begin(), times.end());
        auto maxTime = std::max_element(times.begin(), times.end());
        float sum    = 0.0f;
        for (auto time : times) {
            sum += time;
        }
        MNN_PRINT("Avg= %f ms, min= %f ms, max= %f ms\n", sum / (float)t, *minTime, *maxTime);
    }
    return 0;
}