//
// Executor.cpp
// MNN
//
// Created by MNN on 2019/07/26.
// Copyright © 2018, Alibaba Group Holding Limited
//

#include <unordered_set>

#include <MNN/expr/Executor.hpp>
#include "core/Session.hpp"
#include "core/TensorUtils.hpp"
#include "Utils.hpp"
#include <MNN/AutoTime.hpp>
#include "core/WrapExecution.hpp"

#ifdef MNN_EXPR_ENABLE_PROFILER
#define MNN_EXPRESS_ERROR_REPORT
#endif
#define MNN_EXPRESS_OPEN_MEMORY_REUSE

namespace MNN {
namespace Express {

static bool hasNoneOutput(const std::vector<Tensor*>& outputs) {
    for (const Tensor* t : outputs) {
        if (t->elementSize() == 0) {
            return true;
        }
    }
    return false;
}

static bool AllocateTensor(Backend* backend, Tensor* tensor,
                           const Backend::StorageType& storageType) {
    if (tensor->size() <= 0) {
        tensor->buffer().host = nullptr;
        return true;
    }
    TensorUtils::getDescribe(tensor)->backend = backend;
    return backend->onAcquireBuffer(tensor, storageType);
}
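
// Note: a tensor whose shape contains a zero-length dimension has size() == 0,
// so AllocateTensor() treats it as trivially "allocated" (host == nullptr) and
// hasNoneOutput() lets compute()/resize() below skip such an op entirely.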

class Executor::Profiler {
public:
    void reset();
    void dump() const;
    void add(int opType, float timeInMs);
private:
    std::map<int, float> mTimes;
};

void Executor::Profiler::reset() {
    mTimes.clear();
}

void Executor::Profiler::dump() const {
    for (auto iter : mTimes) {
        MNN_PRINT("%s: %f ms\n", EnumNameOpType((OpType)iter.first), iter.second);
    }
}

void Executor::Profiler::add(int opType, float timeInMs) {
    auto iter = mTimes.find(opType);
    if (iter == mTimes.end()) {
        mTimes[opType] = timeInMs;
        return;
    }
    iter->second += timeInMs;
}
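
// A minimal usage sketch (hypothetical driver code, not part of this file):
// the profiler is only compiled in with MNN_EXPR_ENABLE_PROFILER and is driven
// through Executor::resetProfile() / dumpProfile(), defined near the bottom:
//
//   auto exe = Executor::getGlobalExecutor();
//   exe->resetProfile();
//   // ... evaluate some expressions ...
//   exe->dumpProfile(); // prints accumulated time per OpType, in ms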

void Executor::setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread) {
    std::lock_guard<std::mutex> _l(mMutex);
    auto creator = MNNGetExtraBackendCreator(type);
    if (nullptr == creator) {
        MNN_ERROR("Error to find creator of %d\n", type);
        return;
    }
    _resetCache();
    Backend::Info info;
    info.type = type;
    info.numThread = numberThread;
    BackendConfig cfg = config;
    info.user = &cfg;
    std::shared_ptr<Backend> bn(creator->onCreate(info));
    mBackend = bn;
}
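
// A minimal caller sketch (hypothetical; it assumes the BackendConfig fields
// and enum spelling from MNN's public headers):
//
//   BackendConfig config;
//   config.precision = BackendConfig::Precision_Low;
//   Executor::getGlobalExecutor()->setGlobalExecutorConfig(MNN_FORWARD_CPU, config, 4);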

void Executor::_resetCache() {
    // Nothing to reset at the Executor level for now.
}

void Executor::gc(GCFlag flag) {
    std::lock_guard<std::mutex> _l(mMutex);
    _resetCache();
    if (FULL == flag) {
        mBackend->onClearBuffer();
        mBackupBackend->onClearBuffer();
    }
}

Executor::Executor(std::shared_ptr<Backend> backend) {
    mBackend = backend;
    if (mBackend->type() == MNN_FORWARD_CPU) {
        mBackupBackend = mBackend;
    } else {
        // Always keep a single-threaded CPU backend as fallback and copy target.
        Backend::Info info;
        info.type = MNN_FORWARD_CPU;
        info.numThread = 1;
        auto creator = MNNGetExtraBackendCreator(MNN_FORWARD_CPU);
        mBackupBackend.reset(creator->onCreate(info));
    }
    _resetCache();
#ifdef MNN_EXPR_ENABLE_PROFILER
    mProfiler.reset(new Profiler);
#endif
}

Executor::~Executor() {
    mBackend = nullptr;
    mBackupBackend = nullptr;
}

void Executor::_addToCache(const std::vector<std::shared_ptr<ComputeCache>>& caches) {
    //FUNC_PRINT(mCaches.size());
}

Executor::Requirement Executor::getRequirement(Expr* expr) const {
    Executor::Requirement req;
    auto op = expr->get();
    auto inputSize = expr->inputs().size();
    req.contentNeedContent.resize(inputSize);
    req.shapeNeedContent.resize(inputSize);
    req.supportError.resize(inputSize);
    if (op->type() == OpType_Extra) {
        for (int i = 0; i < inputSize; ++i) {
            req.contentNeedContent[i] = true;
            req.shapeNeedContent[i] = false;
            req.supportError[i] = false;
        }
        return req;
    }
    for (int i = 0; i < inputSize; ++i) {
        req.contentNeedContent[i] = SizeComputer::opNeedContent(op->type(), i);
        req.shapeNeedContent[i] = false;
        // Only Concat tolerates an input whose shape computation failed.
        req.supportError[i] = (op->type() == OpType_Concat);
    }
    auto needIndexId = SizeComputer::needInputContent(op);
    for (auto index : needIndexId) {
        if (index < req.shapeNeedContent.size()) {
            req.shapeNeedContent[index] = true;
        }
    }
    return req;
}
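
// Illustrative example: for an op like Reshape driven by a shape tensor,
// SizeComputer::needInputContent(op) reports the index of that shape input,
// so its *content* (not just its dimensions) must be available before this
// expression's output shapes can be inferred. The exact index set is defined
// per-op by the SizeComputer registry.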

std::shared_ptr<Executor> Executor::getGlobalExecutor() {
    static std::once_flag of;
    static std::shared_ptr<Executor> gExecutor;
    std::call_once(of, [&]() {
        auto creator = MNNGetExtraBackendCreator(MNN_FORWARD_CPU);
        SizeComputerSuite::init();
        Backend::Info info;
        info.type = MNN_FORWARD_CPU;
        info.numThread = 1;
        std::shared_ptr<Backend> bn(creator->onCreate(info));
        gExecutor.reset(new Executor(bn));
    });
    return gExecutor;
}
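
// std::call_once guarantees that SizeComputerSuite::init() and the default
// CPU executor run exactly once, even if several threads race on the first
// call; afterwards every caller shares the same Executor instance.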

ErrorCode Executor::computeInfo(Expr* expr) {
    MNN_ASSERT(nullptr != expr);
    MNN_ASSERT(nullptr != expr->get());
    if (expr->get()->type() == OpType_Extra) {
        return NOT_SUPPORT;
    }
    std::lock_guard<std::mutex> _l(mMutex);
    mStackInputs.resize(expr->inputs().size());
    mStackOutputs.resize(expr->outputSize());
    if (mStack.size() < mStackInputs.size() + mStackOutputs.size()) {
        int origin = (int)mStack.size();
        int destSize = (int)(mStackInputs.size() + mStackOutputs.size());
        for (int i = origin; i < destSize; ++i) {
            mStack.emplace_back(std::shared_ptr<Tensor>(new Tensor));
        }
    }
    for (int i = 0; i < mStackInputs.size(); ++i) {
        mStackInputs[i] = mStack[i].get();
    }
    for (int i = 0; i < mStackOutputs.size(); ++i) {
        mStackOutputs[i] = mStack[i + (int)mStackInputs.size()].get();
    }
    auto op = expr->get();
    for (int i = 0; i < expr->inputs().size(); ++i) {
        auto inputExpr = expr->inputs()[i]->expr();
        Utils::copyInfoToTensor(mStackInputs[i], inputExpr.first->outputInfo(inputExpr.second));
    }
    bool res = SizeComputer::computeOutputSize(op, mStackInputs, mStackOutputs);
    if (!res) {
        // Compute Error
#ifdef MNN_EXPRESS_ERROR_REPORT
        if (expr->name().empty()) {
            MNN_ERROR("Error to compute shape for %s\n", EnumNameOpType(op->type()));
        } else {
            MNN_ERROR("Error to compute shape for %s, %s\n", EnumNameOpType(op->type()), expr->name().c_str());
        }
#endif
        return COMPUTE_SIZE_ERROR;
    }
    for (int i = 0; i < mStackOutputs.size(); ++i) {
        auto tensor = mStackOutputs[i];
#ifdef MNN_EXPRESS_ERROR_REPORT
        bool hasZeroDim = false;
        // MNN_PRINT("Output(%d): [", i);
        for (int j = 0; j < tensor->dimensions(); ++j) {
            // MNN_PRINT("%d, ", tensor->length(j));
            if (tensor->length(j) <= 0) {
                hasZeroDim = true;
            }
        }
        // MNN_PRINT("]\n");
        if (hasZeroDim) {
            if (nullptr != op->name()) {
                MNN_PRINT("The output has 0 elements for %s\n", op->name()->c_str());
            } else {
                MNN_PRINT("The output has 0 elements for %s\n", EnumNameOpType(op->type()));
            }
        }
#endif // MNN_EXPRESS_ERROR_REPORT

        auto shape = expr->outputInfo(i);
        Utils::copyTensorToInfo(shape, tensor);
    }
    return NO_ERROR;
}

void Executor::ComputeCache::syncInput(int offset, const Variable::Info* info) {
    auto tensor = this->getTensor(offset, true);
    Utils::copyInfoToTensor(tensor, info);
}

void Executor::ComputeCache::syncOutput(int offset, Variable::Info* info) {
    auto tensor = this->getTensor(offset, true);
    if (nullptr != tensor) {
        info->ptr = tensor->host<void>();
    }
}

void Executor::ComputeCache::setShapeDirty(int offset, Variable::Info* info) {
    _setShapeDirty();
    if (nullptr != info) {
        syncInput(offset, info);
    }
}

void Executor::ComputeCache::_setShapeDirty() {
    mShapeDirty = true;
}

void Executor::ComputeCache::setContentReady() {
    mContentDirty = false;
}

void Executor::ComputeCache::setContentDirty() {
    mContentDirty = true;
}

void Executor::ComputeCache::TensorContent::reset() {
    auto des = TensorUtils::getDescribe(tensor.get());
    if (nullptr != des->backend && des->useCount >= 0) {
        Backend::StorageType storageType = Backend::DYNAMIC;
        if (aliveOutside) {
            storageType = Backend::STATIC;
        }
        des->backend->onReleaseBuffer(tensor.get(), storageType);
    }
    des->backend = nullptr;
    des->useCount = refCount;
}
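
// Reference-count protocol: Executor::_create() snapshots each tensor's
// planned consumer count into `refCount`. Before every re-plan, reset()
// restores useCount from that snapshot; PipelineCache::resize() then
// decrements it per consumed input and frees the DYNAMIC buffer once it
// reaches zero (setting useCount to -1 so reset() will not free it twice).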

class InputCache : public Executor::ComputeCache {
public:
    InputCache() {}
    ~InputCache() {}
    virtual ErrorCode compute() override {
        if (mContentDirty) {
            // The user has not filled this input yet.
            return INPUT_DATA_ERROR;
        }
        return NO_ERROR;
    }
    virtual ErrorCode resize() override {
        return NO_ERROR;
    }
    virtual Tensor* getTensor(int offset, bool host) override {
        return &mTensor;
    }

private:
    Tensor mTensor;
};

class PipelineCache : public Executor::ComputeCache {
public:
    PipelineCache();
    virtual ~PipelineCache();
    virtual Tensor* getTensor(int offset, bool host) override {
        auto tensor = mOutputs[offset];
        if (tensor->host<void>() != nullptr || !host) {
            return tensor;
        }
        auto iter = mCopyOutputs.find(tensor);
        if (iter == mCopyOutputs.end()) {
            // First request for this tensor: create a host copy and fill it.
            TensorContent content;
            content.tensor.reset(new Tensor);
            content.tensor->setType(Utils::convertDataType(tensor->getType()));
            TensorUtils::copyShape(tensor, content.tensor.get(), true);
            bool res = AllocateTensor(mBackupBackend.get(), content.tensor.get(), Backend::DYNAMIC);
            if (!res) {
                MNN_ERROR("Malloc error when copy out\n");
                return nullptr;
            }
            tensor->copyToHostTensor(content.tensor.get());
            iter = mCopyOutputs.insert(std::make_pair(tensor, content.tensor.get())).first;
            mTensors.emplace_back(std::move(content));
        }
        return iter->second;
    }
    virtual ErrorCode compute() override;
    virtual ErrorCode resize() override;

private:
    void _updateOutputInfo(ComputeCache::Unit* unit);

    std::set<std::shared_ptr<ComputeCache>> mInputs;
    std::vector<Tensor*> mOutputs;
    std::vector<TensorContent> mTensors;
    std::vector<std::shared_ptr<Unit>> mUnits;
    std::map<Tensor*, Tensor*> mCopyOutputs;
    std::shared_ptr<Backend> mBackend;
    std::shared_ptr<Backend> mBackupBackend;
    friend class Executor;
};

struct Executor::ComputeCache::Unit {
    std::vector<Tensor*> inputs;
    // Indices into `inputs` whose buffers may be released after onResize.
    std::vector<int> inputsNeedRelease;
    std::vector<Tensor*> outputs;
    // Marks outputs that escape the pipeline and must use STATIC storage.
    std::vector<bool> aliveOutside;
    const Op* op;
    std::weak_ptr<Expr::Inside> inside;
    std::shared_ptr<Execution> exe;
    std::shared_ptr<char> extraBuffer;
    std::vector<std::pair<Tensor*, const Variable::Info*>> inputOutsides;
};

PipelineCache::PipelineCache() {
    // Do nothing
}

PipelineCache::~PipelineCache() {
    mUnits.clear();
    for (auto& t : mTensors) {
        t.reset();
    }
}

ErrorCode PipelineCache::compute() {
    if (mShapeDirty) {
        auto code = resize();
        if (NO_ERROR != code) {
            return code;
        }
    }
    if (!mContentDirty) {
        return NO_ERROR;
    }
    for (auto c : mInputs) {
        auto code = c->compute();
        if (NO_ERROR != code) {
            return code;
        }
    }
    mBackend->onExecuteBegin();
    //mBackupBackend->onExecuteBegin();
    for (int i = 0; i < mUnits.size(); ++i) {
        auto& iter = *mUnits[i];
        if (nullptr == iter.exe) {
            // MNN_ERROR("Skip %s\n", iter.op->name()->str().c_str());
            continue;
        }
        auto inside = iter.inside.lock();
        if (nullptr == inside || inside->mInfoDirty) {
            // MNN_ERROR("Skip %s\n", iter.op->name()->str().c_str());
            continue;
        }
#ifdef MNN_EXPR_ENABLE_PROFILER
        Timer autoTime;
#endif
        // Skip execution if there is nothing to compute.
        if (!hasNoneOutput(iter.outputs)) {
            auto code = iter.exe->onExecute(iter.inputs, iter.outputs);
            if (NO_ERROR != code) {
#ifdef MNN_EXPRESS_ERROR_REPORT
                MNN_ERROR("Error to execute for %s\n", EnumNameOpType(iter.op->type()));
#endif
                mBackend->onExecuteEnd();
                return code;
            }
        }
        _updateOutputInfo(&iter);
        inside->mContentDirty = false;

#ifdef MNN_EXPR_ENABLE_PROFILER
        float costTime = (float)autoTime.durationInUs() / (float)1000;
        Executor::getGlobalExecutor()->addOpCostTime((int)mUnits[i]->op->type(), costTime);
#endif
    }
    mBackend->onExecuteEnd();
    //mBackupBackend->onExecuteEnd();
    for (auto iter : mCopyOutputs) {
        iter.first->copyToHostTensor(iter.second);
    }
    mContentDirty = false;
    return NO_ERROR;
}

void PipelineCache::_updateOutputInfo(ComputeCache::Unit* unit) {
    // Callers have already checked that `inside` is alive.
    auto inside = unit->inside.lock();
    for (int i = 0; i < unit->outputs.size(); ++i) {
        Tensor* output = unit->outputs[i];
        Variable::Info& info = inside->mOutputInfos[i];
        info.dim = output->shape();
        info.syncSize();
    }
}

ErrorCode PipelineCache::resize() {
    if (!mShapeDirty) {
        return NO_ERROR;
    }
    for (auto c : mInputs) {
        auto code = c->resize();
        if (NO_ERROR != code) {
            return code;
        }
    }
    for (auto& t : mTensors) {
        t.reset();
    }
    for (auto& tensor : mOutputs) {
        TensorUtils::getDescribe(tensor)->useCount += 1;
    }
    mShapeDirty = false;
    for (int unitIndex = 0; unitIndex < mUnits.size(); ++unitIndex) {
        auto& iter = *mUnits[unitIndex];
        auto inside = iter.inside.lock();
        if (nullptr == inside || inside->mInfoDirty) {
            mShapeDirty = true;
            continue;
        }
        for (auto& tensor : iter.inputOutsides) {
            Utils::copyInfoToTensor(tensor.first, tensor.second);
        }
        for (int i = 0; i < iter.outputs.size(); ++i) {
            Utils::copyInfoToTensor(iter.outputs[i], inside->mOutputInfos.data() + i);
            iter.outputs[i]->buffer().host = nullptr;
        }
        if (nullptr == iter.exe) {
#ifdef MNN_EXPR_ENABLE_PROFILER
            Timer autoTime;
#endif
            iter.exe.reset(mBackend->onCreate(iter.inputs, iter.outputs, iter.op));
            if (nullptr == iter.exe) {
                iter.exe.reset(mBackupBackend->onCreate(iter.inputs, iter.outputs, iter.op));
            }
            if (nullptr == iter.exe) {
                // Neither the main nor the backup backend supports this op.
                return NOT_SUPPORT;
            }
            // Check if need wrap
            bool needWrap = false;
            auto bn = iter.exe->backend();
            auto iterType = bn->type();
            for (int i = 0; i < inside->mReq.contentNeedContent.size(); ++i) {
                if (!inside->mReq.contentNeedContent[i]) {
                    continue;
                }
                auto tensorBn = TensorUtils::getDescribe(iter.inputs[i])->backend;
                auto type = MNN_FORWARD_CPU;
                if (nullptr != tensorBn) {
                    type = tensorBn->type();
                }
                if (iterType != type) {
                    needWrap = true;
                    break;
                }
            }
            if (needWrap) {
                iter.exe.reset(new WrapExecution(mBackupBackend.get(), iter.exe));
            }

#ifdef MNN_EXPR_ENABLE_PROFILER
            float costTime = (float)autoTime.durationInUs() / (float)1000;
            Executor::getGlobalExecutor()->addOpCostTime((int)iter.op->type(), costTime);
#endif
        }
#ifdef MNN_EXPR_ENABLE_PROFILER
        Timer autoTime;
#endif

        auto bn = iter.exe->backend();
        for (int i = 0; i < iter.outputs.size(); ++i) {
            Backend::StorageType storageType = Backend::DYNAMIC;
            if (iter.aliveOutside[i]) {
                storageType = Backend::STATIC;
            }
            bool res = AllocateTensor(bn, iter.outputs[i], storageType);
            if (!res) {
                return OUT_OF_MEMORY;
            }
        }

        // Skip resize if there is nothing to compute.
        if (!hasNoneOutput(iter.outputs)) {
            auto code = iter.exe->onResize(iter.inputs, iter.outputs);
            if (NO_ERROR != code) {
                return code;
            }
        }
#ifdef MNN_EXPR_ENABLE_PROFILER
        float costTime = (float)autoTime.durationInUs() / (float)1000;
        Executor::getGlobalExecutor()->addOpCostTime((int)iter.op->type(), costTime);
#endif

#ifdef MNN_EXPRESS_OPEN_MEMORY_REUSE
        for (int i = 0; i < iter.inputsNeedRelease.size(); ++i) {
            auto index = iter.inputsNeedRelease[i];
            auto des = TensorUtils::getDescribe(iter.inputs[index]);
            des->useCount--;
            if (des->useCount <= 0 && des->backend != nullptr) {
                des->backend->onReleaseBuffer(iter.inputs[index], Backend::DYNAMIC);
                // Set useCount < 0, so TensorContent::reset will not release it again.
                des->useCount = -1;
                des->backend = nullptr;
            }
        }
#endif
    }
    for (auto iter : mCopyOutputs) {
        TensorUtils::copyShape(iter.first, iter.second, true);
        bool res = AllocateTensor(mBackupBackend.get(), iter.second, Backend::DYNAMIC);
        if (!res) {
            return OUT_OF_MEMORY;
        }
    }
    mContentDirty = true;
    return NO_ERROR;
}
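
// With MNN_EXPRESS_OPEN_MEMORY_REUSE defined (top of this file), an
// intermediate DYNAMIC buffer is handed back to the backend's pool as soon as
// its last consuming unit has been resized, so ops whose intermediates never
// coexist can share the same memory across the pipeline.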

static void _collectExecuteUnit(std::vector<std::shared_ptr<Executor::ComputeCache::Unit>>& dest, EXPRP expr) {
    auto& inputs = expr->inputs();
    auto& req = expr->inside()->mReq.contentNeedContent;
    MNN_ASSERT(inputs.size() == req.size());

    for (int i = 0; i < inputs.size(); ++i) {
        if (!req[i]) {
            continue;
        }
        auto inputExpr = inputs[i]->expr();
        auto unit = inputExpr.first->inside()->mUnit;
        if (nullptr == unit) {
            continue;
        }
        auto inputCache = inputExpr.first->inside()->mCache;
        if (nullptr != inputCache) {
            continue;
        }
        _collectExecuteUnit(dest, inputExpr.first);
    }
    auto unit = expr->inside()->mUnit;
    if (nullptr == unit) {
        return;
    }
    expr->inside()->mLinkCache = true;
    dest.emplace_back(std::move(unit));
    expr->inside()->mUnit = nullptr;
}

void Executor::_createSingle(EXPRP expr) {
    MNN_ASSERT(expr->get() == nullptr);
    std::shared_ptr<ComputeCache> cache(new InputCache);
    expr->inside()->mCache = cache;
    expr->inside()->mCacheOffset = 0;
    cache->syncInput(0, expr->outputInfo(0));
    if (VARP::INPUT == expr->inputType()) {
        cache->setContentDirty();
    } else {
        cache->setContentReady();
    }
}

void Executor::_create(const std::vector<EXPRP>& outputs, std::set<std::shared_ptr<Executor::ComputeCache>>&& inputCaches, std::vector<ComputeCache::TensorContent>&& tensors, bool forceCPU) {
    std::vector<EXPRP> packed;
    for (auto expr : outputs) {
        // Make Cache For Single Tensor
        auto cache = expr->inside()->mCache;
        if (nullptr != cache) {
            continue;
        }
        if (nullptr != expr->get()) {
            packed.emplace_back(expr);
            continue;
        }
        _createSingle(expr);
    }
    if (packed.empty()) {
        return;
    }
    std::shared_ptr<PipelineCache> packedCache(new PipelineCache);
    if (forceCPU) {
        packedCache->mBackend = mBackupBackend;
    } else {
        packedCache->mBackend = mBackend;
    }

    std::unordered_set<Tensor*> aliveOutputs;
    packedCache->mInputs = std::move(inputCaches);
    for (auto expr : packed) {
        expr->inside()->mCacheOffset = (int)packedCache->mOutputs.size();
        MNN_ASSERT(expr->inside()->mUnit != nullptr);
        auto& originOutputs = expr->inside()->mUnit->outputs;
        for (auto t : originOutputs) {
            packedCache->mOutputs.emplace_back(t);
            aliveOutputs.insert(t);
        }
        auto& aliveOutside = expr->inside()->mUnit->aliveOutside;
        for (int i = 0; i < aliveOutside.size(); ++i) {
            aliveOutside[i] = true;
        }
        expr->inside()->mCache = std::static_pointer_cast<ComputeCache>(packedCache);
    }
    for (auto& content : tensors) {
        if (aliveOutputs.count(content.tensor.get())) {
            content.aliveOutside = true;
        }
    }
    packedCache->mTensors = std::move(tensors);
    packedCache->mBackupBackend = mBackupBackend;

    // Backup Tensor Refcount
    for (auto& t : packedCache->mTensors) {
        t.refCount = TensorUtils::getDescribe(t.tensor.get())->useCount;
    }
    // Create Units
    for (auto expr : packed) {
        _collectExecuteUnit(packedCache->mUnits, expr);
    }
}

void Executor::_visit(EXPRP expr, std::set<std::shared_ptr<Executor::ComputeCache>>& inputCaches, std::vector<ComputeCache::TensorContent>& tensors) {
    auto& inputs = expr->inputs();
    auto& req = expr->inside()->mReq.contentNeedContent;
    MNN_ASSERT(inputs.size() == req.size());

    // Create Input's Unit / Cache
    for (int i = 0; i < inputs.size(); ++i) {
        if (!req[i]) {
            continue;
        }
        auto inputExpr = inputs[i]->expr();
        if (nullptr != inputExpr.first->inside()->mUnit) {
            continue;
        }
        auto inputCache = inputExpr.first->inside()->mCache;
        if (nullptr != inputCache) {
            inputCaches.insert(inputCache);
            continue;
        }
        _visit(inputExpr.first, inputCaches, tensors);
    }

    // Create Self Unit / Cache
    auto op = expr->get();
    if (nullptr == op) {
        // Make Cache For Single Tensor
        _createSingle(expr);
        inputCaches.insert(expr->inside()->mCache);
        return;
    }
    std::shared_ptr<ComputeCache::Unit> unitP(new ComputeCache::Unit);
    ComputeCache::Unit& unit = *unitP;
    unit.op = expr->get();
    unit.extraBuffer = expr->extra().first;
    unit.inside = std::weak_ptr<Expr::Inside>(expr->inside());
    unit.inputs.resize(inputs.size());
    for (int i = 0; i < inputs.size(); ++i) {
        auto inputExpr = inputs[i]->expr();
        if (!req[i]) {
            // The computation doesn't need this input's content, but the
            // execution's onResize still needs its shape info.
            ComputeCache::TensorContent content;
            content.tensor.reset(new Tensor);
            unit.inputOutsides.emplace_back(std::make_pair(content.tensor.get(), inputExpr.first->outputInfo(inputExpr.second)));
            unit.inputs[i] = content.tensor.get();
            tensors.emplace_back(std::move(content));
            continue;
        }
        auto inputUnit = inputExpr.first->inside()->mUnit;
        if (nullptr != inputUnit) {
            unit.inputs[i] = inputUnit->outputs[inputExpr.second];
            TensorUtils::getDescribe(unit.inputs[i])->useCount++;
            unit.inputsNeedRelease.emplace_back(i);
            continue;
        }
        auto inputCache = inputExpr.first->inside()->mCache;
        if (nullptr != inputCache) {
            unit.inputs[i] = inputCache->getTensor(inputExpr.first->inside()->mCacheOffset + inputExpr.second, false);
            continue;
        }
        MNN_ASSERT(false);
    }
    unit.outputs.resize(expr->outputSize());
    unit.aliveOutside.resize(expr->outputSize());
    for (int i = 0; i < unit.outputs.size(); ++i) {
        ComputeCache::TensorContent content;
        content.tensor.reset(new Tensor);
        unit.outputs[i] = content.tensor.get();
        unit.aliveOutside[i] = false;
        tensors.emplace_back(std::move(content));
    }
    expr->inside()->mUnit = unitP;
}

void Executor::makeCache(const std::vector<EXPRP>& expr, bool forceCPU) {
    std::lock_guard<std::mutex> _l(mMutex);
    //FUNC_PRINT(mCaches.size());
    std::set<std::shared_ptr<Executor::ComputeCache>> inputCaches;
    std::vector<ComputeCache::TensorContent> tensors;
    for (auto e : expr) {
        _visit(e, inputCaches, tensors);
    }
    _create(expr, std::move(inputCaches), std::move(tensors), forceCPU);
}
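
// A minimal end-to-end sketch (hypothetical caller; the EXPRP value and the
// way a cache is fetched back out of an Expr are assumptions based on the
// Expr::Inside fields used in this file):
//
//   auto exe = Executor::getGlobalExecutor();
//   exe->makeCache({output}, /*forceCPU=*/false); // plan units + caches
//   exe->runCache(output->inside()->mCache);      // resize + execute lazily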

void Executor::addOpCostTime(int op, float costTime) {
#ifdef MNN_EXPR_ENABLE_PROFILER
    mProfiler->add(op, costTime);
#endif
}

ErrorCode Executor::runCache(std::shared_ptr<ComputeCache> cache) {
    std::lock_guard<std::mutex> _l(mMutex);
    return cache->compute();
}

void Executor::resetProfile() {
#ifdef MNN_EXPR_ENABLE_PROFILER
    mProfiler->reset();
#endif
}

void Executor::dumpProfile() {
#ifdef MNN_EXPR_ENABLE_PROFILER
    mProfiler->dump();
#endif
}

} // namespace Express
} // namespace MNN