//
// Pipeline.cpp
// MNN
//
// Created by MNN on 2019/01/14.
// Copyright © 2018, Alibaba Group Holding Limited
//

#include "core/Pipeline.hpp"
#include <string.h>
#include "core/Backend.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include "core/WrapExecution.hpp"
#include "geometry/GeometryComputerUtils.hpp"
#include "shape/SizeComputer.hpp"
//#define MNN_OPEN_TIME_TRACE
#include <MNN/AutoTime.hpp>
//#define MNN_DEBUG_TENSOR_SIZE
//#define MNN_DEBUG_PREPARE
#define MNN_FAST_RESIZE

namespace MNN {

OperatorInfo::OperatorInfo() {
    mContent = new Info;
    MNN_ASSERT(nullptr != mContent);
}
OperatorInfo::~OperatorInfo() {
    delete mContent;
}

const std::string& OperatorInfo::name() const {
    return mContent->name;
}

const std::string& OperatorInfo::type() const {
    return mContent->type;
}

float OperatorInfo::flops() const {
    return mContent->flops;
}
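
// Choose how a tensor's memory is acquired from the backend: model inputs,
// constants, trainable parameters and handle-typed tensors get separate
// dynamic storage (DYNAMIC_SEPERATE), all other tensors use the shared
// DYNAMIC storage.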
static Backend::StorageType _getTensorStorageType(const Tensor* tensor) {
    auto des = TensorUtils::getDescribe(tensor);
    auto usage = des->usage;
    if (TensorUsage::CONSTANT == usage || TensorUsage::INPUT == usage || TensorUsage::TRAINABLE == usage) {
        return Backend::DYNAMIC_SEPERATE;
    }
    if (tensor->buffer().type.code == halide_type_handle) {
        return Backend::DYNAMIC_SEPERATE;
    }
    return Backend::DYNAMIC;
}
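
// Whether a tensor's backend memory may be released after its last use during
// resize: handle-typed tensors, constants, trainable parameters and outputs
// are never released, and when inputs are allocated outside the pipeline only
// NORMAL-usage tensors are released.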
static bool _needRelease(const Tensor* tensor, bool inputOutside) {
    auto des = TensorUtils::getDescribe(tensor);
    auto usage = des->usage;
    if (inputOutside) {
        return usage == Tensor::InsideDescribe::NORMAL;
    }
    if (tensor->buffer().type.code == halide_type_handle) {
        return false;
    }
    if (TensorUsage::CONSTANT == usage || TensorUsage::TRAINABLE == usage || TensorUsage::OUTPUT == usage) {
        return false;
    }
    return true;
}
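
// Decrease a tensor's use count; once it reaches zero and the memory is owned
// by the backend, return the buffer so it can be reused by later commands.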
static void _releaseTensor(Tensor* origin, bool mAllocInput) {
    TensorUtils::getDescribe(origin)->useCount -= 1;
    if (0 == TensorUtils::getDescribe(origin)->useCount &&
        TensorUtils::getDescribe(origin)->memoryType == Tensor::InsideDescribe::MEMORY_BACKEND) {
        auto needRelease = _needRelease(origin, !mAllocInput);
        auto bn = TensorUtils::getDescribe(origin)->backend;
        if (nullptr != bn && needRelease) {
            // For zero-shape tensors the backend may be null
            bn->onReleaseBuffer(origin, Backend::DYNAMIC);
        }
    }
}
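
// Allocate backend memory for a tensor that does not yet belong to any
// backend; tensors that already have a backend are left as they are.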
static bool _allocTensor(Tensor* t, Backend* curBackend) {
    auto memoryType = _getTensorStorageType(t);
    auto bn = TensorUtils::getDescribe(t)->backend;
    auto des = TensorUtils::getDescribe(t);
    if (nullptr == bn) {
        TensorUtils::setLinearLayout(t);
        des->backend = curBackend;
        auto res = curBackend->onAcquireBuffer(t, memoryType);
        return res;
    }
    return true;
}
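
// Fill the per-op debug info (name, type and estimated FLOPs) used by the
// callback-based execution path.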
void Pipeline::UnitInfo::setUp(const Command& command, int index) {
    if (nullptr != command.op->name()) {
        mContent->name = command.op->name()->str();
    } else {
        char buffer[20];
        sprintf(buffer, "%d", index);
        mContent->name = std::string(EnumNameOpType(command.op->type())) + buffer;
    }
    mContent->type = EnumNameOpType(command.op->type());
#ifndef MNN_BUILD_MINI
    mContent->flops = SizeComputer::computeFlops(command.op, command.inputs, command.outputs);
#endif
}
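
// A pipeline owns the scheduled ops of one compute path: the main backend runs
// the commands, while the CPU backend serves as fallback and holds constant
// tensors, which are separated out up front.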
Pipeline::Pipeline(std::vector<Schedule::PipelineInfo>&& infos, std::shared_ptr<Backend> backend,
                   std::shared_ptr<Backend> cpuBackend, bool allocInput, bool geometry)
#ifndef MNN_BUILD_MINI
    : mContext(cpuBackend, true), mUseGeometry(geometry) {
#else
{
#endif
    MNN_ASSERT(nullptr != backend);
    MNN_ASSERT(nullptr != cpuBackend);
    mBackupBackend = cpuBackend;
    mBackend = backend;
    mAllocInput = allocInput;
    mInfo = std::move(infos);
    GeometryComputerUtils::buildConstantTensors(mInfo, mBackupBackend, !mAllocInput, mConstTensors, mMidConstTensors);
}
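
// Clone executions from an existing cache onto this pipeline's backend so they
// can be reused; executions that do not support cloning are skipped.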
void Pipeline::cloneExecution(const std::map<const Op*, std::shared_ptr<Execution>>& cache) {
    for (auto& iter : cache) {
        Execution* dst = nullptr;
        bool res = iter.second->onClone(mBackend.get(), iter.first, &dst);
        if (!res) {
            continue;
        }
        MNN_ASSERT(nullptr != dst);
        mOriginExecution.insert(std::make_pair(iter.first, std::shared_ptr<Execution>(dst)));
    }
}
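
// Build the command buffer: a static model's ops are copied verbatim, while a
// normal model goes through shape computation and the geometry transform that
// lowers the ops into primitive commands.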
ErrorCode Pipeline::encode(bool isStatic) {
    // For a static model, just copy the op info into the command buffer
    if (isStatic) {
        for (auto& info : mInfo) {
            flatbuffers::FlatBufferBuilder builder;
            auto lastOffset = Op::Pack(builder, info.op->UnPack());
            builder.Finish(lastOffset);
            Command cmd;
            cmd.buffer.resize(builder.GetSize());
            ::memcpy(cmd.buffer.data(), builder.GetBufferPointer(), cmd.buffer.size());
            cmd.outputs = info.outputs;
            cmd.inputs = info.inputs;
            cmd.op = flatbuffers::GetMutableRoot<Op>(cmd.buffer.data());
            mBuffer.command.push_back(cmd);
            // mBuffer.command.emplace_back(GeometryComputerUtils::makeCommand(info.op->UnPack(), info.inputs,
            // info.outputs));
        }
        return NO_ERROR;
    } else {
#ifndef MNN_BUILD_MINI
        mContext.clear();
        mBuffer.command.clear();
        mBuffer.extras.clear();
        /** Size compute and const compute begin */
        for (auto t : mConstTensors) {
            TensorUtils::getDescribe(t)->backend = mBackupBackend.get();
            TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::CONSTANT;
        }
        if (mInit) {
            for (auto t : mMidConstTensors) {
                if (t->elementSize() > 0) {
                    mBackupBackend->onReleaseBuffer(t, Backend::STATIC);
                }
                TensorUtils::getDescribe(t)->backend = nullptr;
            }
        }
        mInit = true;
        return GeometryComputerUtils::shapeComputeAndGeometryTransform(mInfo, mBuffer, mContext, mBackupBackend, mUseGeometry);
#endif
    }
    return NO_ERROR;
}
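
// Prepare for execution: count how many commands consume each tensor, create
// (or reuse cached) executions per command, allocate tensor memory, wrap
// executions whose inputs live on another backend, resize them, and release
// intermediate tensors as soon as their last consumer has been resized.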
ErrorCode Pipeline::allocMemory(bool supportDebug) {
    mExecutions.clear();
    mDebugInfos.clear();
    mBackend->onClearBuffer();
    mBackupBackend->onClearBuffer();

    /** Prepare Execution And Alloc */
    // Compute refCount
    for (auto& iter : mBuffer.command) {
        if (!iter.buffer.empty()) {
            iter.op = flatbuffers::GetMutableRoot<Op>((void*)iter.buffer.data());
        }
        for (auto t : iter.inputs) {
            auto des = TensorUtils::getDescribe(t);
            if (des->memoryType == Tensor::InsideDescribe::MEMORY_VIRTUAL) {
                for (auto& r : des->regions) {
                    TensorUtils::getDescribe(r.origin)->useCount += 1;
                    if (nullptr != r.offset) {
                        TensorUtils::getDescribe(r.offset)->useCount += 1;
                    }
                }
            } else {
                des->useCount += 1;
            }
        }
    }
    // Create Execution and Alloc
    mBackend->onResizeBegin();
    mExecutions.resize(mBuffer.command.size());
    for (int i = 0; i < mBuffer.command.size(); ++i) {
        auto& iter = mBuffer.command[i];
        // MNN_PRINT("%d - %s\n", i, EnumNameOpType(iter.op->type()));
        mExecutions[i] = nullptr;
        bool cached = false;
        /** Cache origin execution for fast resize */
        auto exeIter = mOriginExecution.find(iter.op);
        if (exeIter != mOriginExecution.end()) {
            mExecutions[i] = exeIter->second;
            cached = true;
        }
        // Create exe
        if (nullptr == mExecutions[i]) {
            mExecutions[i].reset(mBackend->onCreate(iter.inputs, iter.outputs, iter.op));
            if (nullptr == mExecutions[i]) {
                mExecutions[i].reset(mBackupBackend->onCreate(iter.inputs, iter.outputs, iter.op));
                if (nullptr == mExecutions[i]) {
                    MNN_ERROR("Create execution error : %d\n", iter.op->type());
                    return NOT_SUPPORT;
                }
            }
        }
        // invalid means memory alloc failed
        if (!mExecutions[i]->valid()) {
            return OUT_OF_MEMORY;
        }
        auto curBackend = mExecutions[i]->backend();
        // Alloc for Tensors
        bool wrap = false;
        auto allocFunction = [&](const std::vector<Tensor*>& tensors) {
            for (auto t : tensors) {
                auto des = TensorUtils::getDescribe(t);
                if (des->memoryType == Tensor::InsideDescribe::MEMORY_VIRTUAL) {
                    // Raster's inputs
                    for (auto& r : des->regions) {
                        auto allocRes = _allocTensor(r.origin, curBackend);
                        if (!allocRes) {
                            return OUT_OF_MEMORY;
                        }
                        if (nullptr != r.offset) {
                            allocRes = _allocTensor(r.offset, curBackend);
                            if (!allocRes) {
                                return OUT_OF_MEMORY;
                            }
                        }
                    }
                } else {
                    auto allocRes = _allocTensor(t, curBackend);
                    if (!allocRes) {
                        return OUT_OF_MEMORY;
                    }
                }
            }
            return NO_ERROR;
        };
        if (mAllocInput) {
            auto code = allocFunction(iter.inputs);
            if (NO_ERROR != code) {
                return code;
            }
        }
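        // Check whether any input tensor lives on a backend other than the one
        // chosen for this execution; if so the execution must be wrapped so its
        // inputs are copied to the right backend before running.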
        for (auto t : iter.inputs) {
            auto des = TensorUtils::getDescribe(t);
            if (des->memoryType == Tensor::InsideDescribe::MEMORY_VIRTUAL) {
                // Raster's inputs
                for (auto& r : des->regions) {
                    MNNForwardType type = MNN_FORWARD_CPU;
                    auto origin = r.origin;
                    auto bn = TensorUtils::getDescribe(origin)->backend;
                    if (nullptr != bn) {
                        type = bn->type();
                    }
                    if (type != curBackend->type()) {
                        wrap = true;
                    }
                }
            } else {
                auto bn = TensorUtils::getDescribe(t)->backend;
                MNNForwardType type = MNN_FORWARD_CPU;
                if (nullptr != bn) {
                    type = bn->type();
                }
                if (type != curBackend->type()) {
                    wrap = true;
                }
            }
        }

        {
            auto code = allocFunction(iter.outputs);
            if (NO_ERROR != code) {
                return code;
            }
        }

        // Wrap If needed
        if (wrap && (!cached)) {
            mExecutions[i].reset(new WrapExecution(mBackupBackend.get(), mExecutions[i]));
        }
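        // Cache only executions created straight from the model's own ops (no
        // locally serialized buffer and not Raster) so they can be reused for a
        // later fast resize.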
2020-11-05 16:41:56 +08:00
|
|
|
if ((!cached) && iter.buffer.empty() && (iter.op->type() != OpType_Raster)) {
|
|
|
|
mOriginExecution.insert(std::make_pair(iter.op, mExecutions[i]));
|
2020-03-18 16:20:28 +08:00
|
|
|
}
|
2020-11-05 16:41:56 +08:00
|
|
|
auto code = mExecutions[i]->onResize(iter.inputs, iter.outputs);
|
|
|
|
if (NO_ERROR != code) {
|
|
|
|
return code;
|
2019-04-17 10:49:11 +08:00
|
|
|
}
|
2020-11-05 16:41:56 +08:00
|
|
|
// Free mid tensor
|
|
|
|
for (auto t : iter.inputs) {
|
2019-04-17 10:49:11 +08:00
|
|
|
auto des = TensorUtils::getDescribe(t);
|
2020-11-05 16:41:56 +08:00
|
|
|
if (des->memoryType == Tensor::InsideDescribe::MEMORY_VIRTUAL) {
|
|
|
|
// Raster's inputs
|
|
|
|
for (auto& r : des->regions) {
|
2021-02-07 10:45:07 +08:00
|
|
|
_releaseTensor(r.origin, mAllocInput);
|
|
|
|
if (nullptr != r.offset) {
|
|
|
|
_releaseTensor(r.offset, mAllocInput);
|
2020-11-05 16:41:56 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2021-02-07 10:45:07 +08:00
|
|
|
_releaseTensor(t, mAllocInput);
|
2020-11-05 16:41:56 +08:00
|
|
|
}
|
2019-04-17 10:49:11 +08:00
|
|
|
}
|
|
|
|
}
|
2020-11-05 16:41:56 +08:00
|
|
|
mBackend->onResizeEnd();
|
2019-04-17 10:49:11 +08:00
|
|
|
|
2020-11-05 16:41:56 +08:00
|
|
|
/** Prepare DebugInfo*/
|
|
|
|
if (supportDebug) {
|
|
|
|
mDebugInfos.resize(mBuffer.command.size());
|
|
|
|
for (int i = 0; i < mBuffer.command.size(); ++i) {
|
|
|
|
mDebugInfos[i].setUp(mBuffer.command[i], i);
|
2019-04-17 10:49:11 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return NO_ERROR;
|
|
|
|
}
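
// Run every command in order on the main backend, stopping at the first error.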
ErrorCode Pipeline::execute() {
    mBackend->onExecuteBegin();
    for (int i = 0; i < mBuffer.command.size(); ++i) {
        auto& cmd = mBuffer.command[i];
        auto code = mExecutions[i]->onExecute(cmd.inputs, cmd.outputs);
        if (NO_ERROR != code) {
            mBackend->onExecuteEnd();
            return code;
        }
    }
    mBackend->onExecuteEnd();
    return NO_ERROR;
}
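
// Like execute(), but invokes the user callbacks around every op: the before
// callback may skip an op's execution and the after callback may stop the run.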
ErrorCode Pipeline::executeCallBack(const TensorCallBackWithInfo& before, const TensorCallBackWithInfo& after) {
    if (mDebugInfos.empty()) {
        // Debug info was not prepared, run without callbacks
        return execute();
    }
    mBackend->onExecuteBegin();
    for (int i = 0; i < mBuffer.command.size(); ++i) {
        auto& cmd = mBuffer.command[i];
        auto& info = mDebugInfos[i];
        auto run = before(cmd.inputs, &info);
        if (run) {
            auto code = mExecutions[i]->onExecute(cmd.inputs, cmd.outputs);
            if (NO_ERROR != code) {
                mBackend->onExecuteEnd();
                return code;
            }
        }
        auto stop = !(after(cmd.outputs, &info));
        if (stop) {
            mBackend->onExecuteEnd();
            return CALL_BACK_STOP;
        }
    }
    mBackend->onExecuteEnd();
    return NO_ERROR;
}
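
// Release constant tensors and cached executions owned by the pipeline.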
Pipeline::~Pipeline() {
    mExecutions.clear();
    for (auto t : mConstTensors) {
        mBackupBackend->onReleaseBuffer(t, Backend::STATIC);
    }
    if (mInit) {
        for (auto t : mMidConstTensors) {
            if (t->elementSize() > 0) {
                mBackupBackend->onReleaseBuffer(t, Backend::STATIC);
            }
            TensorUtils::getDescribe(t)->backend = nullptr;
        }
    }
    mOriginExecution.clear();
}

} // namespace MNN