//
//  GeometryComputerUtils.cpp
//  MNN
//
//  Created by MNN on 2020/05/11.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "GeometryComputerUtils.hpp"
#include "core/OpCommonUtils.hpp"
#include "core/RuntimeFactory.hpp"
#include "shape/SizeComputer.hpp"
#include "core/AutoStorage.h"

#ifdef MNN_BUILD_CODEGEN
#include "OpFuse.hpp"
#endif

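// Initial byte capacity for the small FlatBufferBuilders used below to serialize single Ops.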
#define DEFAULT_ALLOCATE_SIZE 32

namespace MNN {
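// Return true if any output tensor of the op has a dimension with length <= 0,
// i.e. the op produces an empty tensor and its computation can be skipped.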
static bool _hasZeroShapeOutput(const Schedule::OpCacheInfo& info) {
    for (auto t : info.outputs) {
        for (int v = 0; v < t->dimensions(); ++v) {
            if (t->length(v) <= 0) {
                return true;
            }
        }
    }
    return false;
}

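// Serialize a Pooling Op into the given FlatBufferBuilder and return its offset.
// kernel / stride / pads are (x, y) pairs; countType is only written when it
// differs from AvgPoolCountType_DEFAULT.
// Illustrative usage (a sketch, assuming a 2x2 max pooling with VALID padding):
//   flatbuffers::FlatBufferBuilder builder;
//   builder.Finish(GeometryComputerUtils::makePool(builder, {2, 2}, {2, 2},
//       PoolType_MAXPOOL, PoolPadType_VALID, {0, 0}, false, AvgPoolCountType_DEFAULT));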
flatbuffers::Offset<Op> GeometryComputerUtils::makePool(flatbuffers::FlatBufferBuilder& builder, std::pair<int, int> kernel, std::pair<int, int> stride, PoolType type, MNN::PoolPadType pad, std::pair<int, int> pads, bool isglobal, AvgPoolCountType countType) {
    PoolBuilder poolB(builder);
    poolB.add_type(type);
    poolB.add_padType(pad);
    poolB.add_padX(pads.first);
    poolB.add_padY(pads.second);
    poolB.add_kernelX(kernel.first);
    poolB.add_kernelY(kernel.second);
    poolB.add_strideX(stride.first);
    poolB.add_strideY(stride.second);
    poolB.add_isGlobal(isglobal);
    if (AvgPoolCountType_DEFAULT != countType) {
        poolB.add_countType(countType);
    }
    auto poolOffset = poolB.Finish();
    OpBuilder opB(builder);
    opB.add_type(OpType_Pooling);
    opB.add_main(poolOffset.Union());
    opB.add_main_type(OpParameter_Pool);
    return opB.Finish();
}

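// Mark ops whose needed inputs are all constant as Schedule::CONSTANT so they can be
// folded ahead of time, then mark inputs whose content is required by size computation
// as constant and propagate constness until a fixed point. Returns the index of the
// last op whose input had to be forced constant, or -1 if no such op exists.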
int GeometryComputerUtils::buildConstantTensors(std::vector<Schedule::OpCacheInfo>& infos) {
    // Check middle const
    for (auto& info : infos) {
        if (info.op->type() == OpType_Const) {
            continue;
        }
        bool isConst = true;
        for (int i = 0; i < info.inputs.size(); ++i) {
            if (TensorUtils::getDescribe(info.inputs[i])->usage == Tensor::InsideDescribe::CONSTANT) {
                continue;
            }
            if (OpCommonUtils::opNeedContent(info.op->type(), i)) {
                isConst = false;
                break;
            }
        }
        if (isConst) {
            for (auto t : info.outputs) {
                TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::CONSTANT;
            }
            info.type = Schedule::CONSTANT;
        }
    }
    // Check ops that force size computation on their inputs
    int breakIndex = -1;
    for (int infoIndex = 0; infoIndex < infos.size(); ++infoIndex) {
        auto& info = infos[infoIndex];
        if (info.op->type() == OpType_Const) {
            continue;
        }
        if (info.op->type() == OpType_Where && info.op->main_type() != OpParameter_Extra) {
            // For compatibility with old models
            continue;
        }
        auto dims = SizeComputer::needInputContent(info.op, info.inputs.size());
        for (auto index : dims) {
            if (index < info.inputs.size()) {
                TensorUtils::getDescribe(info.inputs[index])->stageMask |= MNN::Tensor::InsideDescribe::StageInfo::GEOMETRY_STAGE;
                if (TensorUtils::getDescribe(info.inputs[index])->usage != Tensor::InsideDescribe::CONSTANT) {
                    breakIndex = infoIndex;
                    TensorUtils::getDescribe(info.inputs[index])->usage = Tensor::InsideDescribe::CONSTANT;
                }
            }
        }
    }
    if (breakIndex >= 0) {
        bool hasConst = true;
        while (hasConst) {
            hasConst = false;
            for (auto& info : infos) {
                if (info.type == Schedule::CONSTANT) {
                    continue;
                }
                bool turnConst = false;
                for (auto t : info.outputs) {
                    if (TensorUtils::getDescribe(t)->usage == Tensor::InsideDescribe::CONSTANT) {
                        turnConst = true;
                        break;
                    }
                }
                if (turnConst) {
                    for (auto t : info.outputs) {
                        TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::CONSTANT;
                        TensorUtils::getDescribe(t)->stageMask |= MNN::Tensor::InsideDescribe::StageInfo::GEOMETRY_STAGE;
                    }
                    for (auto t : info.inputs) {
                        TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::CONSTANT;
                        TensorUtils::getDescribe(t)->stageMask |= MNN::Tensor::InsideDescribe::StageInfo::GEOMETRY_STAGE;
                    }
                    info.type = Schedule::CONSTANT;
                    hasConst  = true;
                }
            }
        }
    }
    for (auto& info : infos) {
        if (info.type == Schedule::CONSTANT) {
            for (auto t : info.outputs) {
                TensorUtils::getDescribe(t)->usage = Tensor::InsideDescribe::CONSTANT;
            }
        }
    }
    return breakIndex;
}

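// Run shape inference (unless skipShapeCompute) and constant folding for every op,
// then lower each remaining op into geometry commands via its GeometryComputer.
// Returns NO_ERROR on success or the first shape / geometry / execution error met.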
ErrorCode GeometryComputerUtils::shapeComputeAndGeometryTransform(
    std::vector<Schedule::OpCacheInfo>& infos,
    GeometryComputer::Context& geoContext,
    std::shared_ptr<Backend> backupBackend,
    Runtime::CompilerType compileType, bool skipShapeCompute) {
    /** Size Compute and compute Const Begin */
    GeometryComputer::Context ctx(backupBackend);
    // Size Compute and compute Const
    for (int i = 0; i < infos.size(); ++i) {
        auto& info = infos[i];
        auto& cmdBufferVir = info.executeBuffer;
        auto& tempBuffer = info.cacheBuffer;
        // TODO: Optimize
        cmdBufferVir.command.clear();
        cmdBufferVir.extras.clear();
        for (auto t : info.outputs) {
            if (!TensorUtils::getDescribe(t)->isMutable) {
                continue;
            }
            auto usage = TensorUtils::getDescribe(t)->usage;
            auto type = TensorUtils::getDescribe(t)->memoryType;
            MNN_ASSERT(type != Tensor::InsideDescribe::MEMORY_OUTSIDE);
            MNN_ASSERT(type != Tensor::InsideDescribe::MEMORY_HOST);
            if (TensorUtils::getDescribeOrigin(t)->mContent->count() > 1) {
                TensorUtils::getDescribeOrigin(t)->mContent = new Tensor::InsideDescribe::NativeInsideDescribe;
                t->buffer().dim = TensorUtils::getDescribe(t)->dims;
                TensorUtils::getDescribe(t)->usage = usage;
            } else {
                if (info.type != Schedule::CONSTANT && usage != Tensor::InsideDescribe::TRAINABLE) {
                    TensorUtils::getDescribeOrigin(t)->mContent->setBackend(nullptr);
                    // TODO: If the output is static and its length is larger than the new size, don't clear the memory
                    TensorUtils::getDescribeOrigin(t)->mContent->mem.reset(nullptr);
                }
            }
        }
        if (!skipShapeCompute) {
            auto res = SizeComputer::computeOutputSize(info.op, info.inputs, info.outputs);
            if (!res) {
                if (info.op->name() != nullptr) {
                    MNN_ERROR("Compute Shape Error for %s\n", info.op->name()->c_str());
                } else {
                    MNN_ERROR("Compute Shape Error for %d\n", info.op->type());
                }
                return COMPUTE_SIZE_ERROR;
            }
            // FIXME: Find a better way to keep compatibility with old models
            /**
             For convolution on a 2D / 3D tensor (dense / 1D convolution),
             old code accesses dim[2] / dim[3] to get width and height,
             so set those lengths to 1 for compatibility.
             */
            for (auto t : info.outputs) {
                TensorUtils::adjustTensorForCompability(t);
            }
        }

        if (info.type == Schedule::CONSTANT) {
            if (_hasZeroShapeOutput(info)) {
                continue;
            }
            ctx.clear();
            auto geo = GeometryComputer::search(info.op->type(), Runtime::Compiler_Loop);
            {
                auto res = geo->onRecompute(info.op, info.inputs, info.outputs, geoContext, tempBuffer);
                if (!res) {
                    tempBuffer.command.clear();
                    tempBuffer.extras.clear();
                    res = geo->onCompute(info.op, info.inputs, info.outputs, geoContext, tempBuffer);
                }
                if (!res) {
                    MNN_ERROR("Const Folder Error in geometry for %s\n", info.op->name()->c_str());
                    return NOT_SUPPORT;
                }
            }
            GeometryComputerUtils::makeRaster(tempBuffer, cmdBufferVir, ctx);
            for (auto t : info.outputs) {
                ctx.getRasterCacheCreateRecursive(t, cmdBufferVir);
            }
            for (auto& cp : cmdBufferVir.command) {
                auto& c = *cp;
                if (nullptr == c.execution) {
                    c.execution.reset(backupBackend->onCreate(c.inputs, c.outputs, c.op));
                }
                auto exe = c.execution;
                if (nullptr == exe.get()) {
                    MNN_ERROR("Const Folder Error for %s\n", info.op->name()->c_str());
                    return NO_EXECUTION;
                }
                for (auto t : c.outputs) {
                    auto des = TensorUtils::getDescribe(t);
                    TensorUtils::setLinearLayout(t);
                    auto res = backupBackend->onAcquireBuffer(t, Backend::STATIC);
                    if (!res) {
                        return OUT_OF_MEMORY;
                    }
                    des->setBackend(backupBackend.get());
                }
                auto code = exe->onResize(c.inputs, c.outputs);
                if (NO_ERROR != code) {
                    return NOT_SUPPORT;
                }
                code = exe->onExecute(c.inputs, c.outputs);
                if (NO_ERROR != code) {
                    return NOT_SUPPORT;
                }
            }
            // Clear const commands
            ctx.pushCache(cmdBufferVir);
            cmdBufferVir.command.clear();
            cmdBufferVir.extras.clear();
        }
    }
    /** Size Compute and compute Const End */

    /** Geometry Transform */
    for (int i = 0; i < infos.size(); ++i) {
        auto& info = infos[i];
        auto& cmdBufferReal = info.executeBuffer;
        auto& tempBuffer = info.cacheBuffer;
        // TODO: Optimize
        if (info.type == Schedule::CONSTANT) {
            continue;
        }
        if (_hasZeroShapeOutput(info)) {
            continue;
        }
        auto geo = GeometryComputer::search(info.op->type(), compileType);
        {
            bool res = false;
            if (!tempBuffer.hasWrap) {
                res = geo->onRecompute(info.op, info.inputs, info.outputs, geoContext, tempBuffer);
            }
            if (!res) {
                tempBuffer.command.clear();
                tempBuffer.extras.clear();
                res = geo->onCompute(info.op, info.inputs, info.outputs, geoContext, tempBuffer);
            }
            if (!res) {
                return NOT_SUPPORT;
            }
            tempBuffer.hasWrap = false;
            GeometryComputerUtils::makeRaster(tempBuffer, cmdBufferReal, geoContext);
            for (auto t : info.outputs) {
                auto des = TensorUtils::getDescribe(t);
                if (des->usage == Tensor::InsideDescribe::OUTPUT || des->usage == Tensor::InsideDescribe::TRAINABLE) {
                    // Output and trainable tensors must be computed directly
                    geoContext.getRasterCacheCreateRecursive(t, cmdBufferReal);
                }
            }
        }
    }

#ifdef MNN_BUILD_CODEGEN
    opFuse(infos, geoContext.forwardType());
#endif
    return NO_ERROR;
}

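// Append the commands of srcBuffer to dstBuffer, first emitting Raster commands for
// any input that is still virtual (MEMORY_VIRTUAL), so that every command appended to
// dstBuffer reads from materialized tensors.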
void GeometryComputerUtils::makeRaster(const CommandBuffer& srcBuffer, CommandBuffer& dstBuffer,
                                       GeometryComputer::Context& ctx) {
    dstBuffer.extras = srcBuffer.extras;
    for (int index = 0; index < srcBuffer.command.size(); ++index) {
        auto& iter = *srcBuffer.command[index];
        const Op* op = iter.op;
        auto& cmd = iter;
        auto type = op->type();
        MNN_ASSERT(OpType_Raster != type);
        for (int i = 0; i < iter.inputs.size(); ++i) {
            if (!OpCommonUtils::opNeedContent(type, i)) {
                continue;
            }
            auto des = TensorUtils::getDescribe(cmd.inputs[i]);
            // MNN_ASSERT(des->tensorArrayAttr == nullptr);
            if (des->memoryType == Tensor::InsideDescribe::MEMORY_VIRTUAL) {
                ctx.getRasterCacheCreateRecursive(cmd.inputs[i], dstBuffer);
            }
        }
        dstBuffer.command.emplace_back(srcBuffer.command[index]);
    }
}

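// Build a standalone BinaryOp command; the serialized Op lives in the command's own
// BufferStorage, so the returned Command is self-contained.
// Illustrative usage (a sketch, assuming described tensors a, b, sum):
//   auto cmd = GeometryComputerUtils::makeBinary(BinaryOpOperation_ADD, a, b, sum);
//   buffer.command.emplace_back(cmd);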
SharedPtr<Command> GeometryComputerUtils::makeBinary(int type, Tensor* input0, Tensor* input1, Tensor* output) {
    flatbuffers::FlatBufferBuilder builder(DEFAULT_ALLOCATE_SIZE);
    BinaryOpBuilder builder_(builder);
    builder_.add_opType(type);
    auto mainOffset = builder_.Finish().Union();
    OpBuilder opB(builder);
    opB.add_type(OpType_BinaryOp);
    opB.add_main(mainOffset);
    opB.add_main_type(OpParameter_BinaryOp);
    builder.Finish(opB.Finish());
    SharedPtr<Command> cmdP = new Command;
    auto& cmd = *cmdP;
    cmd.buffer.reset(new BufferStorage);
    cmd.buffer->storage = builder.ReleaseRaw(cmd.buffer->allocated_size, cmd.buffer->offset);
    cmd.inputs = {input0, input1};
    cmd.outputs = {output};
    cmd.op = flatbuffers::GetRoot<Op>(cmd.buffer->buffer());
    return cmdP;
}

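// Build a Reduction command over axis 1 with keepDims = true; type selects the
// reduction operation (sum, mean, max, ...).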
SharedPtr<Command> GeometryComputerUtils::makeReduce(ReductionType type, Tensor* input0, Tensor* output) {
    flatbuffers::FlatBufferBuilder builder(DEFAULT_ALLOCATE_SIZE);
    auto vec = builder.CreateVector(std::vector<int>{1});
    ReductionParamBuilder builder_(builder);
    builder_.add_operation(type);
    builder_.add_keepDims(true);
    builder_.add_dim(vec);
    auto mainOffset = builder_.Finish().Union();
    OpBuilder opB(builder);
    opB.add_type(OpType_Reduction);
    opB.add_main(mainOffset);
    opB.add_main_type(OpParameter_ReductionParam);
    builder.Finish(opB.Finish());
    SharedPtr<Command> cmdP = new Command;
    auto& cmd = *cmdP;
    cmd.buffer.reset(new BufferStorage);
    cmd.buffer->storage = builder.ReleaseRaw(cmd.buffer->allocated_size, cmd.buffer->offset);
    cmd.inputs = {input0};
    cmd.outputs = {output};
    cmd.op = flatbuffers::GetRoot<Op>(cmd.buffer->buffer());
    return cmdP;
}

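// Build a standalone UnaryOp command (e.g. UnaryOpOperation_ABS) mapping input0 to output.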
SharedPtr<Command> GeometryComputerUtils::makeUnary(UnaryOpOperation type, Tensor* input0, Tensor* output) {
    flatbuffers::FlatBufferBuilder builder(DEFAULT_ALLOCATE_SIZE);
    UnaryOpBuilder builder_(builder);
    builder_.add_opType(type);
    auto mainOffset = builder_.Finish().Union();
    OpBuilder opB(builder);
    opB.add_type(OpType_UnaryOp);
    opB.add_main(mainOffset);
    opB.add_main_type(OpParameter_UnaryOp);
    builder.Finish(opB.Finish());
    SharedPtr<Command> cmdP = new Command;
    auto& cmd = *cmdP;
    cmd.buffer.reset(new BufferStorage);
    cmd.buffer->storage = builder.ReleaseRaw(cmd.buffer->allocated_size, cmd.buffer->offset);
    cmd.inputs = {input0};
    cmd.outputs = {output};
    cmd.op = flatbuffers::GetRoot<Op>(cmd.buffer->buffer());
    return cmdP;
}

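// Wrap an Op already serialized into `builder` (builder.Finish must have been called)
// into a Command that takes ownership of the underlying buffer.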
SharedPtr<Command> GeometryComputerUtils::makeCommand(flatbuffers::FlatBufferBuilder& builder, const std::vector<Tensor*>& inputs,
                                                      const std::vector<Tensor*>& outputs) {
    SharedPtr<Command> cmdP = new Command;
    auto& cmd = *cmdP;
    cmd.buffer.reset(new BufferStorage);
    cmd.buffer->storage = builder.ReleaseRaw(cmd.buffer->allocated_size, cmd.buffer->offset);
    cmd.outputs = outputs;
    cmd.inputs = inputs;
    cmd.op = flatbuffers::GetRoot<Op>(cmd.buffer->buffer());
    return cmdP;
}

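// Build a MatMul command: output = transpose?(input0) * transpose?(input1), with an
// optional Bias appended as a third input when non-null.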
SharedPtr<Command> GeometryComputerUtils::makeMatMul(Tensor* input0, Tensor* input1, Tensor* output, Tensor* Bias, bool transposeA,
                                                     bool transposeB) {
    SharedPtr<Command> cmdP = new Command;
    auto& cmd = *cmdP;
    flatbuffers::FlatBufferBuilder builder(DEFAULT_ALLOCATE_SIZE);
    MatMulBuilder builder_(builder);
    builder_.add_transposeA(transposeA);
    builder_.add_transposeB(transposeB);
    auto mainOffset = builder_.Finish().Union();
    OpBuilder opB(builder);
    opB.add_type(OpType_MatMul);
    opB.add_main(mainOffset);
    opB.add_main_type(OpParameter_MatMul);
    builder.Finish(opB.Finish());
    cmd.buffer.reset(new BufferStorage);
    cmd.buffer->storage = builder.ReleaseRaw(cmd.buffer->allocated_size, cmd.buffer->offset);
    if (nullptr == Bias) {
        cmd.inputs = {input0, input1};
    } else {
        cmd.inputs = {input0, input1, Bias};
    }
    cmd.outputs = {output};
    cmd.op = flatbuffers::GetRoot<Op>(cmd.buffer->buffer());
    return cmdP;
}

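// Create a Region that views `size` contiguous elements of `src` starting at
// srcOffset, written at dstOffset; the remaining stride/size fields keep their
// defaults, describing a flat one-to-one copy.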
Tensor::InsideDescribe::Region GeometryComputerUtils::makeRawAddressRef(Tensor* src, int srcOffset, int size,
                                                                        int dstOffset) {
    Tensor::InsideDescribe::Region reg;
    // size defaults to {1, 1, 1}
    reg.size[2] = size;

    // src / dst default to offset 0 and stride {1, 1, 1}
    reg.src.offset = srcOffset;
    reg.dst.offset = dstOffset;
    reg.origin = src;
    return reg;
}

void GeometryComputerUtils::makeRawAddressRef(Tensor* dst, Tensor* src, int srcOffset, int size, int dstOffset) {
    auto describe = TensorUtils::getDescribe(dst);
    describe->memoryType = Tensor::InsideDescribe::MEMORY_VIRTUAL;
    describe->regions = {makeRawAddressRef(src, srcOffset, size, dstOffset)};
}

} // namespace MNN