//
// GeometryComputer.cpp
// MNN
//
// Created by MNN on 2020/04/01.
// Copyright © 2018, Alibaba Group Holding Limited
//

#include <mutex>
#include "geometry/GeometryComputer.hpp"
#include "core/Backend.hpp"
#include "core/OpCommonUtils.hpp"
#include "shape/SizeComputer.hpp"
#include "core/TensorUtils.hpp"

namespace MNN {

GeometryComputer::Context::~Context() {
    // Do nothing
}
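
// Pre-build a Raster op once and cache it in mRasterOp; getRasterCacheCreate reuses it
// for every raster command instead of rebuilding the flatbuffer each time.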
GeometryComputer::Context::Context(std::shared_ptr<Backend> allocBackend, MNNForwardType type, BackendConfig::PrecisionMode precision) {
    mBackend = allocBackend;
    flatbuffers::FlatBufferBuilder builder(32);
    OpBuilder opBuilder(builder);
    opBuilder.add_type(OpType_Raster);
    auto lastOffset = opBuilder.Finish();
    builder.Finish(lastOffset);
    mRasterOp.reset(new BufferStorage);
    mRasterOp->storage = builder.ReleaseRaw(mRasterOp->allocated_size, mRasterOp->offset);
    mForwardType = type;
    mPrecision = precision;
}

void GeometryComputer::Context::clear() {
    mTempConstTensors.clear();
}
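
// Return the constant tensors already cached for this op. On first lookup an empty
// entry is registered, so later allocConst calls for the same op attach to that cache.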
const std::vector<std::shared_ptr<Tensor>>& GeometryComputer::Context::searchConst(const Op* op) {
    auto iter = mConstTensors.find(op);
    if (iter == mConstTensors.end()) {
        mConstTensors.insert(std::make_pair(op, std::vector<std::shared_ptr<Tensor>>{}));
        return mEmpty;
    }
    return iter->second;
}
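
// Allocate a constant tensor in the backend's static memory. It is owned by the per-op
// cache if the op was registered via searchConst, otherwise by the temporary list that
// clear() releases.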
std::shared_ptr<Tensor> GeometryComputer::Context::allocConst(const Op* key, const std::vector<int>& shape,
                                                              halide_type_t type, Tensor::DimensionType dimType) {
    std::shared_ptr<Tensor> tensor(Tensor::createDevice(shape, type, dimType));
    TensorUtils::getDescribe(tensor.get())->usage = Tensor::InsideDescribe::CONSTANT;
    auto res = mBackend->onAcquireBuffer(tensor.get(), Backend::STATIC);
    if (!res) {
        return nullptr;
    }
    TensorUtils::getDescribeOrigin(tensor.get())->setBackend(mBackend.get());
    auto iter = mConstTensors.find(key);
    if (iter != mConstTensors.end()) {
        iter->second.emplace_back(tensor);
    } else {
        mTempConstTensors.emplace_back(tensor);
    }
    return tensor;
}

bool GeometryComputer::Context::allocTensor(Tensor* tensor) {
    auto res = mBackend->onAcquireBuffer(tensor, Backend::STATIC);
    if (!res) {
        return false;
    }
    TensorUtils::getDescribe(tensor)->usage = Tensor::InsideDescribe::CONSTANT;
    TensorUtils::getDescribeOrigin(tensor)->setBackend(mBackend.get());
    return true;
}
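
// True if any dimension is non-positive, i.e. the tensor holds no data.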
inline bool _hasZeroDim(const Tensor* t) {
    for (int i = 0; i < t->dimensions(); ++i) {
        if (t->length(i) <= 0) {
            return true;
        }
    }
    return false;
}
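
// A tensor still needs rasterizing if its memory is virtual and no raster command has
// been attached to it yet.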
static bool _virtualMemory(Tensor::InsideDescribe::NativeInsideDescribe* des) {
    return des->memoryType == Tensor::InsideDescribe::MEMORY_VIRTUAL && nullptr == des->rasterCommand.lock().get();
}
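
/**
 * Describe `output` as raster regions of `input` for the permutation given by `newshape`
 * (nullptr means reverse all axes):
 *   1. Skip length-1 axes and fuse axes that remain adjacent after the permutation.
 *   2. If more than three axes survive (and reordering looks profitable), move the three
 *      largest ones to the innermost positions, keeping their relative order.
 *   3. Emit one region per index of the remaining outer axes; the last three axes become
 *      the region's size/stride triple.
 * For example, transposing an [H, W] tensor (newshape = {1, 0}) produces a single region
 * with size = {1, W, H}, src strides = {0, 1, W} and dst strides = {0, H, 1}.
 */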
bool GeometryComputer::ComputePermuteRegion(Tensor* input, Tensor* output, int* newshape, int shapeDim) {
    auto inputDes = TensorUtils::getDescribe(input);
    auto outputDes = TensorUtils::getDescribe(output);
    MNN_ASSERT(input->dimensions() >= 1);
    MNN_ASSERT(output->dimensions() == input->dimensions());
    MNN_ASSERT(shapeDim == input->dimensions());
    auto originTensor = input;
    int shape[MNN_MAX_TENSOR_DIM];
    if (nullptr != newshape) {
        for (int i = 0; i < input->buffer().dimensions; ++i) {
            shape[i] = newshape[i];
        }
    } else {
        for (int i = 0; i < input->buffer().dimensions; ++i) {
            shape[i] = input->buffer().dimensions - i - 1;
        }
    }

    int inputShape[MNN_MAX_TENSOR_DIM];
    int inputStrides[MNN_MAX_TENSOR_DIM];
    int inputShapeSize = 0;
    int preAxis = -2;
    for (int i = 0; i < input->buffer().dimensions; ++i) {
        auto axis = shape[i];
        auto len = input->length(axis);
        if (1 == len) {
            continue;
        }
        if (axis - preAxis == 1) {
            // Fuse dimension if possible
            inputShape[inputShapeSize - 1] *= len;
        } else {
            if (preAxis >= 0) {
                // Compute last stride
                int stride = 1;
                for (int v = preAxis + 1; v < input->buffer().dimensions; ++v) {
                    stride *= input->length(v);
                }
                inputStrides[inputShapeSize - 1] = stride;
            }
            inputShapeSize += 1;
            inputShape[inputShapeSize - 1] = len;
        }
        preAxis = shape[i];
    }
    if (preAxis >= 0) {
        // Compute last stride
        int stride = 1;
        for (int v = preAxis + 1; v < input->buffer().dimensions; ++v) {
            stride *= input->length(v);
        }
        inputStrides[inputShapeSize - 1] = stride;
    }
    if (0 == inputShapeSize) {
        outputDes->memoryType = Tensor::InsideDescribe::MEMORY_VIRTUAL;
        outputDes->regions = {TensorUtils::makeFullSlice(input)};
        return true;
    }
    int outputStrides[MNN_MAX_TENSOR_DIM];
    {
        int stride = 1;
        for (int i = inputShapeSize - 1; i >= 0; --i) {
            outputStrides[i] = stride;
            stride *= inputShape[i];
        }
    }
    /** Move the three largest remaining dimensions to the last three positions,
     *  keeping their relative order unchanged.
     */
    bool isReorderShape = (inputShapeSize > 4);
    if (inputShapeSize == 4) {
        // TODO: Opt this logic
        isReorderShape = (inputShape[0] > inputShape[1] + inputShape[2] + inputShape[3]);
    }
    if (isReorderShape) {
        int max1 = inputShape[0], max2 = -1, max3 = -1;
        // Find the three largest values
        for (int i = 1; i < inputShapeSize; i++) {
            if (inputShape[i] > max1) {
                max3 = max2;
                max2 = max1;
                max1 = inputShape[i];
            } else if (inputShape[i] > max2) {
                max3 = max2;
                max2 = inputShape[i];
            } else if (inputShape[i] > max3) {
                max3 = inputShape[i];
            }
        }

        // Move the three largest values to the last positions
        int lastIndex = inputShapeSize - 1;
        for (int i = inputShapeSize - 1; i >= 0; i--) {
            if (inputShape[i] == max1) {
                if (i != lastIndex) {
                    std::swap(inputShape[i], inputShape[lastIndex]);
                    std::swap(inputStrides[i], inputStrides[lastIndex]);
                    std::swap(outputStrides[i], outputStrides[lastIndex]);
                }
                max1 = -1;
                lastIndex--;
            } else if (inputShape[i] == max2) {
                if (i != lastIndex) {
                    std::swap(inputShape[i], inputShape[lastIndex]);
                    std::swap(inputStrides[i], inputStrides[lastIndex]);
                    std::swap(outputStrides[i], outputStrides[lastIndex]);
                }
                max2 = -1;
                lastIndex--;
            } else if (inputShape[i] == max3) {
                if (i != lastIndex) {
                    std::swap(inputShape[i], inputShape[lastIndex]);
                    std::swap(inputStrides[i], inputStrides[lastIndex]);
                    std::swap(outputStrides[i], outputStrides[lastIndex]);
                }
                max3 = -1;
                lastIndex--;
            }
            if (lastIndex < inputShapeSize - 3) {
                break;
            }
        }
    }
    // Compute inside, outside, axis
    int inside = 1;
    int insideStride = 0;
    int outside = 1;
    int outsideStride = 0;
    int axis = 1;
    int axisStride = 0;
    int breakAxis = -1;
    int remainSize = 1;
    int outputInsideStride = 0;
    int outputAxisStride = 0;
    int outputOutsideStride = 0;
    {
        if (inputShapeSize >= 1) {
            inside = inputShape[inputShapeSize - 1];
            insideStride = inputStrides[inputShapeSize - 1];
            outputInsideStride = outputStrides[inputShapeSize - 1];
        }
        if (inputShapeSize >= 2) {
            axis = inputShape[inputShapeSize - 2];
            axisStride = inputStrides[inputShapeSize - 2];
            outputAxisStride = outputStrides[inputShapeSize - 2];
        }
        if (inputShapeSize >= 3) {
            outside = inputShape[inputShapeSize - 3];
            outsideStride = inputStrides[inputShapeSize - 3];
            outputOutsideStride = outputStrides[inputShapeSize - 3];
            breakAxis = inputShapeSize - 3;
            for (int i = 0; i < inputShapeSize - 3; ++i) {
                remainSize *= inputShape[i];
            }
        }
    }
    outputDes->regions.resize(remainSize);
    outputDes->memoryType = Tensor::InsideDescribe::MEMORY_VIRTUAL;
    int32_t mod[MNN_MAX_TENSOR_DIM];
    for (int i = 0; i < breakAxis; ++i) {
        int value = 1;
        for (int j = i + 1; j < breakAxis; ++j) {
            value *= inputShape[j];
        }
        mod[i] = value;
    }
    // Emit one region per outer index; decode the flat index into coordinates of the outer axes
    for (int indice = 0; indice < remainSize; ++indice) {
        int value = indice;
        int inputOffset = 0;
        int outputOffset = 0;
        for (int i = 0; i < breakAxis; ++i) {
            auto coordinate = value / mod[i];
            inputOffset += coordinate * inputStrides[i];
            outputOffset += coordinate * outputStrides[i];
            value = value % mod[i];
        }
        Tensor::InsideDescribe::Region& slice = outputDes->regions[indice];
        slice.src.offset = inputOffset;
        slice.src.stride[0] = outsideStride;
        slice.size[0] = outside;
        slice.src.stride[1] = axisStride;
        slice.size[1] = axis;
        slice.src.stride[2] = insideStride;
        slice.size[2] = inside;
        slice.origin = originTensor;
        slice.dst.offset = outputOffset;
        slice.dst.stride[0] = outputOutsideStride;
        slice.dst.stride[1] = outputAxisStride;
        slice.dst.stride[2] = outputInsideStride;
    }
    return true;
}
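
// Walk the region graph depth-first: first try to fuse chains of single-region virtual
// inputs into the current region, then recurse so every input has its raster command,
// and finally create the raster command for `src` itself.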
void GeometryComputer::Context::getRasterCacheCreateRecursive(Tensor* src, CommandBuffer& cmd) {
    auto srcDes = TensorUtils::getDescribe(src);
    if (!_virtualMemory(srcDes)) {
        return;
    }
    if (_hasZeroDim(src)) {
        return;
    }
    for (auto& input : srcDes->regions) {
        MNN_ASSERT(input.origin != src);
        auto inputDes = TensorUtils::getDescribe(input.origin);
        while (_virtualMemory(inputDes)) {
            if (1 != inputDes->regions.size()) {
                break;
            }
            bool merge = TensorUtils::fuseRegion(inputDes->regions[0], input);
            if (!merge) {
                break;
            }
            inputDes = TensorUtils::getDescribe(input.origin);
        }
        getRasterCacheCreateRecursive(input.origin, cmd);
    }
    getRasterCacheCreate(src, cmd);
}
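
// Emit one Raster command that materializes the virtual tensor `src` from its regions,
// reusing the Raster op cached in the constructor.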
void GeometryComputer::Context::getRasterCacheCreate(Tensor* src, CommandBuffer& cmdBuffer) {
    auto srcDes = TensorUtils::getDescribe(src);
    if (!_virtualMemory(srcDes)) {
        return;
    }
    std::shared_ptr<Command> cmdP(new Command);
    auto& cmd = *cmdP;
    cmd.op = flatbuffers::GetRoot<Op>(mRasterOp->buffer());
    cmd.buffer = mRasterOp;
    cmd.outputs = {src};
    TensorUtils::setRasterInputs(cmdP.get());
    srcDes->rasterCommand = std::weak_ptr<Command>(cmdP);
    cmdBuffer.command.emplace_back(std::move(cmdP));
    // srcDes->memoryType = Tensor::InsideDescribe::MEMORY_BACKEND;
    return;
}
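
// DefaultGeometryComputer performs no geometry decomposition: onCompute wraps the original
// op into a single command, and onRecompute accepts the cached result only when exactly one
// command was produced.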
bool DefaultGeometryComputer::onRecompute(const Op* op, const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                          Context& context, CommandBuffer& cmd) const {
    if (1 != cmd.command.size()) {
        return false;
    }
    return true;
}

bool DefaultGeometryComputer::onCompute(const Op* op, const std::vector<Tensor*>& originInputs,
                                        const std::vector<Tensor*>& outputs, GeometryComputer::Context& context,
                                        CommandBuffer& res) const {
    auto inputs = originInputs;
    // Last Command
    std::shared_ptr<Command> cmdP(new Command);
    auto& cmd = *cmdP;
    cmd.op = op;
    cmd.inputs = std::move(inputs);
    cmd.outputs = std::move(outputs);
    res.command.emplace_back(std::move(cmdP));
    return true;
}
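
// Registry mapping op types to their geometry computers, with a separate table for the
// loop-based compiler; falls back to DefaultGeometryComputer when nothing is registered.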
class GeometryComputerManager {
public:
    GeometryComputer* search(int type, Runtime::CompilerType compType) {
        if (Runtime::Compiler_Origin == compType) {
            return &mDefault;
        }
        if (Runtime::Compiler_Loop == compType) {
            auto iter = mLoopTable[type].get();
            if (iter != nullptr) {
                return iter;
            }
        }
        // Geometry
        auto iter = mTable[type].get();
        if (iter != nullptr) {
            // FUNC_PRINT(type);
            return iter;
        }
        return &mDefault;
    }
    static void init() {
        gInstance = new GeometryComputerManager;
        gInstance->mTable.resize(OpType_MAX + 1);
        gInstance->mLoopTable.resize(OpType_MAX + 1);
    }
    static GeometryComputerManager* get() {
        return gInstance;
    }
    void insert(std::shared_ptr<GeometryComputer> c, int type, Runtime::CompilerType compType) {
        if (Runtime::Compiler_Geometry == compType) {
            mTable[type] = c;
        } else if (Runtime::Compiler_Loop == compType) {
            mLoopTable[type] = c;
        }
    }

private:
    std::vector<std::shared_ptr<GeometryComputer>> mTable;
    std::vector<std::shared_ptr<GeometryComputer>> mLoopTable;
    static GeometryComputerManager* gInstance;
    DefaultGeometryComputer mDefault;
};

GeometryComputerManager* GeometryComputerManager::gInstance;

void GeometryComputer::registerGeometryComputer(std::shared_ptr<GeometryComputer> comp, std::vector<int> type, Runtime::CompilerType compType) {
    auto ins = GeometryComputerManager::get();
    for (auto t : type) {
        ins->insert(comp, t, compType);
    }
}

void GeometryComputer::init() {
    if (nullptr == GeometryComputerManager::get()) {
        GeometryComputerManager::init();
        registerGeometryOps();
    }
}

const GeometryComputer* GeometryComputer::search(int type, Runtime::CompilerType compType) {
    return GeometryComputerManager::get()->search(type, compType);
}
} // namespace MNN