//
//  ConvExecution.cpp
//  MNN
//
//  Created by MNN on 2019/02/28.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "ConvExecution.hpp"
#include "ConvWinograd.hpp"
#include "core/ConvolutionCommon.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include "backend/opencl/core/OpenCLBackend.hpp"
#include "backend/opencl/core/OpenCLRunningUtils.hpp"
#include "ConvLowMemoryExecution.hpp"

namespace MNN {
namespace OpenCL {
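// Bias upload: stage the host bias in a mapped (zero-copy) OpenCL buffer, zero-padded by
// ALIGN_UP8 (e.g. biasSize = 10 allocates 16 floats, the last 6 cleared), presumably so
// vectorized reads past the last channel stay in bounds, then copy the buffer into the bias image.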
ConvCommonExecution::ConvCommonExecution(const Convolution2D *conv2dParams, Backend *backend) {
    mResource.reset(new ConvResource);
    mOpenCLBackend = (OpenCLBackend *)backend;
    auto runtime = mOpenCLBackend->getOpenCLRuntime();
    int biasSize = conv2dParams->bias()->size();
    const float *biasDataPtr = conv2dParams->bias()->data();

    int buffer_size = ALIGN_UP8(biasSize) * sizeof(float);
    cl::Buffer biasBuffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, buffer_size);
    cl_int error;
    auto biasPtrCL = runtime->commandQueue().enqueueMapBuffer(biasBuffer, true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
    if(biasPtrCL != nullptr && error == CL_SUCCESS){
        ::memset(biasPtrCL, 0, ALIGN_UP8(biasSize) * sizeof(float));
        ::memcpy(biasPtrCL, biasDataPtr, biasSize * sizeof(float));
    }else{
        MNN_ERROR("Map error biasPtrCL == nullptr \n");
    }
    runtime->commandQueue().enqueueUnmapMemObject(biasBuffer, biasPtrCL);
    mResource->mBias.reset(Tensor::createDevice<float>({1, 1, 1, biasSize}));
    backend->onAcquireBuffer(mResource->mBias.get(), Backend::STATIC);
    copyBufferToImage(runtime, biasBuffer, openCLImage(mResource->mBias.get()), UP_DIV(biasSize, 4), 1, mOpenCLBackend->getPrecision());
}
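// When isExtra is true the op carries a fused Conv + PRelu: Extra attribute 0 packs the
// serialized Convolution2D and attribute 1 the PRelu slopes (uploaded further below).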
ConvCommonExecution::ConvCommonExecution(const Op *op, Backend *backend, bool isExtra) {
    mResource.reset(new ConvResource);
    mOpenCLBackend = (OpenCLBackend *)backend;
    auto runtime = mOpenCLBackend->getOpenCLRuntime();
    const Convolution2D *conv2dParams = nullptr;
    if(isExtra){
        conv2dParams = flatbuffers::GetRoot<Convolution2D>(op->main_as_Extra()->attr()->GetAs<Attribute>(0)->tensor()->uint8s()->data());
    }else{
        conv2dParams = op->main_as_Convolution2D();
    }
    int biasSize = conv2dParams->bias()->size();
    const float *biasDataPtr = conv2dParams->bias()->data();

    int buffer_size = ALIGN_UP8(biasSize) * sizeof(float);
    cl::Buffer biasBuffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, buffer_size);
    cl_int error;
    auto biasPtrCL = runtime->commandQueue().enqueueMapBuffer(biasBuffer, true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
    if(biasPtrCL != nullptr && error == CL_SUCCESS){
        ::memset(biasPtrCL, 0, ALIGN_UP8(biasSize) * sizeof(float));
        ::memcpy(biasPtrCL, biasDataPtr, biasSize * sizeof(float));
    }else{
        MNN_ERROR("Map error biasPtrCL == nullptr \n");
    }
    runtime->commandQueue().enqueueUnmapMemObject(biasBuffer, biasPtrCL);
    mResource->mBias.reset(Tensor::createDevice<float>({1, 1, 1, biasSize}));
    backend->onAcquireBuffer(mResource->mBias.get(), Backend::STATIC);
    copyBufferToImage(runtime, biasBuffer, openCLImage(mResource->mBias.get()), UP_DIV(biasSize, 4), 1, mOpenCLBackend->getPrecision());

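    // PRelu slopes: the code assumes one slope per output channel, so the slope upload
    // reuses biasSize and the already-aligned buffer_size from the bias staging above.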
if(isExtra){
        const PRelu* preluParam = flatbuffers::GetRoot<PRelu>(op->main_as_Extra()->attr()->GetAs<Attribute>(1)->tensor()->uint8s()->data());
        const float *slopeDataPtr = preluParam->slope()->data();
        cl::Buffer slopeBuffer(runtime->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, buffer_size);
        cl_int error;
        auto slopePtrCL = runtime->commandQueue().enqueueMapBuffer(slopeBuffer, true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
        if(slopePtrCL != nullptr && error == CL_SUCCESS){
            ::memset(slopePtrCL, 0, ALIGN_UP8(biasSize) * sizeof(float));
            ::memcpy(slopePtrCL, slopeDataPtr, biasSize * sizeof(float));
        }else{
            MNN_ERROR("Map error slopePtrCL == nullptr \n");
        }
        runtime->commandQueue().enqueueUnmapMemObject(slopeBuffer, slopePtrCL);
        mResource->mSlope.reset(Tensor::createDevice<float>({1, 1, 1, biasSize}));
        backend->onAcquireBuffer(mResource->mSlope.get(), Backend::STATIC);
        copyBufferToImage(runtime, slopeBuffer, openCLImage(mResource->mSlope.get()), UP_DIV(biasSize, 4), 1, mOpenCLBackend->getPrecision());
    }
}
ConvCommonExecution::~ConvCommonExecution() {
    // Do nothing
}
ConvExecution::ConvExecution(std::shared_ptr<ConvResource> resource, const MNN::Op* op, Backend *backend)
    : CommonExecution(backend, op), ConvCommonExecution(backend) {
    mResource = resource;
    const auto *conv2dParams = op->main_as_Convolution2D();
    const auto *conv2dCommonParams = conv2dParams->common();
    mResource->mConv2dParams = conv2dParams;
    mResource->mConv2dCommonParams = conv2dCommonParams;
}
bool ConvExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
    if (!mValid) {
        return false;
    }
    if (nullptr == dst) {
        return true;
    }
    *dst = new ConvExecution(mResource, op, bn);
    return true;
}
ConvExecution::ConvExecution(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs, const MNN::Op *op, Backend *backend, bool isExtra)
    : CommonExecution(backend, op), ConvCommonExecution(op, backend, isExtra) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ConvExecution init !\n");
#endif
    mOpenCLBackend = static_cast<OpenCLBackend *>(backend);
    const Convolution2D* conv2dParams = nullptr;
    if(isExtra){
        conv2dParams = flatbuffers::GetRoot<Convolution2D>(op->main_as_Extra()->attr()->GetAs<Attribute>(0)->tensor()->uint8s()->data());
        mResource->mPrelu = true;
    }else{
        conv2dParams = op->main_as_Convolution2D();
    }
    const auto *conv2dCommonParams = conv2dParams->common();
    mResource->mConv2dCommonParams = conv2dCommonParams;
    mResource->mStrides = {conv2dCommonParams->strideY(), conv2dCommonParams->strideX()};
    mResource->mDilations = {conv2dCommonParams->dilateY(), conv2dCommonParams->dilateX()};
    mResource->mRelu = conv2dCommonParams->relu();
    mResource->mRelu6 = conv2dCommonParams->relu6();
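    // Note the axis swap: convolutionPad returns (width pad, height pad), while mPaddings
    // is stored {height, width} to match the kernels' {y, x} shape ordering.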
    auto pad = ConvolutionCommon::convolutionPad(inputs[0], outputs[0], mResource->mConv2dCommonParams);
    mPaddings[0] = pad.second;
    mPaddings[1] = pad.first;

    int kernelWidth = conv2dCommonParams->kernelX();
    int kernelHeight = conv2dCommonParams->kernelY();
    int outputChannel = conv2dCommonParams->outputCount();
    auto gpuType = mOpenCLBackend->getOpenCLRuntime()->getGpuType();
#ifndef MNN_OPENCL_BUFFER_CLOSED
    mResource->mWeightUseBuffer = gpuType == GpuType::MALI;
#endif

    int weightSize = 0;
    const float *filterDataPtr = nullptr;

    std::shared_ptr<MNN::ConvolutionCommon::Int8Common> quanCommon;
    if (nullptr != conv2dParams->quanParameter()) {
        quanCommon = ConvolutionCommon::load(op, backend, true);
        if (nullptr == quanCommon) {
            MNN_ERROR("Memory not Enough, can't extract IDST Convolution: %s \n", op->name()->c_str());
        }
        if (quanCommon->weightFloat.get() == nullptr) {
            MNN_PRINT("quanCommon->weightFloat.get() == nullptr \n");
        }
        // Back to float
        filterDataPtr = quanCommon->weightFloat.get();
        weightSize = quanCommon->weightFloat.size();
    } else if (nullptr == conv2dParams->weight() || nullptr == conv2dParams->bias()) {
        MNN_ERROR("%s has no weight or bias. The model may be benchmark model, please revert the weight/bias firstly\n", op->name()->c_str());
    }

    if (nullptr == filterDataPtr) {
        weightSize = conv2dParams->weight()->size();
        filterDataPtr = conv2dParams->weight()->data();
    }
    int inputChannel = weightSize / (kernelWidth * kernelHeight * outputChannel);
    // Select the optimal conv method
    std::string kernelName = "conv_2d_c4h1w4";
    if (kernelHeight == kernelWidth && kernelHeight == 1 && mPaddings[0] == 0 && mPaddings[1] == 0) {
        mResource->mConv1x1Opt = (mResource->mStrides[0] == 1 && mResource->mStrides[1] == 1 && gpuType == GpuType::MALI && !mResource->mWeightUseBuffer);
        if(mResource->mConv1x1Opt){
            kernelName = "conv_2d_1x1_mali";
        }else{
            kernelName = "conv_2d_1x1";
        }
    }
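
    // Note: the conv_2d_1x1_mali path appears only reachable when the buffer path is
    // compiled out (MNN_OPENCL_BUFFER_CLOSED), since Mali otherwise sets mWeightUseBuffer above.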
if(mResource->mConv1x1Opt){
        cl_int error;
        std::shared_ptr<Tensor> filterBuffer(Tensor::createDevice<float>({UP_DIV(outputChannel, 4)*4, UP_DIV(inputChannel, 4)*4, kernelWidth, kernelHeight}));

        int buffer_size = filterBuffer->elementSize();
        if(mOpenCLBackend->getPrecision() != BackendConfig::Precision_High) {
            buffer_size *= sizeof(half_float::half);
        } else {
            buffer_size *= sizeof(float);
        }

        mResource->mKernelBuffer.reset(new cl::Buffer(mOpenCLBackend->getOpenCLRuntime()->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, buffer_size));
        auto kernelBufferPtr = mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueMapBuffer(*(mResource->mKernelBuffer.get()), true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
        if(kernelBufferPtr != nullptr && error == CL_SUCCESS){
            ::memset(kernelBufferPtr, 0, buffer_size);
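            // Reorder OIHW weights into 4x4 channel tiles: 4 output channels by 4 input
            // channels interleaved, matching the layout the conv_2d_1x1_mali kernel reads.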
for(int o = 0; o < outputChannel; o++){
                for(int i = 0 ; i < inputChannel; i++){
                    int bufferIdx = (o/4) * ROUND_UP(inputChannel, 4)*4 + (i/4)*16 + (o%4)*4 + (i%4);
                    int filterIdx = o*inputChannel + i;
                    if(mOpenCLBackend->getPrecision() != BackendConfig::Precision_High){
                        ((half_float::half*)kernelBufferPtr)[bufferIdx] = (half_float::half)(filterDataPtr[filterIdx]);
                    }else{
                        ((float*)kernelBufferPtr)[bufferIdx] = (float)(filterDataPtr[filterIdx]);
                    }
                }
            }
        }else{
            MNN_ERROR("Map error ptrCL == nullptr \n");
        }
        mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(*(mResource->mKernelBuffer.get()), kernelBufferPtr);
}else if(kernelHeight == kernelWidth && kernelHeight == 1 && mPaddings[0] == 0 && mPaddings[1] == 0 && mResource->mWeightUseBuffer){
        cl_int error;
        std::shared_ptr<Tensor> filterBuffer(Tensor::createDevice<float>({UP_DIV(outputChannel, 4), ROUND_UP(inputChannel, 4), 4}));

        int buffer_size = filterBuffer->elementSize();
        if(mOpenCLBackend->getPrecision() != BackendConfig::Precision_High) {
            buffer_size *= sizeof(half_float::half);
        } else {
            buffer_size *= sizeof(float);
        }

        mResource->mKernelBuffer.reset(new cl::Buffer(mOpenCLBackend->getOpenCLRuntime()->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, buffer_size));
        auto kernelBufferPtr = mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueMapBuffer(*(mResource->mKernelBuffer.get()), true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
        if(kernelBufferPtr != nullptr && error == CL_SUCCESS){
            ::memset(kernelBufferPtr, 0, buffer_size);
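            // Buffer-hosted 1x1 weights use a flatter layout: for each block of 4 output
            // channels, all (rounded-up) input channels are laid out consecutively.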
for(int o = 0; o < outputChannel; o++){
                for(int i = 0 ; i < inputChannel; i++){
                    int bufferIdx = (o/4) * ROUND_UP(inputChannel, 4)*4 + i*4 + (o%4);
                    int filterIdx = o*inputChannel + i;
                    if(mOpenCLBackend->getPrecision() != BackendConfig::Precision_High){
                        ((half_float::half*)kernelBufferPtr)[bufferIdx] = (half_float::half)(filterDataPtr[filterIdx]);
                    }else{
                        ((float*)kernelBufferPtr)[bufferIdx] = (float)(filterDataPtr[filterIdx]);
                    }
                }
            }
        }else{
            MNN_ERROR("Map error ptrCL == nullptr \n");
        }
        mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(*(mResource->mKernelBuffer.get()), kernelBufferPtr);
}else{
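        // General case: stage OIHW weights with input channels padded to a multiple of 4,
        // then convert to either an NC4HW4 buffer or a filter image below.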
        std::vector<int> filterImageShape{(int)ROUND_UP(inputChannel, 4), (int)(UP_DIV(outputChannel, 4) * kernelWidth * kernelHeight)};
        std::shared_ptr<Tensor> filterBuffer(
            Tensor::createDevice<float>({outputChannel, ROUND_UP(inputChannel, 4), kernelWidth, kernelHeight}));

        size_t buffer_size = filterBuffer->elementSize() * sizeof(float);
        cl::Buffer filterBufferCL(mOpenCLBackend->getOpenCLRuntime()->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, buffer_size);
        filterBuffer->buffer().device = (uint64_t)(&filterBufferCL);
        cl_int error;
        auto ptrCL = mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueMapBuffer(filterBufferCL, true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
        if(ptrCL != nullptr && error == CL_SUCCESS) {
            ::memset(ptrCL, 0, buffer_size);
            int cpySrcNum = inputChannel * kernelWidth * kernelHeight;
            int cpyDstNum = ROUND_UP(inputChannel, 4) * kernelWidth * kernelHeight;
            int cpysize = cpySrcNum * sizeof(float);
            for(int o = 0; o < outputChannel; ++o){
                ::memcpy((float*)ptrCL + o * cpyDstNum, filterDataPtr + o * cpySrcNum, cpysize);
            }
        }else{
            MNN_ERROR("Map error ptrCL == nullptr \n");
        }
        mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(filterBufferCL, ptrCL);
#ifndef MNN_OPENCL_BUFFER_CLOSED
if(mResource->mWeightUseBuffer){
            mResource->mFilter.reset(Tensor::createDevice<float>({UP_DIV(inputChannel, 4)*4, UP_DIV(outputChannel, 4), kernelWidth * kernelHeight, 4}));
            int kernel_buffer_size = UP_DIV(outputChannel, 4)*4* UP_DIV(inputChannel, 4)*4* kernelWidth* kernelHeight;
            if(mOpenCLBackend->getPrecision() != BackendConfig::Precision_High) {
                kernel_buffer_size *= sizeof(half_float::half);
            } else {
                kernel_buffer_size *= sizeof(float);
            }
            mResource->mKernelBuffer.reset(new cl::Buffer(mOpenCLBackend->getOpenCLRuntime()->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, kernel_buffer_size));
            mResource->mFilter.get()->buffer().device = (uint64_t)mResource->mKernelBuffer.get();
            MNN::OpenCL::BufferConvertor bufferConvertor{mOpenCLBackend->getOpenCLRuntime()};

            bool needTrans = true;
            bufferConvertor.convertToNC4HW4Buffer(filterBuffer.get(), MNN::OpenCL::CONV2D_FILTER, mResource->mFilter.get(), mOpenCLBackend->getPrecision(), needTrans);
        } else
#endif
        {
            mResource->mFilter.reset(Tensor::createDevice<float>({1, filterImageShape[1], 1, 4 * filterImageShape[0]}));
            mOpenCLBackend->onAcquireBuffer(mResource->mFilter.get(), Backend::STATIC);
            MNN::OpenCL::ImageBufferConvertor imageBufferConvertor{mOpenCLBackend->getOpenCLRuntime()};

            std::string buildOption = "-DBUFFER_INP_FP32";
            imageBufferConvertor.convertBufferToImage(filterBuffer.get(), MNN::OpenCL::CONV2D_FILTER, mResource->mFilter.get(), mOpenCLBackend->getPrecision(), false, buildOption);
        }
    }
// Create Kernel
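    // Build options specialize the kernel at compile time: bias add and the fused
    // activation (RELU / RELU6 / PRELU) are toggled with preprocessor macros.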
    if (mResource->mStrides[0] == 1 && mResource->mStrides[1] == 1 && mResource->mDilations[0] == 1 && mResource->mDilations[1] == 1) {
        mResource->mBuildOptions.emplace("-DMNN_CONV_S1D1");
    }
    mResource->mBuildOptions.emplace("-DBIAS");
    if (mResource->mRelu) {
        mResource->mBuildOptions.emplace("-DRELU");
    } else if (mResource->mRelu6) {
        mResource->mBuildOptions.emplace("-DRELU6");
    } else if (mResource->mPrelu) {
        mResource->mBuildOptions.emplace("-DPRELU");
    }
if(mResource->mWeightUseBuffer){
        mResource->mBuildOptions.emplace("-DUSE_BUFFER");
    }
#ifdef LOG_VERBOSE
    MNN_PRINT("end ConvExecution init !\n");
#endif
}
ConvExecution::~ConvExecution() {
    // Do nothing
}
ErrorCode ConvExecution::onEncode(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ConvExecution onEncode !\n");
#endif
    mUnits.resize(1);
    auto &unit = mUnits[0];
    auto input = inputs[0];
    auto output = outputs[0];
std::vector<int> inputShape = tensorShapeFormat(input);
    std::vector<int> outputShape = tensorShapeFormat(output);
    const int height = outputShape.at(1);
    const int width = outputShape.at(2);
    const int channel = outputShape.at(3);

    const int inputHeight = inputShape.at(1);
    const int inputWidth = inputShape.at(2);
    const int inputChannels = inputShape.at(3);

    const int inputChannelBlocks = UP_DIV(inputChannels, 4);
    int kernelHeight = mResource->mConv2dCommonParams->kernelY();
    int kernelWidth = mResource->mConv2dCommonParams->kernelX();
auto pad = ConvolutionCommon::convolutionPad(input, output, mResource->mConv2dCommonParams);
    mPaddings[0] = pad.second;
    mPaddings[1] = pad.first;
std::string info = std::to_string(inputChannels) + "_" + std::to_string(channel) + "_" + std::to_string(kernelHeight) + "_" + std::to_string(kernelWidth) + "_" + std::to_string(mResource->mStrides[0]) + "_" + std::to_string(mResource->mStrides[1]) + "_" + std::to_string(mResource->mDilations[0]) + "_" + std::to_string(mResource->mDilations[1]);
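    // This shape signature suffixes the kernel name when tuning, so cached work-group
    // sizes are reused only for matching configurations.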
if (kernelHeight == kernelWidth && kernelHeight == 1 && mPaddings[0] == 0 && mPaddings[1] == 0) {
if(mResource->mConv1x1Opt){
            std::string kernelName = "conv_2d_1x1_mali";
            unit.kernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName, mResource->mBuildOptions, mOpenCLBackend->getPrecision());
            uint32_t idx = 0;
            mGlobalWorkSize = {static_cast<uint32_t>(UP_DIV(outputShape.at(3), 4) * UP_DIV(outputShape.at(2), 4)),
                               static_cast<uint32_t>(outputShape.at(0) * outputShape.at(1))};
            unit.kernel->get().setArg(idx++, mGlobalWorkSize[0]);
            unit.kernel->get().setArg(idx++, mGlobalWorkSize[1]);
            unit.kernel->get().setArg(idx++, UP_DIV(width, 4));
            unit.kernel->get().setArg(idx++, openCLImage(input));
            unit.kernel->get().setArg(idx++, *mResource->mKernelBuffer.get());
            unit.kernel->get().setArg(idx++, openCLImage(mResource->mBias.get()));
            unit.kernel->get().setArg(idx++, openCLImage(output));
            unit.kernel->get().setArg(idx++, static_cast<int>(inputChannelBlocks));
            unit.kernel->get().setArg(idx++, height);
            unit.kernel->get().setArg(idx++, width);
            if(mResource->mPrelu){
                unit.kernel->get().setArg(idx++, openCLImage(mResource->mSlope.get()));
            }
mLocalWorkSize = localWS2DDefault(mGlobalWorkSize, mResource->mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime(), kernelName, unit.kernel, mOpenCLBackend->getCLTuneLevel(), "conv_2d").first;
            mOpenCLBackend->recordKernel2d(unit.kernel, mGlobalWorkSize, mLocalWorkSize);
}else{
            int inputImageShape[2] = {inputHeight, inputWidth};
            int outputImageShape[2] = {height, width};
            int strideShape[2] = {mResource->mStrides[0], mResource->mStrides[1]};
            const int total_kernel = 2;
            std::string kernelName[total_kernel] = {"conv_2d_1x1", "conv_2d_1x1_c8h1w4"};
            int itemC[total_kernel] = {4, 8};
            int itemH[total_kernel] = {1, 1};
            int itemW[total_kernel] = {4, 4};

            int actual_kernel = total_kernel;

            std::shared_ptr<KernelWrap> kernel[total_kernel];
            std::vector<uint32_t> globalWorkSize[total_kernel];
            std::vector<uint32_t> localWorkSize[total_kernel];
            std::pair<int, int> min_cost(INT_MAX, 0);//(min_time, min_index)
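            // Auto-tune: build each candidate, probe its best local work-group size with
            // localWS2DDefault, and keep the cheapest. Note only candidate 0 is probed
            // here (knl_idx < 1), so conv_2d_1x1_c8h1w4 is currently skipped.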
for(int knl_idx = 0; knl_idx < 1; knl_idx++) {
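                // c8 kernels write 8 output channels per work-item; when the real channel
                // count ends inside the second 4-block, CHANNEL_BOUNDARY_PROTECT guards
                // the tail writes.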
                std::set<std::string> buildOption = mResource->mBuildOptions;
                if(itemC[knl_idx] == 8 && outputShape.at(3) % itemC[knl_idx] > 0 && outputShape.at(3) % itemC[knl_idx] <= 4){
                    buildOption.emplace("-DCHANNEL_BOUNDARY_PROTECT");
                }
                kernel[knl_idx] = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName[knl_idx], buildOption, mOpenCLBackend->getPrecision());
                uint32_t maxWorkGroupSize = static_cast<uint32_t>(mOpenCLBackend->getOpenCLRuntime()->getMaxWorkGroupSize(kernel[knl_idx]));

                globalWorkSize[knl_idx] = {static_cast<uint32_t>(UP_DIV(outputShape.at(3), itemC[knl_idx]) * UP_DIV(outputShape.at(2), itemW[knl_idx])), static_cast<uint32_t>(outputShape.at(0) * UP_DIV(outputShape.at(1), itemH[knl_idx]))};
                uint32_t idx = 0;
                kernel[knl_idx]->get().setArg(idx++, globalWorkSize[knl_idx][0]);
                kernel[knl_idx]->get().setArg(idx++, globalWorkSize[knl_idx][1]);
                kernel[knl_idx]->get().setArg(idx++, openCLImage(input));
                if(mResource->mWeightUseBuffer){
                    kernel[knl_idx]->get().setArg(idx++, *mResource->mKernelBuffer.get());
                }else{
                    kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mFilter.get()));
                }
                kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mBias.get()));
                kernel[knl_idx]->get().setArg(idx++, openCLImage(output));
                kernel[knl_idx]->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
                kernel[knl_idx]->get().setArg(idx++, static_cast<int>(inputChannelBlocks));
                kernel[knl_idx]->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
                kernel[knl_idx]->get().setArg(idx++, sizeof(strideShape), strideShape);
                kernel[knl_idx]->get().setArg(idx++, UP_DIV(width, 4));
                kernel[knl_idx]->get().setArg(idx++, UP_DIV(outputShape.at(3), 4));
                if(mResource->mPrelu){
                    kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mSlope.get()));
                }
std::pair<std::vector<uint32_t>, uint32_t> retTune;
retTune = localWS2DDefault(globalWorkSize[knl_idx], maxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime(), kernelName[knl_idx] + info, kernel[knl_idx], mOpenCLBackend->getCLTuneLevel(), "conv_2d");

                //printf("conv1x1 kernel_%d = %d [%d, %d]\n", knl_idx, retTune.second, retTune.first[0], retTune.first[1]);
                if(min_cost.first > retTune.second) {
                    min_cost.first = retTune.second;
                    min_cost.second = knl_idx;
                    mLocalWorkSize = {retTune.first[0], retTune.first[1]};
                }
            }
            int min_index = min_cost.second;
            //printf("min_index = %d %d\n", min_index, min_cost.first);
            mGlobalWorkSize = {globalWorkSize[min_index][0], globalWorkSize[min_index][1]};
            std::set<std::string> buildOption = mResource->mBuildOptions;
            if(itemC[min_index] == 8 && outputShape.at(3) % itemC[min_index] > 0 && outputShape.at(3) % itemC[min_index] <= 4){
                buildOption.emplace("-DCHANNEL_BOUNDARY_PROTECT");
            }
            unit.kernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName[min_index], buildOption, mOpenCLBackend->getPrecision());
uint32_t idx = 0;
            unit.kernel->get().setArg(idx++, mGlobalWorkSize[0]);
            unit.kernel->get().setArg(idx++, mGlobalWorkSize[1]);
            unit.kernel->get().setArg(idx++, openCLImage(input));
            if(mResource->mWeightUseBuffer){
                unit.kernel->get().setArg(idx++, *mResource->mKernelBuffer.get());
            }else{
                unit.kernel->get().setArg(idx++, openCLImage(mResource->mFilter.get()));
            }
            unit.kernel->get().setArg(idx++, openCLImage(mResource->mBias.get()));
            unit.kernel->get().setArg(idx++, openCLImage(output));
            unit.kernel->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
            unit.kernel->get().setArg(idx++, static_cast<int>(inputChannelBlocks));
            unit.kernel->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
            unit.kernel->get().setArg(idx++, sizeof(strideShape), strideShape);
            unit.kernel->get().setArg(idx++, UP_DIV(width, 4));
            unit.kernel->get().setArg(idx++, UP_DIV(outputShape.at(3), 4));
            if(mResource->mPrelu){
                unit.kernel->get().setArg(idx++, openCLImage(mResource->mSlope.get()));
            }
            mOpenCLBackend->recordKernel2d(unit.kernel, mGlobalWorkSize, mLocalWorkSize);
}
}else {
        int inputImageShape[2] = {inputHeight, inputWidth};
        int outputImageShape[2] = {height, width};
        int kernelShape[2] = {kernelHeight, kernelWidth};
        int strideShape[2] = {mResource->mStrides[0], mResource->mStrides[1]};
        int paddingShape[2] = {mPaddings[0], mPaddings[1]};
        int dilationShape[2] = {mResource->mDilations[0], mResource->mDilations[1]};

        const int total_kernel = 3;
        std::string kernelName[total_kernel] = {"conv_2d_c4h1w4", "conv_2d_c4h4w1", "conv_2d_c8h4w1"};
        int itemC[total_kernel] = {4, 4, 8};
        int itemH[total_kernel] = {1, 4, 4};
        int itemW[total_kernel] = {4, 1, 1};
int actual_kernel = total_kernel;
std::shared_ptr<KernelWrap> kernel[total_kernel];
        std::vector<uint32_t> globalWorkSize[total_kernel];
        std::vector<uint32_t> localWorkSize[total_kernel];
        std::pair<int, int> min_cost(INT_MAX, 0);//(min_time, min_index)
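        // Auto-tune across the three candidate tilings (c4h1w4, c4h4w1, c8h4w1) and keep
        // the configuration with the lowest measured cost.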
for(int knl_idx = 0; knl_idx < total_kernel; knl_idx++) {
            std::set<std::string> buildOption = mResource->mBuildOptions;
            if(itemC[knl_idx] == 8 && outputShape.at(3) % itemC[knl_idx] > 0 && outputShape.at(3) % itemC[knl_idx] <= 4){
                buildOption.emplace("-DCHANNEL_BOUNDARY_PROTECT");
            }
            kernel[knl_idx] = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName[knl_idx], buildOption, mOpenCLBackend->getPrecision());
            uint32_t maxWorkGroupSize = static_cast<uint32_t>(mOpenCLBackend->getOpenCLRuntime()->getMaxWorkGroupSize(kernel[knl_idx]));

globalWorkSize[knl_idx] = {static_cast<uint32_t>(UP_DIV(outputShape.at(3), itemC[knl_idx]) * UP_DIV(outputShape.at(2), itemW[knl_idx])), static_cast<uint32_t>(outputShape.at(0) * UP_DIV(outputShape.at(1), itemH[knl_idx]))};
            uint32_t idx = 0;
            cl_int ret = CL_SUCCESS;
            ret |= kernel[knl_idx]->get().setArg(idx++, globalWorkSize[knl_idx][0]);
            ret |= kernel[knl_idx]->get().setArg(idx++, globalWorkSize[knl_idx][1]);
            ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(input));
            if(mResource->mWeightUseBuffer){
                ret |= kernel[knl_idx]->get().setArg(idx++, openCLBuffer(mResource->mFilter.get()));
            }else{
                ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mFilter.get()));
            }
            ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mBias.get()));
            ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(output));
            ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
            ret |= kernel[knl_idx]->get().setArg(idx++, inputChannelBlocks);
            ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
            ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(kernelShape), kernelShape);
            ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(strideShape), strideShape);
            ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(paddingShape), paddingShape);
            ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(dilationShape), dilationShape);
            ret |= kernel[knl_idx]->get().setArg(idx++, UP_DIV(width, itemW[knl_idx]));
            ret |= kernel[knl_idx]->get().setArg(idx++, UP_DIV(outputShape.at(3), 4));
            ret |= kernel[knl_idx]->get().setArg(idx++, UP_DIV(height, itemH[knl_idx]));
            if(mResource->mPrelu){
                ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mSlope.get()));
            }
            MNN_CHECK_CL_SUCCESS(ret, "setArg ConvExecution Kernel Select");
std::pair<std::vector<uint32_t>, uint32_t> retTune;
retTune = localWS2DDefault(globalWorkSize[knl_idx], maxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime(), kernelName[knl_idx] + info, kernel[knl_idx], mOpenCLBackend->getCLTuneLevel(), "conv_2d");

            if(min_cost.first > retTune.second) {
                min_cost.first = retTune.second;
                min_cost.second = knl_idx;
                mLocalWorkSize = {retTune.first[0], retTune.first[1]};
            }
        }
        int min_index = min_cost.second;
        mGlobalWorkSize = {globalWorkSize[min_index][0], globalWorkSize[min_index][1]};
        std::set<std::string> buildOption = mResource->mBuildOptions;
        if(itemC[min_index] == 8 && outputShape.at(3) % itemC[min_index] > 0 && outputShape.at(3) % itemC[min_index] <= 4){
            buildOption.emplace("-DCHANNEL_BOUNDARY_PROTECT");
        }
        unit.kernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName[min_index], buildOption, mOpenCLBackend->getPrecision());
uint32_t idx = 0;
        cl_int ret = CL_SUCCESS;
        ret |= unit.kernel->get().setArg(idx++, mGlobalWorkSize[0]);
        ret |= unit.kernel->get().setArg(idx++, mGlobalWorkSize[1]);
        ret |= unit.kernel->get().setArg(idx++, openCLImage(input));
        if(mResource->mWeightUseBuffer){
            ret |= unit.kernel->get().setArg(idx++, openCLBuffer(mResource->mFilter.get()));
        }else{
            ret |= unit.kernel->get().setArg(idx++, openCLImage(mResource->mFilter.get()));
        }
        ret |= unit.kernel->get().setArg(idx++, openCLImage(mResource->mBias.get()));
        ret |= unit.kernel->get().setArg(idx++, openCLImage(output));
        ret |= unit.kernel->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
        ret |= unit.kernel->get().setArg(idx++, inputChannelBlocks);
        ret |= unit.kernel->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
        ret |= unit.kernel->get().setArg(idx++, sizeof(kernelShape), kernelShape);
        ret |= unit.kernel->get().setArg(idx++, sizeof(strideShape), strideShape);
        ret |= unit.kernel->get().setArg(idx++, sizeof(paddingShape), paddingShape);
        ret |= unit.kernel->get().setArg(idx++, sizeof(dilationShape), dilationShape);
        ret |= unit.kernel->get().setArg(idx++, UP_DIV(width, itemW[min_index]));
        ret |= unit.kernel->get().setArg(idx++, UP_DIV(outputShape.at(3), 4));
        ret |= unit.kernel->get().setArg(idx++, UP_DIV(height, itemH[min_index]));
        if(mResource->mPrelu){
            ret |= unit.kernel->get().setArg(idx++, openCLImage(mResource->mSlope.get()));
        }
        MNN_CHECK_CL_SUCCESS(ret, "setArg ConvExecution");
        mOpenCLBackend->recordKernel2d(unit.kernel, mGlobalWorkSize, mLocalWorkSize);
}
unit.globalWorkSize = {mGlobalWorkSize[0], mGlobalWorkSize[1]};
    unit.localWorkSize = {mLocalWorkSize[0], mLocalWorkSize[1]};
#ifdef LOG_VERBOSE
    MNN_PRINT("end ConvExecution onEncode !\n");
#endif
    return NO_ERROR;
}
class ConvolutionCreator : public OpenCLBackend::Creator {
public:
    virtual ~ConvolutionCreator() = default;
    virtual Execution *onCreate(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
                                const MNN::Op *op, Backend *backend) const override {
auto conv2D = op->main_as_Convolution2D();
        std::vector<int> inputShape = tensorShapeFormat(inputs[0]);
        const int inputChannels = inputShape.at(3);
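        // Low-memory mode: quantized weights (quan type 1, 2 or 4) are handled by
        // ConvLowMemoryExecution, which dequantizes on the fly; IDST int8 with scaleInt
        // is rejected because of known errors.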
#if defined(MNN_LOW_MEMORY) && not defined(MNN_OPENCL_BUFFER_CLOSED)
        if (static_cast<OpenCLBackend *>(backend)->getMemory() == BackendConfig::Memory_Low){
            auto conv2dParams = op->main_as_Convolution2D();
            if (conv2dParams->quanParameter() != nullptr) {
                if (((conv2dParams->quanParameter()->type() == 4) ||
                     (conv2dParams->quanParameter()->type() == 1) ||
                     (conv2dParams->quanParameter()->type() == 2))) {
                    if ((1 == conv2dParams->quanParameter()->type() || 2 == conv2dParams->quanParameter()->type()) && conv2dParams->quanParameter()->has_scaleInt()) {
                        // Don't support IDST-int8 because of error
                        return nullptr;
                    }
                    return new ConvLowMemoryExecution(inputs, outputs, op, backend);
                } else {
                    //MNN_ERROR("OpenCL Conv buf low memory init error. For Opencl Backend, only support low memory mode of int8 or int4 dequantization currently.\n");
                    return nullptr;
                }
            }
        }
#endif
if(op->main_as_Convolution2D()->common()->group() > 1){
            // Don't support group > 1 now
            return nullptr;
        }
if (inputs.size() > 1) {
            return nullptr;
        }
if (nullptr != op->main_as_Convolution2D()->quanParameter()) {
            auto quan = op->main_as_Convolution2D()->quanParameter();
            if (1 == quan->type() || 2 == quan->type()) {
                if (quan->has_scaleInt()) {
                    // Don't support IDST-int8 because of error
                    return nullptr;
                }
            }
        }
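        // Dispatch order: Winograd when the op qualifies and the packed filter fits the
        // device's image2d limits, otherwise the direct ConvExecution below.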
int maxWidth = static_cast<OpenCLBackend *>(backend)->getOpenCLRuntime()->getMaxImage2DSize()[0];
        int maxHeight = static_cast<OpenCLBackend *>(backend)->getOpenCLRuntime()->getMaxImage2DSize()[1];
if (ConvWinograd::valid(conv2D->common(), inputs[0], outputs[0], maxWidth, maxHeight)) {
            return new ConvWinograd(op, backend);
        }
return new ConvExecution(inputs, outputs, op, backend);
    }
};
REGISTER_OPENCL_OP_CREATOR(ConvolutionCreator, OpType_Convolution, IMAGE);
} // namespace OpenCL
} // namespace MNN