//
//  DepthwiseDeconvExecution.cpp
//  MNN
//
//  Created by MNN on 2019/02/28.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "backend/opencl/execution/image/DepthwiseDeconvExecution.hpp"
#include "backend/opencl/execution/image/MultiInputDWDeconvExecution.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include "core/ConvolutionCommon.hpp"

namespace MNN {
namespace OpenCL {

DepthwiseDeconvExecution::DepthwiseDeconvExecution(const std::vector<Tensor *> &inputs, const MNN::Op *op,
                                                   Backend *backend)
    : ConvCommonExecution(op->main_as_Convolution2D(), backend), CommonExecution(backend, op) {
    mResource->mConv2dParams       = op->main_as_Convolution2D();
    mResource->mConv2dCommonParams = mResource->mConv2dParams->common();
    mResource->mStrides            = {mResource->mConv2dCommonParams->strideY(), mResource->mConv2dCommonParams->strideX()};
    mResource->mDilations          = {mResource->mConv2dCommonParams->dilateY(), mResource->mConv2dCommonParams->dilateX()};

    int kernelWidth   = mResource->mConv2dCommonParams->kernelX();
    int kernelHeight  = mResource->mConv2dCommonParams->kernelY();
    int outputChannel = mResource->mConv2dCommonParams->outputCount();

    std::vector<int> filterShape{1, outputChannel, kernelHeight, kernelWidth};
    std::vector<int> filterImageShape{(int)kernelHeight * kernelWidth, (int)UP_DIV(outputChannel, 4)};
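
    // Fetch the raw filter weights from the op. getConvParameters yields a float
    // pointer, dequantizing through quanCommon when the model stores the weights quantized.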
    const float* filterDataPtr = nullptr;
    int tempWeightSize = 0;
    std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
    ConvolutionCommon::getConvParameters(&quanCommon, backend, op, &filterDataPtr, &tempWeightSize);

    mResource->mFilter.reset(Tensor::createDevice<float>({1, filterImageShape[1], 1, 4 * filterImageShape[0]}));
    std::shared_ptr<Tensor> filterBuffer(Tensor::createDevice<float>(filterShape));
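
    // Stage the host weights in a mappable OpenCL buffer: map it for writing,
    // copy the filter data in, then unmap so the device owns the contents.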
    size_t buffer_size = filterBuffer->elementSize() * sizeof(float);
    cl::Buffer filterBufferCL(mOpenCLBackend->getOpenCLRuntime()->context(), CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, buffer_size);
    filterBuffer->buffer().device = (uint64_t)(&filterBufferCL);
    cl_int error;
    auto ptrCL = mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueMapBuffer(filterBufferCL, true, CL_MAP_WRITE, 0, buffer_size, nullptr, nullptr, &error);
    if (nullptr != ptrCL && error == CL_SUCCESS) {
        ::memcpy(ptrCL, filterDataPtr, filterBuffer->size());
    } else {
        MNN_ERROR("Map error ptrCL == nullptr \n");
    }
    mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(filterBufferCL, ptrCL);
    mOpenCLBackend->onAcquireBuffer(mResource->mFilter.get(), Backend::STATIC);
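
    // Convert the staged buffer into the image-backed filter tensor, laid out as
    // DW_CONV2D_FILTER for the depthwise kernels.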
    MNN::OpenCL::ImageBufferConvertor imageBufferConvertor{mOpenCLBackend->getOpenCLRuntime()};
    std::string buildOption = "-DBUFFER_INP_FP32";
    imageBufferConvertor.convertBufferToImage(filterBuffer.get(), MNN::OpenCL::DW_CONV2D_FILTER, mResource->mFilter.get(), mOpenCLBackend->getPrecision(), false, buildOption);
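
    // Fuse the activation into the kernel via compile-time build macros.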
    if (mResource->mConv2dCommonParams->relu() == true) {
        mResource->mBuildOptions.emplace("-DRELU");
    } else if (mResource->mConv2dCommonParams->relu6() == true) {
        mResource->mBuildOptions.emplace("-DRELU6");
    }
}

DepthwiseDeconvExecution::~DepthwiseDeconvExecution() {
    // Do nothing
}
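
// Shallow-copy constructor used by onClone: the clone shares the already prepared
// ConvResource (filter image, build options) instead of re-uploading the weights.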
DepthwiseDeconvExecution::DepthwiseDeconvExecution(std::shared_ptr<ConvResource> resource, const MNN::Op* op, Backend *backend)
    : ConvCommonExecution(backend), CommonExecution(backend, op) {
    mResource = resource;
    const auto *conv2dParams       = op->main_as_Convolution2D();
    const auto *conv2dCommonParams = conv2dParams->common();
    mResource->mConv2dParams       = conv2dParams;
    mResource->mConv2dCommonParams = conv2dCommonParams;
}
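
// By convention a null dst only queries whether cloning is supported; otherwise a
// clone that reuses mResource is created on the target backend.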
bool DepthwiseDeconvExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
    if (!mValid) {
        return false;
    }
    if (nullptr == dst) {
        return true;
    }
    *dst = new DepthwiseDeconvExecution(mResource, op, bn);
    return true;
}

ErrorCode DepthwiseDeconvExecution::onEncode(const std::vector<Tensor *> &inputs,
                                             const std::vector<Tensor *> &outputs) {
    mUnits.resize(1);
    auto &unit = mUnits[0];
    auto input  = inputs[0];
    auto output = outputs[0];
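
    // tensorShapeFormat() reports shapes in NHWC order regardless of the tensor's
    // internal layout.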
    std::vector<int> inputShape  = tensorShapeFormat(input);
    std::vector<int> outputShape = tensorShapeFormat(output);

    const int outputBatch    = outputShape.at(0);
    const int outputHeight   = outputShape.at(1);
    const int outputWidth    = outputShape.at(2);
    const int outputChannels = outputShape.at(3);

    const int inputHeight   = inputShape.at(1);
    const int inputWidth    = inputShape.at(2);
    const int inputChannels = inputShape.at(3);

    const int strideHeight = mResource->mStrides[0];
    const int strideWidth  = mResource->mStrides[1];

    const int channelBlocks = UP_DIV(outputChannels, 4);
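
    // convolutionTransposePad() returns the (width, height) padding of the transposed
    // convolution; the align terms are the offsets the kernel uses to map each output
    // coordinate back to the contributing input positions.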
    auto pad = ConvolutionCommon::convolutionTransposePad(input, output, mResource->mConv2dCommonParams);
    const int paddingHeight = pad.second;
    const int paddingWidth  = pad.first;

    const int alignHeight = strideHeight - 1 - paddingHeight;
    const int alignWidth  = strideWidth - 1 - paddingWidth;

    const int filterHeight = mResource->mConv2dCommonParams->kernelY();
    const int filterWidth  = mResource->mConv2dCommonParams->kernelX();
    const int kernelSize   = filterHeight * filterWidth;
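
    // One work-item per (channel block, output x, output y * batch).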
    mGWS = {static_cast<uint32_t>(channelBlocks), static_cast<uint32_t>(outputWidth),
            static_cast<uint32_t>(outputHeight * outputBatch)};

    std::string info = std::to_string(inputChannels) + "_" + std::to_string(outputChannels) + "_" + std::to_string(filterHeight) + "_" + std::to_string(filterWidth) + "_" + std::to_string(strideHeight) + "_" + std::to_string(strideWidth);
    auto runtime = mOpenCLBackend->getOpenCLRuntime();
    unit.kernel = runtime->buildKernel("depthwise_deconv2d", "depthwise_deconv2d", mResource->mBuildOptions, mOpenCLBackend->getPrecision());
    mMaxWorkGroupSize = static_cast<uint32_t>(runtime->getMaxWorkGroupSize(unit.kernel));

    int inputImageShape[2]  = {inputHeight, inputWidth};
    int outputImageShape[2] = {outputHeight, outputWidth};
    int strideShape[2]      = {strideHeight, strideWidth};
    int paddingShape[2]     = {paddingHeight, paddingWidth};
    int alignShape[2]       = {alignHeight, alignWidth};
    int kernelShape[2]      = {filterHeight, filterWidth};
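
    // Argument order must match the parameter list of the depthwise_deconv2d kernel.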
    uint32_t idx = 0;
    unit.kernel->get().setArg(idx++, mGWS[0]);
    unit.kernel->get().setArg(idx++, mGWS[1]);
    unit.kernel->get().setArg(idx++, mGWS[2]);

    unit.kernel->get().setArg(idx++, openCLImage(input));
    unit.kernel->get().setArg(idx++, openCLImage(mResource->mFilter.get()));
    unit.kernel->get().setArg(idx++, openCLImage(mResource->mBias.get()));
    unit.kernel->get().setArg(idx++, openCLImage(output));
    unit.kernel->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
    unit.kernel->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
    unit.kernel->get().setArg(idx++, sizeof(strideShape), strideShape);
    unit.kernel->get().setArg(idx++, sizeof(alignShape), alignShape);
    unit.kernel->get().setArg(idx++, sizeof(paddingShape), paddingShape);
    unit.kernel->get().setArg(idx++, sizeof(kernelShape), kernelShape);
    unit.kernel->get().setArg(idx++, static_cast<int32_t>(kernelSize));
    unit.kernel->get().setArg(idx++, static_cast<int32_t>(channelBlocks));
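
    // Pick a local work size via the backend's tuner; the shape string keys the
    // tuning cache so the result can be reused.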
    std::string name = "depthwiseDeconv";
    mLWS = localWS3DDefault(mGWS, mMaxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime(), name + info, unit.kernel, mOpenCLBackend->getCLTuneLevel(), "depthwise_deconv2d").first;
    mOpenCLBackend->recordKernel3d(unit.kernel, mGWS, mLWS);
    unit.globalWorkSize = {mGWS[0], mGWS[1], mGWS[2]};
    unit.localWorkSize  = {mLWS[0], mLWS[1], mLWS[2]};
    return NO_ERROR;
}

class DepthwiseDeconvolutionCreator : public OpenCLBackend::Creator {
public:
    virtual ~DepthwiseDeconvolutionCreator() = default;
    virtual Execution *onCreate(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs,
                                const MNN::Op *op, Backend *backend) const override {
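        // With extra inputs the filter (and optional bias) arrive as tensors at runtime,
        // so dispatch to the multi-input variant; otherwise weights are baked in here.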
        MNN_ASSERT(inputs.size() <= 3);
        if (inputs.size() == 2 || inputs.size() == 3) {
            return new MultiInputDWDeconvExecution(op, backend);
        }
        MNN_ASSERT(inputs.size() == 1);
        return new DepthwiseDeconvExecution(inputs, op, backend);
    }
};

REGISTER_OPENCL_OP_CREATOR(DepthwiseDeconvolutionCreator, OpType_DeconvolutionDepthwise, IMAGE);

} // namespace OpenCL
} // namespace MNN