//
//  VulkanNormalize.cpp
//  MNN
//
//  Created by MNN on 2019/01/31.
//  Copyright © 2018, Alibaba Group Holding Limited
//
2019-12-27 22:16:57 +08:00
|
|
|
#include "backend/vulkan/execution/VulkanNormalize.hpp"
|
|
|
|
#include "core/Macro.h"
|
|
|
|
#include "core/TensorUtils.hpp"
|
2019-04-17 10:49:11 +08:00
|
|
|
|
|
|
|
namespace MNN {
|
|
|
|
// Uniform parameter block shared by the normalize and scale compute passes.
// Field order/types must mirror the uniform declaration in the GLSL shaders
// (glsl_normalizeChannel_comp / glsl_scale_comp) — do not reorder.
// NOTE(review): assumes the shader uses a compatible std140-style layout
// with the ivec4 leading the block — confirm against the .comp sources.
struct GpuParam {
    ivec4 imgSize;    // [0]=input width, [1]=input height, [2]=UP_DIV(channel,4), [3]=unused (0)
    int channelDiv4;  // number of 4-channel slices (NC4HW4 layout)
    float eps;        // epsilon from the Normalize op parameters
};
|
|
|
|
|
2019-05-14 07:29:37 +08:00
|
|
|
// Builds both compute pipelines (channel normalize + scale) and uploads the
// constant scale/bias data. `op` must carry Normalize parameters; `bn` must be
// a VulkanBackend.
VulkanNormalize::VulkanNormalize(const Op* op, Backend* bn) : VulkanBasicExecution(bn) {
    auto normalizeParam = op->main_as_Normalize();
    mEps                = normalizeParam->eps();

    // Pass 1 bindings: output image, sampled input image, uniform params.
    std::vector<VkDescriptorType> normalizeTypes{
        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
        VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
    };
    // Pass 2 bindings: as above, plus scale and bias storage buffers.
    std::vector<VkDescriptorType> scaleTypes{
        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER};

    mVkBackend = static_cast<VulkanBackend*>(bn);
    mSampler   = mVkBackend->getCommonSampler();

    // Normalize pipeline and its uniform parameter buffer.
    mVulkanNormalizePipeline = mVkBackend->getPipeline(
        "glsl_normalizeChannel_comp",
        /*glsl_normalizeChannel_comp, glsl_normalizeChannel_comp_len,*/ normalizeTypes);
    mParamBuffer.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(GpuParam), nullptr,
                                        VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
    // Only per-channel scales are supported by this execution.
    MNN_ASSERT(normalizeParam->channelShared() == false);

    // Scale pipeline.
    mVulkanScalePipeline =
        mVkBackend->getPipeline("glsl_scale_comp", /*glsl_scale_comp, glsl_scale_comp_len,*/ scaleTypes);

    const auto scaleCount = normalizeParam->scale()->size();
    mScale.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(float) * scaleCount,
                                  normalizeParam->scale()->data(), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
    // Normalize has no bias term; upload a zero-filled buffer of matching size
    // so the scale shader can bind a bias unconditionally.
    mBias.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(float) * scaleCount, nullptr,
                                 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
    auto biasPtr = reinterpret_cast<float*>(mBias->map());
    ::memset(biasPtr, 0, sizeof(float) * scaleCount);
    mBias->unmap();
}
|
2019-05-14 07:29:37 +08:00
|
|
|
// All members release their Vulkan resources through their own destructors
// (the buffers/descriptor sets are held via .reset()-style smart pointers),
// so an empty user-written body adds nothing — default it explicitly.
VulkanNormalize::~VulkanNormalize() = default;
|
2019-05-14 07:29:37 +08:00
|
|
|
// Encodes two compute passes into cmdBuffer:
//   1) normalize: input image -> mTempTensor (channel normalization, see
//      glsl_normalizeChannel_comp shader)
//   2) scale:     mTempTensor -> output image (per-channel scale + zero bias)
// Requires NC4HW4 input. Returns NO_ERROR unconditionally.
ErrorCode VulkanNormalize::onEncode(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                    const VulkanCommandPool::Buffer* cmdBuffer) {
    auto input = inputs[0];
    auto output = outputs[0];
    const int channelDiv4 = UP_DIV(input->channel(), 4);

    // mTempTensor holds the intermediate normalize result between the passes.
    TensorUtils::copyShape(input, &mTempTensor, true);
    MNN_ASSERT(TensorUtils::getDescribe(input)->dimensionFormat == MNN_DATA_FORMAT_NC4HW4);
    // Acquire then immediately release: reserves dynamic-pool memory for this
    // op while letting later ops reuse it once encoding is done.
    backend()->onAcquireBuffer(&mTempTensor, Backend::DYNAMIC);
    backend()->onReleaseBuffer(&mTempTensor, Backend::DYNAMIC);

    auto tempTensorImage = mVkBackend->findTensor(mTempTensor.deviceId())->image();
    MNN_ASSERT(nullptr != tempTensorImage);
    // Fill the uniform block consumed by both pipelines (see GpuParam).
    auto VulkanNormalizeParam = reinterpret_cast<GpuParam*>(mParamBuffer->map());
    ::memset(VulkanNormalizeParam, 0, sizeof(GpuParam));

    VulkanNormalizeParam->imgSize[0] = input->width();
    VulkanNormalizeParam->imgSize[1] = input->height();
    VulkanNormalizeParam->imgSize[2] = channelDiv4;
    VulkanNormalizeParam->imgSize[3] = 0;
    VulkanNormalizeParam->channelDiv4 = channelDiv4;
    VulkanNormalizeParam->eps = mEps;

    mParamBuffer->flush(true, 0, sizeof(GpuParam));
    mParamBuffer->unmap();

    // Pass 1: normalize (input -> temp). Binding 0 = destination image,
    // binding 1 = sampled source, binding 2 = uniform params.
    mNormalizeDescriptorSet.reset(mVulkanNormalizePipeline->createSet());
    mNormalizeDescriptorSet->writeImage(reinterpret_cast<VkImageView>(mTempTensor.deviceId()), mSampler->get(),
                                        VK_IMAGE_LAYOUT_GENERAL, 0);
    mNormalizeDescriptorSet->writeImage(reinterpret_cast<VkImageView>(input->deviceId()), mSampler->get(),
                                        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 1);
    mNormalizeDescriptorSet->writeBuffer(mParamBuffer->buffer(), 2, mParamBuffer->size());
    mVulkanNormalizePipeline->bind(cmdBuffer->get(), mNormalizeDescriptorSet->get());

    // 8x8 tiles over (w, h), one z-group per batch — presumably matches the
    // normalize shader's local_size; confirm against glsl_normalizeChannel_comp.
    vkCmdDispatch(cmdBuffer->get(), UP_DIV(input->width(), 8), UP_DIV(input->height(), 8), input->batch());

    // Pass 2: scale (temp -> output). Bindings 2/3 are the scale/bias storage
    // buffers; binding 4 reuses the same uniform params.
    mScaleDescriptorSet.reset(mVulkanScalePipeline->createSet());
    mScaleDescriptorSet->writeImage(reinterpret_cast<VkImageView>(output->deviceId()), mSampler->get(),
                                    VK_IMAGE_LAYOUT_GENERAL, 0);
    mScaleDescriptorSet->writeImage(reinterpret_cast<VkImageView>(mTempTensor.deviceId()), mSampler->get(),
                                    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 1);
    mScaleDescriptorSet->writeBuffer(mScale->buffer(), 2, mScale->size());
    mScaleDescriptorSet->writeBuffer(mBias->buffer(), 3, mBias->size());
    mScaleDescriptorSet->writeBuffer(mParamBuffer->buffer(), 4, mParamBuffer->size());
    mVulkanScalePipeline->bind(cmdBuffer->get(), mScaleDescriptorSet->get());

    // Make pass 1's writes to the temp image visible (and transition its
    // layout) before pass 2 samples it.
    cmdBuffer->barrierImage(tempTensorImage->get(), VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);

    // 16x16 tiles here — presumably the scale shader's local_size differs from
    // pass 1's; confirm against glsl_scale_comp.
    vkCmdDispatch(cmdBuffer->get(), UP_DIV(input->width(), 16), UP_DIV(input->height(), 16),
                  channelDiv4 * input->batch());

    return NO_ERROR;
}
|
|
|
|
|
2019-05-14 07:29:37 +08:00
|
|
|
class VulkanNormalizeCreator : public VulkanBackend::Creator {
|
2019-04-17 10:49:11 +08:00
|
|
|
public:
|
- dynamic computation graph (beta)
- add supports (/express)
- add tests
- add benchmarks with it (/benchmark/exprModels)
- Python
- MNN engine and tools were submitted to pip
- available on Windows/macOS/Linux
- Engine/Converter
- add supports for each op benchmarking
- refactor optimizer by separating steps
- CPU
- add supports for Conv3D, Pool3D, ELU, ReverseSequence
- fix ArgMax, Permute, Scale, BinaryOp, Slice, SliceTf
- OpenCL
- add half transform in CPU
- add broadcast supports for binary
- optimize Conv2D, Reshape, Eltwise, Gemm, etc.
- OpenGL
- add sub, real div supports for binary
- add supports for unary
- optimize Conv2D, Reshape
- Vulkan
- add max supports for eltwise
- Metal
- fix metallib missing problem
- Train/Quantization
- use express to refactor training codes
2019-09-26 21:02:07 +08:00
|
|
|
virtual VulkanBasicExecution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* bn) const override {
|
2019-05-14 07:29:37 +08:00
|
|
|
return new VulkanNormalize(op, bn);
|
2019-04-17 10:49:11 +08:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Self-registration: installs the Normalize creator into the Vulkan backend's
// op table during static initialization.
// NOTE(review): "gResistor" is presumably a typo for "gRegister"; renaming the
// file-scope static is safe only if nothing else references it — verify.
static bool gResistor = []() {
    VulkanBackend::addCreator(OpType_Normalize, new VulkanNormalizeCreator);
    return true;
}();
|
|
|
|
|
|
|
|
} // namespace MNN
|