mirror of https://github.com/alibaba/MNN.git

Fix minor typos

1. bechmark -> benchmark
2. MMN -> MNN
3. nomalize -> normalize
4. paramater -> parameter
5. tflie -> tflite
parent ac335b377f
commit 455786f0dc
@@ -74,7 +74,7 @@ function bench_android() {
     adb shell "echo Build Flags: ABI=$ABI  OpenMP=$OPENMP Vulkan=$VULKAN OpenCL=$OPENCL >> $ANDROID_DIR/benchmark.txt"
     #benchmark  CPU
     adb shell "LD_LIBRARY_PATH=$ANDROID_DIR $ANDROID_DIR/benchmark.out $ANDROID_DIR/benchmark_models $RUN_LOOP $FORWARD_TYPE 2>$ANDROID_DIR/benchmark.err >> $ANDROID_DIR/benchmark.txt"
-    #bechmark  Vulkan
+    #benchmark  Vulkan
     adb shell "LD_LIBRARY_PATH=$ANDROID_DIR $ANDROID_DIR/benchmark.out $ANDROID_DIR/benchmark_models $RUN_LOOP 7 2>$ANDROID_DIR/benchmark.err >> $ANDROID_DIR/benchmark.txt"
     #benchmark OpenGL
     #adb shell "LD_LIBRARY_PATH=$ANDROID_DIR $ANDROID_DIR/benchmark.out $ANDROID_DIR/benchmark_models 10 6 2>$ANDROID_DIR/benchmark.err >> $ANDROID_DIR/benchmark.txt"
@@ -18,19 +18,19 @@ namespace MNN {
 class CPUInnerProductExecutor : public Execution {
 public:
     CPUInnerProductExecutor(Backend *bn, const MNN::Op *op) : Execution(bn) {
-        auto paramater  = op->main_as_InnerProduct();
-        int outputCount = paramater->outputCount();
-        int srcCount    = paramater->weight()->size() / outputCount;
+        auto parameter  = op->main_as_InnerProduct();
+        int outputCount = parameter->outputCount();
+        int srcCount    = parameter->weight()->size() / outputCount;
         mWeight.reset(CPUConvolution::reorderWeightSize(srcCount, outputCount, 1, 4));
         if (mWeight.get() == nullptr) {
             mValid = false;
             return;
         }
         mWeight.clear();
-        CPUConvolution::reorderWeight(mWeight.get(), paramater->weight()->data(), srcCount, outputCount, 1, 4);
+        CPUConvolution::reorderWeight(mWeight.get(), parameter->weight()->data(), srcCount, outputCount, 1, 4);
         mBias.reset(ALIGN_UP4(outputCount));
         mBias.clear();
-        ::memcpy(mBias.get(), paramater->bias()->data(), paramater->bias()->size() * sizeof(float));
+        ::memcpy(mBias.get(), parameter->bias()->data(), parameter->bias()->size() * sizeof(float));
         mInputPad.reset(new Tensor(2));
         mOutputPad.reset(new Tensor(2));
     }
@@ -13,7 +13,7 @@
 namespace MNN {
 VulkanGroupConvolution::VulkanGroupConvolution(const Op *op, Backend *backend)
     : Execution(backend), mTempSrc(4), mTempDst(4) {
-    mConvParamater = op->main_as_Convolution2D();
+    mConvParameter = op->main_as_Convolution2D();
     mBackend       = static_cast<VulkanBackend *>(backend);
 }
 
@@ -31,12 +31,12 @@ ErrorCode VulkanGroupConvolution::onExecute(const std::vector<Tensor *> &inputs,
 ErrorCode VulkanGroupConvolution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
     auto input      = inputs[0];
     auto output     = outputs[0];
-    const int group = mConvParamater->common()->group();
+    const int group = mConvParameter->common()->group();
     mTempInputs     = std::vector<Tensor *>{&mTempSrc};
     mTempOutputs    = std::vector<Tensor *>{&mTempDst};
     if (mSubConvolutions.empty()) {
         mSubConvolutions.resize(group);
-        const auto convReal    = mConvParamater;
+        const auto convReal    = mConvParameter;
         const auto common      = convReal->common();
         const auto outputCount = common->outputCount();
         const int fh           = common->kernelY();
@@ -61,7 +61,7 @@ ErrorCode VulkanGroupConvolution::onResize(const std::vector<Tensor *> &inputs,
             const float *curWeightPtr = source + i * groupWeightSize;
             const float *curBiasPtr   = convReal->bias()->data() + i * groupCO;
             std::shared_ptr<Execution> subConvolution(VulkanConvolutionImpl::create(
-                mBackend, mConvParamater->common(), input, output, curWeightPtr, curBiasPtr, groupCI, groupCO));
+                mBackend, mConvParameter->common(), input, output, curWeightPtr, curBiasPtr, groupCI, groupCO));
             std::get<1>(mSubConvolutions[i]) = subConvolution;
         }
     }
@@ -24,7 +24,7 @@ private:
     Tensor mTempDst;
     std::vector<Tensor *> mTempInputs;
     std::vector<Tensor *> mTempOutputs;
-    const Convolution2D *mConvParamater;
+    const Convolution2D *mConvParameter;
     std::vector<std::tuple<std::shared_ptr<VulkanCommandPool::Buffer>, std::shared_ptr<Execution>,
                            std::shared_ptr<VulkanCommandPool::Buffer>>>
         mSubConvolutions;
@@ -1,12 +1,12 @@
 //
-//  VulkanNormlize.cpp
+//  VulkanNormalize.cpp
 //  MNN
 //
 //  Created by MNN on 2019/01/31.
 //  Copyright © 2018, Alibaba Group Holding Limited
 //
 
-#include "VulkanNormlize.hpp"
+#include "VulkanNormalize.hpp"
 #include "Macro.h"
 #include "TensorUtils.hpp"
 
@@ -17,11 +17,11 @@ struct GpuParam {
     float eps;
 };
 
-VulkanNormlize::VulkanNormlize(const Op* op, Backend* bn) : VulkanBasicExecution(bn) {
-    auto normlizeParam = op->main_as_Normalize();
-    mEps               = normlizeParam->eps();
+VulkanNormalize::VulkanNormalize(const Op* op, Backend* bn) : VulkanBasicExecution(bn) {
+    auto normalizeParam = op->main_as_Normalize();
+    mEps                = normalizeParam->eps();
 
-    std::vector<VkDescriptorType> VulkanNormlizeTypes{
+    std::vector<VkDescriptorType> VulkanNormalizeTypes{
         VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
         VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
         VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
@@ -33,29 +33,29 @@ VulkanNormlize::VulkanNormlize(const Op* op, Backend* bn) : VulkanBasicExecution
 
     mVkBackend = static_cast<VulkanBackend*>(bn);
     mSampler   = mVkBackend->getCommonSampler();
-    // normlize
-    mVulkanNormlizePipeline =
+    // normalize
+    mVulkanNormalizePipeline =
         mVkBackend->getPipeline("glsl_normalizeChannel_comp",
-                                /*glsl_normalizeChannel_comp, glsl_normalizeChannel_comp_len,*/ VulkanNormlizeTypes);
+                                /*glsl_normalizeChannel_comp, glsl_normalizeChannel_comp_len,*/ VulkanNormalizeTypes);
     mParamBuffer.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(GpuParam), nullptr,
                                         VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
-    MNN_ASSERT(normlizeParam->channelShared() == false);
+    MNN_ASSERT(normalizeParam->channelShared() == false);
     // scale
     mVulkanScalePipeline =
         mVkBackend->getPipeline("glsl_scale_comp", /*glsl_scale_comp, glsl_scale_comp_len,*/ VulkanScaleTypes);
 
-    mScale.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(float) * normlizeParam->scale()->size(),
-                                  normlizeParam->scale()->data(), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
-    mBias.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(float) * normlizeParam->scale()->size(),
+    mScale.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(float) * normalizeParam->scale()->size(),
+                                  normalizeParam->scale()->data(), VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
+    mBias.reset(new VulkanBuffer(mVkBackend->getMemoryPool(), false, sizeof(float) * normalizeParam->scale()->size(),
                                  nullptr, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT));
     auto biasPtr = reinterpret_cast<float*>(mBias->map());
-    ::memset(biasPtr, 0, sizeof(float) * normlizeParam->scale()->size());
+    ::memset(biasPtr, 0, sizeof(float) * normalizeParam->scale()->size());
     mBias->unmap();
 }
-VulkanNormlize::~VulkanNormlize() {
+VulkanNormalize::~VulkanNormalize() {
 }
-ErrorCode VulkanNormlize::onEncode(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
-                                   const VulkanCommandPool::Buffer* cmdBuffer) {
+ErrorCode VulkanNormalize::onEncode(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
+                                    const VulkanCommandPool::Buffer* cmdBuffer) {
     auto input            = inputs[0];
     auto output           = outputs[0];
     const int channelDiv4 = UP_DIV(input->channel(), 4);
@@ -67,28 +67,28 @@ ErrorCode VulkanNormlize::onEncode(const std::vector<Tensor*>& inputs, const std
 
     auto tempTensorImage = mVkBackend->findTensor(mTempTensor.deviceId())->image();
     MNN_ASSERT(nullptr != tempTensorImage);
-    auto VulkanNormlizeParam = reinterpret_cast<GpuParam*>(mParamBuffer->map());
-    ::memset(VulkanNormlizeParam, 0, sizeof(GpuParam));
+    auto VulkanNormalizeParam = reinterpret_cast<GpuParam*>(mParamBuffer->map());
+    ::memset(VulkanNormalizeParam, 0, sizeof(GpuParam));
 
-    VulkanNormlizeParam->imgSize[0]  = input->width();
-    VulkanNormlizeParam->imgSize[1]  = input->height();
-    VulkanNormlizeParam->imgSize[2]  = channelDiv4;
-    VulkanNormlizeParam->imgSize[3]  = 0;
-    VulkanNormlizeParam->channelDiv4 = channelDiv4;
-    VulkanNormlizeParam->eps         = mEps;
+    VulkanNormalizeParam->imgSize[0]  = input->width();
+    VulkanNormalizeParam->imgSize[1]  = input->height();
+    VulkanNormalizeParam->imgSize[2]  = channelDiv4;
+    VulkanNormalizeParam->imgSize[3]  = 0;
+    VulkanNormalizeParam->channelDiv4 = channelDiv4;
+    VulkanNormalizeParam->eps         = mEps;
 
     mParamBuffer->flush(true, 0, sizeof(GpuParam));
     mParamBuffer->unmap();
 
-    // normlize
-    mNormlizeDescriptorSet.reset(mVulkanNormlizePipeline->createSet());
-    mNormlizeDescriptorSet->writeImage(reinterpret_cast<VkImageView>(mTempTensor.deviceId()), mSampler->get(),
-                                       VK_IMAGE_LAYOUT_GENERAL, 0);
-    mNormlizeDescriptorSet->writeImage(reinterpret_cast<VkImageView>(input->deviceId()), mSampler->get(),
-                                       VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 1);
-    mNormlizeDescriptorSet->writeBuffer(mParamBuffer->buffer(), 2, mParamBuffer->size());
+    // normalize
+    mNormalizeDescriptorSet.reset(mVulkanNormalizePipeline->createSet());
+    mNormalizeDescriptorSet->writeImage(reinterpret_cast<VkImageView>(mTempTensor.deviceId()), mSampler->get(),
+                                        VK_IMAGE_LAYOUT_GENERAL, 0);
+    mNormalizeDescriptorSet->writeImage(reinterpret_cast<VkImageView>(input->deviceId()), mSampler->get(),
+                                        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 1);
+    mNormalizeDescriptorSet->writeBuffer(mParamBuffer->buffer(), 2, mParamBuffer->size());
 
-    mVulkanNormlizePipeline->bind(cmdBuffer->get(), mNormlizeDescriptorSet->get());
+    mVulkanNormalizePipeline->bind(cmdBuffer->get(), mNormalizeDescriptorSet->get());
 
     vkCmdDispatch(cmdBuffer->get(), UP_DIV(input->width(), 8), UP_DIV(input->height(), 8), input->batch());
 
@@ -111,15 +111,15 @@ ErrorCode VulkanNormlize::onEncode(const std::vector<Tensor*>& inputs, const std
     return NO_ERROR;
 }
 
-class VulkanNormlizeCreator : public VulkanBackend::Creator {
+class VulkanNormalizeCreator : public VulkanBackend::Creator {
 public:
     virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const MNN::Op* op, Backend* bn) const override {
-        return new VulkanNormlize(op, bn);
+        return new VulkanNormalize(op, bn);
     }
 };
 
 static bool gResistor = []() {
-    VulkanBackend::addCreator(OpType_Normalize, new VulkanNormlizeCreator);
+    VulkanBackend::addCreator(OpType_Normalize, new VulkanNormalizeCreator);
     return true;
 }();
 
@@ -1,28 +1,28 @@
 //
-//  VulkanNormlize.hpp
+//  VulkanNormalize.hpp
 //  MNN
 //
 //  Created by MNN on 2019/01/31.
 //  Copyright © 2018, Alibaba Group Holding Limited
 //
 
-#ifndef VulkanNormlize_hpp
-#define VulkanNormlize_hpp
+#ifndef VulkanNormalize_hpp
+#define VulkanNormalize_hpp
 #include "VulkanBasicExecution.hpp"
 
 namespace MNN {
-class VulkanNormlize : public VulkanBasicExecution {
+class VulkanNormalize : public VulkanBasicExecution {
 public:
-    VulkanNormlize(const Op* op, Backend* bn);
-    virtual ~VulkanNormlize();
+    VulkanNormalize(const Op* op, Backend* bn);
+    virtual ~VulkanNormalize();
     ErrorCode onEncode(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                        const VulkanCommandPool::Buffer* cmdBuffer) override;
 
 private:
     std::shared_ptr<VulkanBuffer> mParamBuffer;
-    const VulkanPipeline* mVulkanNormlizePipeline;
+    const VulkanPipeline* mVulkanNormalizePipeline;
     const VulkanPipeline* mVulkanScalePipeline;
-    std::shared_ptr<VulkanPipeline::DescriptorSet> mNormlizeDescriptorSet;
+    std::shared_ptr<VulkanPipeline::DescriptorSet> mNormalizeDescriptorSet;
     std::shared_ptr<VulkanPipeline::DescriptorSet> mScaleDescriptorSet;
     std::shared_ptr<VulkanBuffer> mScale;
     std::shared_ptr<VulkanBuffer> mBias;
@@ -7,7 +7,7 @@ layout(set = 0, binding = 2) uniform constBuffer{
 	ivec4 imgSize;
 	int channelDiv4;
 	float eps;
-}uNormlizeParam;
+}uNormalizeParam;
 
 layout(local_size_x = 8, local_size_y = 8) in;
 
@@ -15,19 +15,19 @@ void main()
 {
 	ivec3 pos = ivec3(gl_GlobalInvocationID);
 
-	if(all(lessThan(pos, uNormlizeParam.imgSize.xyz)))
+	if(all(lessThan(pos, uNormalizeParam.imgSize.xyz)))
 	{
 		vec4 color = texelFetch(uInput, ivec3(pos.x, pos.y, 0), 0);
 		vec4 sum = color * color;
-		for(int i = 1; i < uNormlizeParam.channelDiv4; ++i)
+		for(int i = 1; i < uNormalizeParam.channelDiv4; ++i)
 		{
 			color = texelFetch(uInput, ivec3(pos.x, pos.y, i), 0);
 			sum += color * color;
 		}
 
-		float summerResult = inversesqrt((sum.x + sum.y + sum.z + sum.w) + uNormlizeParam.eps);
+		float summerResult = inversesqrt((sum.x + sum.y + sum.z + sum.w) + uNormalizeParam.eps);
 
-		for(int i = 0; i < uNormlizeParam.channelDiv4; ++i)
+		for(int i = 0; i < uNormalizeParam.channelDiv4; ++i)
 		{
 			vec4 tempSum = vec4(summerResult);
 			ivec3 curPos = ivec3(pos.x, pos.y, i);
@@ -5,7 +5,7 @@
  * found in the LICENSE file.
  */
 /*
- Modified by MMN
+ Modified by MNN
  2018.9.19
  */
 
@@ -5,7 +5,7 @@
  * found in the LICENSE file.
  */
 /*
- Modified by MMN
+ Modified by MNN
  2018.9.19
  */
 
@@ -19,13 +19,13 @@ public:
 
         auto output    = outputs[0];
         auto input     = inputs[0];
-        auto paramater = op->main_as_InnerProduct();
+        auto parameter = op->main_as_InnerProduct();
 
         MNN_ASSERT(2 == input->buffer().dimensions);
         output->buffer().dimensions    = input->buffer().dimensions;
         output->buffer().dim[0].extent = input->buffer().dim[0].extent;
         output->buffer().dim[0].flags  = 0;
-        output->buffer().dim[1].extent = paramater->outputCount();
+        output->buffer().dim[1].extent = parameter->outputCount();
         output->buffer().dim[1].flags  = 0;
 
         return true;
@@ -24,7 +24,7 @@ int tflite2MNNNet(const std::string inputModel, const std::string bizCode, std::
     const auto subGraphsSize      = tfliteModel->subgraphs.size();
     const auto& tfliteModelBuffer = tfliteModel->buffers;
 
-    // check whether this tflie model is quantization model
+    // check whether this tflite model is quantization model
     // use the weight's data type of Conv2D|DepthwiseConv2D to decide quantizedModel mode
     bool quantizedModel = true;
     for (int i = 0; i < subGraphsSize; ++i) {