2023-12-27 17:26:44 +08:00
// ConvLowMemoryExecution.cpp
//
// Created by MNN on 2023/12/1.
// Copyright © 2018, Alibaba Group Holding Limited
//
# ifdef MNN_LOW_MEMORY
2024-04-19 11:58:21 +08:00
# ifndef MNN_OPENCL_BUFFER_CLOSED
2023-12-27 17:26:44 +08:00
# include "ConvLowMemoryExecution.hpp"
// #define LOG_VERBOSE
namespace MNN {
namespace OpenCL {
// set mDequantScale mDequantOffset mNumQuantBit mFilterDataPtr from mConv2dParams
void ConvLowMemoryExecution::getInfoFromOpLowMemory(std::shared_ptr<ConvolutionCommon::Int8Common> &quanCommon) {
    // Decode the quantized weight blob attached to the op.
    quanCommon = ConvolutionCommon::load(mOp, this->backend(), false, true);
    if (mResource->mConv2dParams->quanParameter() != nullptr) {
        mLowMemoryFlag = true;
    } else {
        MNN_ERROR("Conv buf low memory init error. \n");
        MNN_ASSERT(false);
    }
    // Derive the input channel count from the flat weight byte count.
    mResource->mInputChannel = quanCommon->weight.size() / (mResource->mKernelWidth * mResource->mKernelHeight * mResource->mOutputChannel);
    // set mNumQuantBit
    if (quanCommon->canUseInt4) {
        mNumQuantBit = 4;
        // int4 packs two weights per byte, so the byte count under-reports channels by 2x.
        mResource->mInputChannel = (quanCommon->weight.size() * 2) / (mResource->mKernelWidth * mResource->mKernelHeight * mResource->mOutputChannel);
    } else {
        mNumQuantBit = 8;
    }
    // src of alpha in CPU
    float *dequantAlpha = quanCommon->alpha.get();
    // alpha stores one scale per (channel, block), or interleaved {offset, scale}
    // pairs when asymmetric — hence the /2 to get the logical entry count.
    int totalCount = quanCommon->alpha.size();
    if (quanCommon->asymmetric) {
        totalCount /= 2;
    }
    int numAlpha = mResource->mOutputChannel;
    mResource->mBlockSize = totalCount / numAlpha;
    // set mDequantScale mDequantOffset
    // Channels are padded to a multiple of 16 to match the GPU-side layout.
    int numAlphaPack = ROUND_UP(numAlpha, 16);
    // Two 4-byte values (scale, offset) per (block, padded channel) slot.
    int mapSize = mResource->mBlockSize * numAlphaPack * sizeof(int32_t) * 2;
    mResource->dequantScaleOffset.reset(new cl::Buffer(mOpenCLBackend->getOpenCLRuntime()->context(), CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR, mapSize));
    // transfer data from src in cpu to dst in gpu
    cl_int resScaleOffset;
    void *dequantScaleOffsetBufferMap = mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueMapBuffer(*mResource->dequantScaleOffset.get(), true, CL_MAP_WRITE, 0, mapSize, nullptr, nullptr, &resScaleOffset);
    // mBlockSize % 4 need equal 0
    if (dequantScaleOffsetBufferMap != nullptr && resScaleOffset == CL_SUCCESS) {
        if (quanCommon->asymmetric) {
            // CPU layout is channel-major {offset, scale} pairs; GPU layout is
            // block-major {scale, offset} with channels padded to numAlphaPack.
            for (int i = 0; i < numAlpha; ++i) {
                auto srcZ = dequantAlpha + i * mResource->mBlockSize * 2;
                for (int j = 0; j < mResource->mBlockSize; ++j) {
                    float o = srcZ[2 * j + 0];
                    float s = srcZ[2 * j + 1];
                    ((float *)dequantScaleOffsetBufferMap)[(j * numAlphaPack + i) * 2] = s;
                    ((float *)dequantScaleOffsetBufferMap)[(j * numAlphaPack + i) * 2 + 1] = o;
                }
            }
        } else {
            // Symmetric quantization: only a scale is stored; offset is fixed at 0.
            for (int i = 0; i < numAlpha; ++i) {
                auto srcZ = dequantAlpha + i * mResource->mBlockSize;
                for (int j = 0; j < mResource->mBlockSize; ++j) {
                    ((float *)dequantScaleOffsetBufferMap)[(j * numAlphaPack + i) * 2] = srcZ[j];
                    ((float *)dequantScaleOffsetBufferMap)[(j * numAlphaPack + i) * 2 + 1] = 0.0f;
                }
            }
        }
    } else {
        MNN_ERROR("Map error dequantBufferMap == nullptr \n");
        MNN_ASSERT(false);
    }
    mOpenCLBackend->getOpenCLRuntime()->commandQueue().enqueueUnmapMemObject(*mResource->dequantScaleOffset.get(), dequantScaleOffsetBufferMap);
    // set mFilterDataPtr
    mFilterDataPtr = (void *)quanCommon->weight.get();
}
2024-08-24 15:46:21 +08:00
bool ConvLowMemoryExecution : : convertToQuantWeight1x1Buffer ( cl : : Buffer input , int icPack , int ocPack ) {
# ifdef LOG_VERBOSE
MNN_PRINT ( " start convertToQuantWeight1x1Buffer ! \n " ) ;
# endif
auto runtime = mOpenCLBackend - > getOpenCLRuntime ( ) ;
std : : string kernelName = " conv2d_1x1_ic_oc_weight_quant_buffer " ;
std : : set < std : : string > buildOptions ;
if ( mNumQuantBit = = 8 ) {
buildOptions . emplace ( " -DUSE_LOW_BIT_WEIGHT_INT8 " ) ;
} else if ( mNumQuantBit = = 4 ) {
// int4 case
buildOptions . emplace ( " -DUSE_LOW_BIT_WEIGHT_INT4 " ) ;
} else { /* More types to be supported. */ }
if ( mResource - > mInputChannel % icPack ! = 0 ) {
buildOptions . emplace ( " -DCHANNEL_LEAVE " ) ;
}
mBufferToConv1x1Kernel = runtime - > buildKernelWithCache ( " buffer_convert_quant " , kernelName , buildOptions ) ;
auto kernel = mBufferToConv1x1Kernel - > get ( ) ;
uint32_t gws [ 2 ] = { static_cast < uint32_t > ( UP_DIV ( mResource - > mInputChannel , icPack ) ) , static_cast < uint32_t > ( UP_DIV ( mResource - > mOutputChannel , ocPack ) ) } ;
uint32_t idx = 0 ;
cl_int ret = CL_SUCCESS ;
ret | = kernel . setArg ( idx + + , gws [ 0 ] ) ;
ret | = kernel . setArg ( idx + + , gws [ 1 ] ) ;
ret | = kernel . setArg ( idx + + , input ) ;
ret | = kernel . setArg ( idx + + , * mResource - > mKernelBuffer . get ( ) ) ;
ret | = kernel . setArg ( idx + + , mResource - > mInputChannel ) ;
ret | = kernel . setArg ( idx + + , mResource - > mOutputChannel ) ;
ret | = kernel . setArg ( idx + + , icPack ) ;
ret | = kernel . setArg ( idx + + , ocPack ) ;
MNN_CHECK_CL_SUCCESS ( ret , " setArg convertToQuantWeight1x1Buffer " ) ;
const uint32_t maxWorkGroupSize = static_cast < uint32_t > ( runtime - > getMaxWorkGroupSize ( mBufferToConv1x1Kernel ) ) ;
const std : : vector < uint32_t > lws = { 16 , std : : max ( ( uint32_t ) 1 , maxWorkGroupSize / 16 ) } ;
cl : : Event event ;
cl_int res ;
std : : vector < uint32_t > roundUpGroupWorkSize ( lws . size ( ) ) ;
for ( size_t i = 0 ; i < lws . size ( ) ; + + i ) {
roundUpGroupWorkSize [ i ] = ROUND_UP ( gws [ i ] , lws [ i ] ) ;
}
res = runtime - > commandQueue ( ) . enqueueNDRangeKernel ( kernel , cl : : NullRange ,
cl : : NDRange ( roundUpGroupWorkSize [ 0 ] , roundUpGroupWorkSize [ 1 ] ) ,
cl : : NDRange ( lws [ 0 ] , lws [ 1 ] ) , nullptr , & event ) ;
event . wait ( ) ;
MNN_CHECK_CL_SUCCESS ( res , " convertToQuantWeight1x1Buffer " ) ;
# ifdef LOG_VERBOSE
MNN_PRINT ( " end convertToQuantWeight1x1Buffer ! \n " ) ;
# endif
return true ;
}
2023-12-27 17:26:44 +08:00
// set mKernelBuffer for the 1x1 kernels
void ConvLowMemoryExecution : : set1x1WeightLowMemory ( int packCout , int packCin , void * filterDataPtr , std : : shared_ptr < ConvolutionCommon : : Int8Common > & quanCommon ) {
cl_int res ;
2024-08-24 15:46:21 +08:00
std : : shared_ptr < Tensor > filterBuffer ( Tensor : : createDevice < float > ( { ROUND_UP ( mResource - > mOutputChannel , packCout ) /*Cout pack set to max 8*/ , ROUND_UP ( mResource - > mInputChannel , packCin ) , 1 , 1 } ) ) ;
2023-12-27 17:26:44 +08:00
size_t buffer_size = filterBuffer - > usize ( ) / sizeof ( float ) ;
2024-08-24 15:46:21 +08:00
size_t cpy_size = mResource - > mOutputChannel * mResource - > mInputChannel ;
2023-12-27 17:26:44 +08:00
float * dequantAlpha = quanCommon - > alpha . get ( ) ;
// shared part for all cases
2024-08-24 15:46:21 +08:00
if ( mNumQuantBit = = 4 ) {
2023-12-27 17:26:44 +08:00
// int4 case
buffer_size / = 2 ;
2024-08-24 15:46:21 +08:00
cpy_size = UP_DIV ( cpy_size , 2 ) ;
2023-12-27 17:26:44 +08:00
} else { /* More types to be supported. */ }
2024-08-24 15:46:21 +08:00
cl : : Buffer filterBufferCL ( mOpenCLBackend - > getOpenCLRuntime ( ) - > context ( ) , CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR , buffer_size ) ;
void * mapPtr = mOpenCLBackend - > getOpenCLRuntime ( ) - > commandQueue ( ) . enqueueMapBuffer ( filterBufferCL , true , CL_MAP_WRITE , 0 , buffer_size , nullptr , nullptr , & res ) ;
if ( mapPtr ! = nullptr & & res = = CL_SUCCESS ) {
: : memcpy ( mapPtr , filterDataPtr , cpy_size ) ;
2023-12-27 17:26:44 +08:00
} else {
MNN_ERROR ( " set1x1WeightLowMemory: Map error ptrCL == nullptr \n " ) ;
MNN_ASSERT ( false ) ;
}
2024-08-24 15:46:21 +08:00
mOpenCLBackend - > getOpenCLRuntime ( ) - > commandQueue ( ) . enqueueUnmapMemObject ( filterBufferCL , mapPtr ) ;
mResource - > mKernelBuffer . reset ( new cl : : Buffer ( mOpenCLBackend - > getOpenCLRuntime ( ) - > context ( ) , CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR , buffer_size ) ) ;
convertToQuantWeight1x1Buffer ( filterBufferCL , packCin , packCout ) ;
2023-12-27 17:26:44 +08:00
}
// set mFilter for the general kernels
void ConvLowMemoryExecution : : setGeneralWeightLowMemory ( void * filterDataPtr , std : : shared_ptr < ConvolutionCommon : : Int8Common > & quanCommon ) {
if ( filterDataPtr ! = nullptr ) {
2024-08-24 15:46:21 +08:00
std : : shared_ptr < Tensor > filterBuffer ( Tensor : : createDevice < float > ( { ROUND_UP ( mResource - > mOutputChannel , 4 ) , mResource - > mInputChannel , mResource - > mKernelWidth , mResource - > mKernelHeight } ) ) ;
2023-12-27 17:26:44 +08:00
size_t buffer_size = filterBuffer - > usize ( ) / sizeof ( float ) ;
2024-08-24 15:46:21 +08:00
size_t cpy_size = mResource - > mOutputChannel * mResource - > mInputChannel * mResource - > mKernelWidth * mResource - > mKernelHeight ;
if ( mNumQuantBit = = 4 ) {
buffer_size / = 2 ;
cpy_size = UP_DIV ( cpy_size , 2 ) ;
}
2023-12-27 17:26:44 +08:00
cl : : Buffer filterBufferCL ( mOpenCLBackend - > getOpenCLRuntime ( ) - > context ( ) , CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR , buffer_size ) ;
filterBuffer - > buffer ( ) . device = ( uint64_t ) ( & filterBufferCL ) ;
float * dequantAlpha = quanCommon - > alpha . get ( ) ;
// map and pack data from filterDataPtr
cl_int res ;
auto ptrCL = mOpenCLBackend - > getOpenCLRuntime ( ) - > commandQueue ( ) . enqueueMapBuffer ( filterBufferCL , true , CL_MAP_WRITE , 0 , buffer_size , nullptr , nullptr , & res ) ;
if ( ptrCL ! = nullptr & & res = = CL_SUCCESS ) {
2024-08-24 15:46:21 +08:00
: : memcpy ( ptrCL , filterDataPtr , cpy_size ) ;
2023-12-27 17:26:44 +08:00
} else {
MNN_ERROR ( " setGeneralWeightLowMemory: Map error ptrCL == nullptr \n " ) ;
}
mOpenCLBackend - > getOpenCLRuntime ( ) - > commandQueue ( ) . enqueueUnmapMemObject ( filterBufferCL , ptrCL ) ;
// convert to NC4HW4
if ( mNumQuantBit = = 8 ) {
// ROUND_UP(IC, 4), UP_DIV(OC, 4) * mKernelWidth * mKernelHeight
2024-08-24 15:46:21 +08:00
mResource - > mFilter . reset ( Tensor : : createDevice < int8_t > ( { 1 , UP_DIV ( mResource - > mOutputChannel , 4 ) * mResource - > mKernelWidth * mResource - > mKernelHeight , 1 , 4 * ROUND_UP ( mResource - > mInputChannel , 4 ) } ) ) ;
2024-04-19 11:58:21 +08:00
mResource - > mKernelBuffer . reset ( new cl : : Buffer ( mOpenCLBackend - > getOpenCLRuntime ( ) - > context ( ) , CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR , buffer_size ) ) ;
mResource - > mFilter - > buffer ( ) . device = ( uint64_t ) ( mResource - > mKernelBuffer . get ( ) ) ;
2023-12-27 17:26:44 +08:00
MNN : : OpenCL : : BufferConvertor bufferConvertor { mOpenCLBackend - > getOpenCLRuntime ( ) } ;
// filterBuffer shape: {OC, ROUND_UP(IC, 4), mKernelWidth, mKernelHeight}
2024-04-19 11:58:21 +08:00
bufferConvertor . convertToNC4HW4Buffer ( filterBuffer . get ( ) , MNN : : OpenCL : : CONV2D_FILTER , mResource - > mFilter . get ( ) , false , true , mLowMemoryFlag , mNumQuantBit ) ;
2023-12-27 17:26:44 +08:00
} else if ( mNumQuantBit = = 4 ) {
// ROUND_UP(IC, 4), UP_DIV(OC, 4) * mKernelWidth * mKernelHeight
// For int4 case, data stored in mFilter should be uint8_t
// while "Tensor::createDevice<uint8_t>" occupies more memory than "Tensor::createDevice<int8_t>".
// Therefore, we use "Tensor::createDevice<int8_t>" currently, leaving "Tensor::createDevice<uint8_t>" to be supported.
2024-08-24 15:46:21 +08:00
mResource - > mFilter . reset ( Tensor : : createDevice < int8_t > ( { 1 , UP_DIV ( mResource - > mOutputChannel , 4 ) * mResource - > mKernelWidth * mResource - > mKernelHeight , 1 , 2 * ROUND_UP ( mResource - > mInputChannel , 4 ) } ) ) ;
mResource - > mKernelBuffer . reset ( new cl : : Buffer ( mOpenCLBackend - > getOpenCLRuntime ( ) - > context ( ) , CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR , buffer_size ) ) ;
2024-04-19 11:58:21 +08:00
mResource - > mFilter - > buffer ( ) . device = ( uint64_t ) ( mResource - > mKernelBuffer . get ( ) ) ;
2023-12-27 17:26:44 +08:00
MNN : : OpenCL : : BufferConvertor bufferConvertor { mOpenCLBackend - > getOpenCLRuntime ( ) } ;
// filterBuffer shape: {OC, ROUND_UP(IC, 4), mKernelWidth, mKernelHeight}
2024-04-19 11:58:21 +08:00
bufferConvertor . convertToNC4HW4Buffer ( filterBuffer . get ( ) , MNN : : OpenCL : : CONV2D_FILTER , mResource - > mFilter . get ( ) , false , true , mLowMemoryFlag , mNumQuantBit ) ;
2023-12-27 17:26:44 +08:00
} else { /* More types to be supported. */ }
} else {
MNN_ERROR ( " GetConvParams Error: filterDataPtr == nullptr. \n " ) ;
MNN_ASSERT ( false ) ;
}
}
// select the fastest kernel for the 1x1 cases by tuning
void ConvLowMemoryExecution : : tune1x1CaseLowMemory ( Tensor * input , Tensor * output ) {
2024-04-19 11:58:21 +08:00
auto & unit = mUnits [ 0 ] ;
2023-12-27 17:26:44 +08:00
std : : vector < int > inputShape = tensorShapeFormat ( input ) ;
std : : vector < int > outputShape = tensorShapeFormat ( output ) ;
auto runTime = ( ( OpenCLBackend * ) backend ( ) ) - > getOpenCLRuntime ( ) ;
const int height = outputShape . at ( 1 ) ;
const int width = outputShape . at ( 2 ) ;
const int outChannel = outputShape . at ( 3 ) ;
const int inputHeight = inputShape . at ( 1 ) ;
const int inputWidth = inputShape . at ( 2 ) ;
const int inputChannels = inputShape . at ( 3 ) ;
const int inputChannelBlocks = UP_DIV ( inputChannels , 4 ) ;
2024-07-04 11:53:45 +08:00
const int blockDim = mResource - > mInputChannel / mResource - > mBlockSize ;
2024-04-19 11:58:21 +08:00
std : : string info = std : : to_string ( inputChannels ) + " _ " + std : : to_string ( outChannel ) + " _ " + std : : to_string ( mResource - > mKernelHeight ) + " _ " + std : : to_string ( mResource - > mKernelWidth ) + " _ " + std : : to_string ( mResource - > mStrides [ 0 ] ) + " _ " + std : : to_string ( mResource - > mStrides [ 1 ] ) + " _ " + std : : to_string ( mResource - > mDilations [ 0 ] ) + " _ " + std : : to_string ( mResource - > mDilations [ 1 ] ) ;
2023-12-27 17:26:44 +08:00
int inputImageShape [ 2 ] = { inputHeight , inputWidth } ;
int outputImageShape [ 2 ] = { height , width } ;
2024-02-29 16:21:40 +08:00
int stideShape [ 2 ] = { mResource - > mStrides [ 0 ] , mResource - > mStrides [ 1 ] } ;
2023-12-27 17:26:44 +08:00
const int total_kernel = 2 ;
std : : string kernelName [ total_kernel ] = { " conv_2d_1x1 " , " conv_2d_1x1_c8h1w4 " } ;
int itemC [ total_kernel ] = { 4 , 8 } ;
int itemH [ total_kernel ] = { 1 , 1 } ;
int itemW [ total_kernel ] = { 4 , 4 } ;
int actual_kernel = total_kernel ;
2024-04-19 11:58:21 +08:00
std : : shared_ptr < KernelWrap > kernel [ total_kernel ] ;
2023-12-27 17:26:44 +08:00
std : : vector < uint32_t > globalWorkSize [ total_kernel ] ;
std : : vector < uint32_t > localWorkSize [ total_kernel ] ;
std : : pair < int , int > min_cost ( INT_MAX , 0 ) ; //(min_time, min_index)
cl_int ret = CL_SUCCESS ;
for ( int knl_idx = 0 ; knl_idx < actual_kernel ; knl_idx + + ) {
2024-04-19 11:58:21 +08:00
std : : set < std : : string > buildOption = mResource - > mBuildOptions ;
2024-07-04 11:53:45 +08:00
if ( inputChannels % 4 ! = 0 ) {
buildOption . emplace ( " -DINPUT_CHANNEL_LEAVE " ) ;
}
2023-12-27 17:26:44 +08:00
kernel [ knl_idx ] = mOpenCLBackend - > getOpenCLRuntime ( ) - > buildKernel ( " conv_2d " , kernelName [ knl_idx ] , buildOption ) ;
uint32_t maxWorkGroupSize = static_cast < uint32_t > ( mOpenCLBackend - > getOpenCLRuntime ( ) - > getMaxWorkGroupSize ( kernel [ knl_idx ] ) ) ;
globalWorkSize [ knl_idx ] = { static_cast < uint32_t > ( UP_DIV ( outputShape . at ( 3 ) , itemC [ knl_idx ] ) * UP_DIV ( outputShape . at ( 2 ) , itemW [ knl_idx ] ) ) , static_cast < uint32_t > ( outputShape . at ( 0 ) * UP_DIV ( outputShape . at ( 1 ) , itemH [ knl_idx ] ) ) } ;
uint32_t idx = 0 ;
2024-04-19 11:58:21 +08:00
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , globalWorkSize [ knl_idx ] [ 0 ] ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , globalWorkSize [ knl_idx ] [ 1 ] ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , openCLImage ( input ) ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , * mResource - > mKernelBuffer . get ( ) ) ;
2024-07-04 11:53:45 +08:00
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , * mResource - > dequantScaleOffset . get ( ) ) ;
2024-04-19 11:58:21 +08:00
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , openCLImage ( mResource - > mBias . get ( ) ) ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , openCLImage ( output ) ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , sizeof ( inputImageShape ) , inputImageShape ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , static_cast < int > ( inputChannelBlocks ) ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , sizeof ( outputImageShape ) , outputImageShape ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , sizeof ( stideShape ) , stideShape ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , UP_DIV ( width , 4 ) ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , UP_DIV ( outputShape . at ( 3 ) , 4 ) ) ;
2024-07-04 11:53:45 +08:00
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , blockDim ) ;
ret | = kernel [ knl_idx ] - > get ( ) . setArg ( idx + + , inputChannels ) ;
2023-12-27 17:26:44 +08:00
std : : pair < std : : vector < uint32_t > , uint32_t > retTune ;
retTune = localWS2DDefault ( globalWorkSize [ knl_idx ] , maxWorkGroupSize , mOpenCLBackend - > getOpenCLRuntime ( ) , kernelName [ knl_idx ] + info , kernel [ knl_idx ] ) ;
//printf("conv1x1 kernel_%d = %d [%d, %d]\n", knl_idx, retTune.second, retTune.first[0], retTune.first[1]);
if ( min_cost . first > retTune . second ) {
min_cost . first = retTune . second ;
min_cost . second = knl_idx ;
mLocalWorkSize = { retTune . first [ 0 ] , retTune . first [ 1 ] } ;
}
}
int min_index = min_cost . second ;
mGlobalWorkSize = { globalWorkSize [ min_index ] [ 0 ] , globalWorkSize [ min_index ] [ 1 ] } ;
2024-04-19 11:58:21 +08:00
std : : set < std : : string > buildOption = mResource - > mBuildOptions ;
2024-07-04 11:53:45 +08:00
if ( inputChannels % 4 ! = 0 ) {
buildOption . emplace ( " -DINPUT_CHANNEL_LEAVE " ) ;
}
2024-04-19 11:58:21 +08:00
unit . kernel = mOpenCLBackend - > getOpenCLRuntime ( ) - > buildKernel ( " conv_2d " , kernelName [ min_index ] , buildOption ) ;
2023-12-27 17:26:44 +08:00
uint32_t idx = 0 ;
2024-04-19 11:58:21 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , mGlobalWorkSize [ 0 ] ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , mGlobalWorkSize [ 1 ] ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , openCLImage ( input ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , * mResource - > mKernelBuffer . get ( ) ) ;
2024-07-04 11:53:45 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , * mResource - > dequantScaleOffset . get ( ) ) ;
2024-04-19 11:58:21 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , openCLImage ( mResource - > mBias . get ( ) ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , openCLImage ( output ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , sizeof ( inputImageShape ) , inputImageShape ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , static_cast < int > ( inputChannelBlocks ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , sizeof ( outputImageShape ) , outputImageShape ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , sizeof ( stideShape ) , stideShape ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , UP_DIV ( width , 4 ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , UP_DIV ( outputShape . at ( 3 ) , 4 ) ) ;
2024-07-04 11:53:45 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , blockDim ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , inputChannels ) ;
2023-12-27 17:26:44 +08:00
MNN_CHECK_CL_SUCCESS ( ret , " setArg Conv1x1LowMemory " ) ;
2024-04-19 11:58:21 +08:00
mOpenCLBackend - > recordKernel2d ( unit . kernel , mGlobalWorkSize , mLocalWorkSize ) ;
unit . globalWorkSize = { mGlobalWorkSize [ 0 ] , mGlobalWorkSize [ 1 ] } ;
unit . localWorkSize = { mLocalWorkSize [ 0 ] , mLocalWorkSize [ 1 ] } ;
2023-12-27 17:26:44 +08:00
return ;
}
// select the fastest kernel for the general cases by tuning
// Times each candidate conv kernel with localWS2DDefault, then rebuilds the
// winner and records its arguments and work sizes into mUnits[0].
// NOTE: the setArg order below must match the kernel signatures in conv_2d.cl.
void ConvLowMemoryExecution::tuneGeneralCaseLowMemory(Tensor *input, Tensor *output) {
    auto &unit = mUnits[0];
    std::vector<int> inputShape = tensorShapeFormat(input);
    std::vector<int> outputShape = tensorShapeFormat(output);
    auto runTime = ((OpenCLBackend *)backend())->getOpenCLRuntime();
    const int height = outputShape.at(1);
    const int width = outputShape.at(2);
    const int outChannel = outputShape.at(3);
    const int inputHeight = inputShape.at(1);
    const int inputWidth = inputShape.at(2);
    const int inputChannels = inputShape.at(3);
    const int inputChannelBlocks = UP_DIV(inputChannels, 4);
    // Channels covered by one dequant block (see getInfoFromOpLowMemory).
    const int blockDim = mResource->mInputChannel / mResource->mBlockSize;
    // Tuning-cache key: geometry that uniquely identifies this conv config.
    std::string info = std::to_string(inputChannels) + "_" + std::to_string(outChannel) + "_" + std::to_string(mResource->mKernelHeight) + "_" + std::to_string(mResource->mKernelWidth) + "_" + std::to_string(mResource->mStrides[0]) + "_" + std::to_string(mResource->mStrides[1]) + "_" + std::to_string(mResource->mDilations[0]) + "_" + std::to_string(mResource->mDilations[1]);
    int inputImageShape[2] = {inputHeight, inputWidth};
    int outputImageShape[2] = {height, width};
    int kernelShape[2] = {mResource->mKernelHeight, mResource->mKernelWidth};
    int strideShape[2] = {mResource->mStrides[0], mResource->mStrides[1]};
    int paddingShape[2] = {mPaddings[0], mPaddings[1]};
    int dilationShape[2] = {mResource->mDilations[0], mResource->mDilations[1]};
    // Candidate kernels and their per-work-item output tile sizes (C/H/W).
    const int total_kernel = 3;
    std::string kernelName[total_kernel] = {"conv_2d_c4h1w4", "conv_2d_c4h4w1", "conv_2d_c8h4w1"};
    int itemC[total_kernel] = {4, 4, 8};
    int itemH[total_kernel] = {1, 4, 4};
    int itemW[total_kernel] = {4, 1, 1};
    int actual_kernel = total_kernel;
    std::shared_ptr<KernelWrap> kernel[total_kernel];
    std::vector<uint32_t> globalWorkSize[total_kernel];
    std::vector<uint32_t> localWorkSize[total_kernel];
    std::pair<int, int> min_cost(INT_MAX, 0); //(min_time, min_index)
    // MNN_PRINT("Checking kernel %d.\n", knlCheck);
    for (int knl_idx = 0; knl_idx < actual_kernel; knl_idx++) {
        std::set<std::string> buildOption = mResource->mBuildOptions;
        if (inputChannels % 4 != 0) {
            buildOption.emplace("-DINPUT_CHANNEL_LEAVE");
        }
        kernel[knl_idx] = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName[knl_idx], buildOption);
        uint32_t maxWorkGroupSize = static_cast<uint32_t>(mOpenCLBackend->getOpenCLRuntime()->getMaxWorkGroupSize(kernel[knl_idx]));
        globalWorkSize[knl_idx] = {static_cast<uint32_t>(UP_DIV(outputShape.at(3), itemC[knl_idx]) * UP_DIV(outputShape.at(2), itemW[knl_idx])), static_cast<uint32_t>(outputShape.at(0) * UP_DIV(outputShape.at(1), itemH[knl_idx]))};
        uint32_t idx = 0;
        cl_int ret = CL_SUCCESS;
        ret |= kernel[knl_idx]->get().setArg(idx++, globalWorkSize[knl_idx][0]);
        ret |= kernel[knl_idx]->get().setArg(idx++, globalWorkSize[knl_idx][1]);
        ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(input));
        ret |= kernel[knl_idx]->get().setArg(idx++, openCLBuffer(mResource->mFilter.get()));
        ret |= kernel[knl_idx]->get().setArg(idx++, *mResource->dequantScaleOffset.get());
        ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(mResource->mBias.get()));
        ret |= kernel[knl_idx]->get().setArg(idx++, openCLImage(output));
        ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
        ret |= kernel[knl_idx]->get().setArg(idx++, inputChannelBlocks);
        ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
        ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(kernelShape), kernelShape);
        ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(strideShape), strideShape);
        ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(paddingShape), paddingShape);
        ret |= kernel[knl_idx]->get().setArg(idx++, sizeof(dilationShape), dilationShape);
        ret |= kernel[knl_idx]->get().setArg(idx++, UP_DIV(width, itemW[knl_idx]));
        ret |= kernel[knl_idx]->get().setArg(idx++, UP_DIV(outputShape.at(3), 4));
        ret |= kernel[knl_idx]->get().setArg(idx++, UP_DIV(height, itemH[knl_idx]));
        ret |= kernel[knl_idx]->get().setArg(idx++, blockDim);
        ret |= kernel[knl_idx]->get().setArg(idx++, inputChannels);
        MNN_CHECK_CL_SUCCESS(ret, "setArg ConvLowMemory Kernel Select");
        std::pair<std::vector<uint32_t>, int> retTune;
        retTune = localWS2DDefault(globalWorkSize[knl_idx], maxWorkGroupSize, mOpenCLBackend->getOpenCLRuntime(), kernelName[knl_idx] + info, kernel[knl_idx]);
        if (min_cost.first > retTune.second) {
            min_cost.first = retTune.second;
            min_cost.second = knl_idx;
            mLocalWorkSize = {retTune.first[0], retTune.first[1]};
        }
    }
    // Rebuild the fastest candidate and bind its final arguments.
    int min_index = min_cost.second;
    mGlobalWorkSize = {globalWorkSize[min_index][0], globalWorkSize[min_index][1]};
    std::set<std::string> buildOption = mResource->mBuildOptions;
    if (inputChannels % 4 != 0) {
        buildOption.emplace("-DINPUT_CHANNEL_LEAVE");
    }
    unit.kernel = mOpenCLBackend->getOpenCLRuntime()->buildKernel("conv_2d", kernelName[min_index], buildOption);
    uint32_t idx = 0;
    cl_int ret = CL_SUCCESS;
    ret |= unit.kernel->get().setArg(idx++, mGlobalWorkSize[0]);
    ret |= unit.kernel->get().setArg(idx++, mGlobalWorkSize[1]);
    ret |= unit.kernel->get().setArg(idx++, openCLImage(input));
    ret |= unit.kernel->get().setArg(idx++, openCLBuffer(mResource->mFilter.get()));
    ret |= unit.kernel->get().setArg(idx++, *mResource->dequantScaleOffset.get());
    ret |= unit.kernel->get().setArg(idx++, openCLImage(mResource->mBias.get()));
    ret |= unit.kernel->get().setArg(idx++, openCLImage(output));
    ret |= unit.kernel->get().setArg(idx++, sizeof(inputImageShape), inputImageShape);
    ret |= unit.kernel->get().setArg(idx++, inputChannelBlocks);
    ret |= unit.kernel->get().setArg(idx++, sizeof(outputImageShape), outputImageShape);
    ret |= unit.kernel->get().setArg(idx++, sizeof(kernelShape), kernelShape);
    ret |= unit.kernel->get().setArg(idx++, sizeof(strideShape), strideShape);
    ret |= unit.kernel->get().setArg(idx++, sizeof(paddingShape), paddingShape);
    ret |= unit.kernel->get().setArg(idx++, sizeof(dilationShape), dilationShape);
    ret |= unit.kernel->get().setArg(idx++, UP_DIV(width, itemW[min_index]));
    ret |= unit.kernel->get().setArg(idx++, UP_DIV(outputShape.at(3), 4));
    ret |= unit.kernel->get().setArg(idx++, UP_DIV(height, itemH[min_index]));
    ret |= unit.kernel->get().setArg(idx++, blockDim);
    ret |= unit.kernel->get().setArg(idx++, inputChannels);
    MNN_CHECK_CL_SUCCESS(ret, "setArg ConvLowMemory");
    mOpenCLBackend->recordKernel2d(unit.kernel, mGlobalWorkSize, mLocalWorkSize);
    unit.globalWorkSize = {mGlobalWorkSize[0], mGlobalWorkSize[1]};
    unit.localWorkSize = {mLocalWorkSize[0], mLocalWorkSize[1]};
    return;
}
void ConvLowMemoryExecution : : tuneGemmLowMemory ( Tensor * input , Tensor * output ) {
2024-04-19 11:58:21 +08:00
auto & unit = mUnits [ 0 ] ;
2023-12-27 17:26:44 +08:00
std : : vector < int > inputShape = tensorShapeFormat ( input ) ;
std : : vector < int > outputShape = tensorShapeFormat ( output ) ;
auto runTime = ( ( OpenCLBackend * ) backend ( ) ) - > getOpenCLRuntime ( ) ;
const int outChannel = outputShape . at ( 3 ) ;
const int inputChannels = inputShape . at ( 3 ) ;
const int batch = outputShape . at ( 0 ) ;
const int inputChannelBlocks = UP_DIV ( inputChannels , 4 ) ;
const int outputChannelBlocks = UP_DIV ( outChannel , 4 ) ;
2024-07-04 11:53:45 +08:00
const int blockDim = mResource - > mInputChannel / mResource - > mBlockSize ;
2023-12-27 17:26:44 +08:00
std : : string kernelname = " gemm_conv " ;
int global_x = outputChannelBlocks ;
int global_y = batch ;
if ( batch > 1 )
{
kernelname = " gemm_conv_b2 " ;
global_y = UP_DIV ( batch , 2 ) ;
}
2024-04-19 11:58:21 +08:00
std : : string info = std : : to_string ( inputChannels ) + " _ " + std : : to_string ( outChannel ) ;
2024-07-04 11:53:45 +08:00
std : : set < std : : string > buildOption = mResource - > mBuildOptions ;
if ( inputChannels % 4 ! = 0 ) {
buildOption . emplace ( " -DINPUT_CHANNEL_LEAVE " ) ;
}
unit . kernel = mOpenCLBackend - > getOpenCLRuntime ( ) - > buildKernel ( " gemm " , kernelname , buildOption ) ;
2024-04-19 11:58:21 +08:00
uint32_t maxWorkGroupSize = static_cast < uint32_t > ( mOpenCLBackend - > getOpenCLRuntime ( ) - > getMaxWorkGroupSize ( unit . kernel ) ) ;
2023-12-27 17:26:44 +08:00
mGlobalWorkSize = { static_cast < uint32_t > ( global_x ) , static_cast < uint32_t > ( global_y ) } ;
// MNN_PRINT("Kernel is %d.\n", min_index);
uint32_t idx = 0 ;
cl_int ret = CL_SUCCESS ;
2024-04-19 11:58:21 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , mGlobalWorkSize [ 0 ] ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , mGlobalWorkSize [ 1 ] ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , openCLImage ( input ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , * mResource - > mKernelBuffer . get ( ) ) ;
2024-07-04 11:53:45 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , * mResource - > dequantScaleOffset . get ( ) ) ;
2024-04-19 11:58:21 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , openCLImage ( mResource - > mBias . get ( ) ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , openCLImage ( output ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , static_cast < int > ( outputChannelBlocks ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , static_cast < int > ( inputChannelBlocks ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , static_cast < int > ( batch ) ) ;
2024-07-04 11:53:45 +08:00
ret | = unit . kernel - > get ( ) . setArg ( idx + + , static_cast < int > ( blockDim ) ) ;
ret | = unit . kernel - > get ( ) . setArg ( idx + + , static_cast < int > ( inputChannels ) ) ;
2023-12-27 17:26:44 +08:00
MNN_CHECK_CL_SUCCESS ( ret , " setArg gemm_conv " ) ;
2024-04-19 11:58:21 +08:00
mLocalWorkSize = localWS2DDefault ( mGlobalWorkSize , maxWorkGroupSize , mOpenCLBackend - > getOpenCLRuntime ( ) , kernelname + info , unit . kernel ) . first ;
mOpenCLBackend - > recordKernel2d ( unit . kernel , mGlobalWorkSize , mLocalWorkSize ) ;
unit . globalWorkSize = { mGlobalWorkSize [ 0 ] , mGlobalWorkSize [ 1 ] } ;
unit . localWorkSize = { mLocalWorkSize [ 0 ] , mLocalWorkSize [ 1 ] } ;
2023-12-27 17:26:44 +08:00
return ;
}
// Primary constructor: parses the Convolution2D op, loads the quantized weights
// (int4/int8) into mResource, and prepares build options for kernel compilation.
// Fix: removed `auto &unit = mUnits[0];` — it was never used here, and it indexes
// mUnits before the vector is sized (mUnits.resize(1) only happens in onEncode),
// which is out-of-bounds on an empty vector.
ConvLowMemoryExecution::ConvLowMemoryExecution(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs, const MNN::Op *op, Backend *backend)
    : ConvCommonExecution(op->main_as_Convolution2D(), backend), CommonExecution(backend, op) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ConvLowMemoryExecution init !\n");
#endif
    mOpenCLBackend = static_cast<OpenCLBackend *>(backend);
    const auto *conv2dParams       = op->main_as_Convolution2D();
    const auto *conv2dCommonParams = conv2dParams->common();
    mResource->mConv2dParams       = conv2dParams;
    mResource->mConv2dCommonParams = conv2dCommonParams;
    mResource->mStrides   = {conv2dCommonParams->strideY(), conv2dCommonParams->strideX()};
    mResource->mDilations = {conv2dCommonParams->dilateY(), conv2dCommonParams->dilateX()};
    auto padding = ConvolutionCommon::convolutionPad(inputs[0], outputs[0], conv2dCommonParams);
    mPaddings[0] = padding.second; // padY
    mPaddings[1] = padding.first;  // padX
    mResource->mKernelWidth   = conv2dCommonParams->kernelX();
    mResource->mKernelHeight  = conv2dCommonParams->kernelY();
    mResource->mOutputChannel = conv2dCommonParams->outputCount();
    std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
    // Load dequant scale/offset, quant bit width and the raw filter pointer from the op.
    getInfoFromOpLowMemory(quanCommon);
    // Select the conv method: 1x1 kernel with stride 1 and no padding takes the
    // optimized weight layout; everything else uses the general path.
    if (mResource->mKernelHeight == mResource->mKernelWidth && mResource->mKernelHeight == 1 && mResource->mStrides[0] == 1 && mResource->mStrides[1] == 1 && mPaddings[0] == 0 && mPaddings[1] == 0) {
        // set mKernelBuffer for the 1x1 case; start with packCout equal to 4
        set1x1WeightLowMemory(4, 4, mFilterDataPtr, quanCommon);
        mResource->mConv1x1Opt = true;
    } else {
        // set mFilter for the non-1x1 case
        setGeneralWeightLowMemory(mFilterDataPtr, quanCommon);
    }
    // Accumulate build options used when the kernels are compiled later.
    mResource->mBuildOptions.emplace("-DBIAS");
    if (conv2dCommonParams->relu()) {
        mResource->mBuildOptions.emplace("-DRELU");
    } else if (conv2dCommonParams->relu6()) {
        mResource->mBuildOptions.emplace("-DRELU6");
    }
    if (mNumQuantBit == 8) {
        // int8 weight case
        mResource->mBuildOptions.emplace("-DUSE_LOW_BIT_WEIGHT_INT8");
    } else if (mNumQuantBit == 4) {
        // int4 weight case
        mResource->mBuildOptions.emplace("-DUSE_LOW_BIT_WEIGHT_INT4");
    } else { /* More types to be supported. */ }
#ifdef LOG_VERBOSE
    MNN_PRINT("end ConvExecution init !\n");
#endif
}
2024-04-19 11:58:21 +08:00
// Clone constructor: shares the already-prepared weights/bias/build options
// through `resource` and re-binds the op's parameter tables.
ConvLowMemoryExecution::ConvLowMemoryExecution(std::shared_ptr<ConvResource> resource, const MNN::Op *op, Backend *backend)
    : ConvCommonExecution(backend), CommonExecution(backend, op) {
    mResource = resource;
    const auto *params             = op->main_as_Convolution2D();
    mResource->mConv2dParams       = params;
    mResource->mConv2dCommonParams = params->common();
}
ConvLowMemoryExecution::~ConvLowMemoryExecution() {
    // Nothing to release explicitly: resources are owned by mResource / RAII members.
}
bool ConvLowMemoryExecution : : onClone ( Backend * bn , const Op * op , Execution * * dst ) {
if ( ! mValid ) {
return false ;
}
if ( nullptr = = dst ) {
return true ;
}
* dst = new ConvLowMemoryExecution ( mResource , op , bn ) ;
return true ;
}
2024-04-19 11:58:21 +08:00
// Resize-time entry point: recomputes paddings for the concrete shapes and
// dispatches to the matching tuning routine (GEMM / optimized 1x1 / general).
ErrorCode ConvLowMemoryExecution::onEncode(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
#ifdef LOG_VERBOSE
    MNN_PRINT("Start ConvExecution onResize !\n");
#endif
    mUnits.resize(1);
    auto input  = inputs[0];
    auto output = outputs[0];
    // Paddings depend on the actual tensor shapes, so refresh them here.
    auto padding = ConvolutionCommon::convolutionPad(input, output, mResource->mConv2dCommonParams);
    mPaddings[0] = padding.second; // padY
    mPaddings[1] = padding.first;  // padX
    // A 1x1 conv whose input is spatially 1x1 reduces to a plain GEMM.
    const bool spatiallyTrivial = (inputs[0]->width() == 1 && inputs[0]->height() == 1);
    mResource->gemmOpt = (mResource->mConv1x1Opt && spatiallyTrivial);
    if (mResource->gemmOpt) {
        tuneGemmLowMemory(input, output);
    } else if (mResource->mConv1x1Opt) {
        tune1x1CaseLowMemory(input, output);
    } else {
        tuneGeneralCaseLowMemory(input, output);
    }
#ifdef LOG_VERBOSE
    MNN_PRINT("end ConvExecution onResize !\n");
#endif
    return NO_ERROR;
}
} // namespace OpenCL
} // namespace MNN
2024-04-19 11:58:21 +08:00
# endif /* MNN_OPENCL_BUFFER_CLOSED */
2023-12-27 17:26:44 +08:00
# endif /* MNN_LOW_MEMORY */