2019-12-27 22:16:57 +08:00
//
// NeuralNetWorkOp.cpp
// MNN
//
// Created by MNN on 2019/06/27.
// Copyright © 2018, Alibaba Group Holding Limited
//
# include <algorithm>
# include <map>
# include <numeric>
2021-04-28 18:02:10 +08:00
# include <cmath>
2019-12-27 22:16:57 +08:00
# include <MNN/expr/ExprCreator.hpp>
# include <MNN/MNNDefine.h>
# include "MNN_generated.h"
# include "Utils.hpp"
namespace MNN {
namespace Express {
// Map the public PaddingMode enum onto the serialized PadMode enum.
// Any unrecognized value falls back to CAFFE padding.
static PadMode _convertPadMode(PaddingMode mode) {
    if (VALID == mode) {
        return PadMode_VALID;
    }
    if (SAME == mode) {
        return PadMode_SAME;
    }
    return PadMode_CAFFE;
}
// Map the public PaddingMode enum onto the pooling-specific PoolPadType enum.
// Any unrecognized value falls back to CAFFE padding.
static PoolPadType _convertPoollingPadMode(PaddingMode mode) {
    if (VALID == mode) {
        return PoolPadType_VALID;
    }
    if (SAME == mode) {
        return PoolPadType_SAME;
    }
    return PoolPadType_CAFFE;
}
2020-01-15 13:33:47 +08:00
/*Create an input variable.
Args:
shape: A vector, the shape of the variable.
data_format: An enum, NCHW/NHWC/NC4HW4 is allowed.
dtype: The type of the elements of the resulting variable.
Returns:
output: A variable.
*/
VARP _Input(INTS shape, Dimensionformat data_format, halide_type_t dtype) {
    Variable::Info info;
    info.dim   = std::move(shape);
    info.order = data_format;
    info.type  = dtype;
    // No backing data yet: the caller fills the buffer via writeMap later.
    return Variable::create(Expr::create(std::move(info), nullptr, VARP::INPUT));
}
// Create a rank-0 (scalar) constant from the value pointed to by `ptr`.
// NHWC is used as a neutral layout for dimensionless data.
VARP _Scalar(const void* ptr, halide_type_t type) {
    Variable::Info scalarInfo;
    scalarInfo.dim   = {};
    scalarInfo.order = NHWC;
    scalarInfo.type  = type;
    return Variable::create(Expr::create(std::move(scalarInfo), ptr, VARP::CONSTANT));
}
2020-01-15 13:33:47 +08:00
/*Create a constant variable.
Args:
ptr: A pointer. Indicates the values.
shape: A vector, the shape of the variable.
format: An enum, NCHW/NHWC/NC4HW4 is allowed.
type: The type of the elements of the resulting variable.
Returns:
output: A constant variable.
*/
VARP _Const(const void* ptr, INTS shape, Dimensionformat format, halide_type_t type) {
    Variable::Info constInfo;
    constInfo.dim   = std::move(shape);
    constInfo.order = format;
    constInfo.type  = type;
    return Variable::create(Expr::create(std::move(constInfo), ptr, VARP::CONSTANT));
}
2020-01-15 13:33:47 +08:00
// Create a float constant where every element holds the same `value`.
// `shape`/`format` describe the resulting tensor's layout.
VARP _Const(float value, INTS shape, Dimensionformat format) {
    Variable::Info info;
    info.dim   = std::move(shape);
    info.order = format;
    info.type  = halide_type_of<float>();
    info.syncSize(); // derive total element count from the dims
    // Splat the value into a temporary buffer; Expr::create copies it.
    std::vector<float> filled(info.size, value);
    return Variable::create(Expr::create(std::move(info), (void*)filled.data(), VARP::CONSTANT));
}
// Create a trainable parameter initialized from raw data: built as a
// constant, then re-flagged so optimizers may update it.
VARP _TrainableParam(const void* ptr, INTS dims, Dimensionformat format, halide_type_t type) {
    auto param = _Const(ptr, dims, format, type);
    param.fix(VARP::TRAINABLE);
    return param;
}
// Create a trainable parameter filled with a single scalar value.
VARP _TrainableParam(float value, INTS dims, Dimensionformat format) {
    auto param = _Const(value, dims, format);
    param.fix(VARP::TRAINABLE);
    return param;
}
2020-11-05 16:41:56 +08:00
// Build an InnerProduct (fully-connected) op.
// outputShape[1] is taken as the output feature count; an empty `bias`
// disables the bias term.
VARP _InnerProduct(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS outputShape) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_InnerProduct;
    op->main.type  = OpParameter_InnerProduct;
    op->main.value = new InnerProductT;
    auto param = op->main.AsInnerProduct();
    param->outputCount = outputShape[1];
    if (!bias.empty()) {
        param->biasTerm = 1;
    }
    param->weightSize = (int)weight.size();
    param->weight     = std::move(weight);
    param->bias       = std::move(bias);
    return Variable::create(Expr::create(op.get(), {x}));
}
2019-12-27 22:16:57 +08:00
// 2-D convolution with weight (and optional bias) supplied as variables.
// An NHWC-ordered weight is transposed to OIHW before its dims are read.
// When inputCount-per-group is 1 and outputCount equals `group`, the op is
// lowered to a depthwise convolution.
VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Convolution;
    auto wInfo = weight->getInfo();
    if (NHWC == wInfo->order) {
        weight = _Transpose(weight, {0, 3, 1, 2});
        wInfo  = weight->getInfo();
    }
    std::vector<int> channel{wInfo->dim[0], wInfo->dim[1]};
    std::vector<int> kernelSize{wInfo->dim[3], wInfo->dim[2]};
    if (1 == channel[1] && channel[0] == group) {
        op->type   = OpType_ConvolutionDepthwise;
        channel[1] = group;
    }
    op->main.type  = OpParameter_Convolution2D;
    op->main.value = new Convolution2DT;
    auto conv2D = op->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    if (pads.size() == 2) {
        // Two entries mean symmetric {padX, padY}.
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->padMode     = _convertPadMode(pad);
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[0];
    conv2D->common->inputCount  = channel[1];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    if (nullptr == bias) {
        return Variable::create(Expr::create(op.get(), {x, weight}));
    }
    return Variable::create(Expr::create(op.get(), {x, weight, bias}));
}
// 2-D convolution with weight/bias embedded directly in the op (float data).
// channel = {inputCount, outputCount}; a depthwise conv is selected when
// both channel counts equal `group`.
VARP _Conv(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
           PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, bool relu6) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = (channel[0] == channel[1] && channel[0] == group) ? OpType_ConvolutionDepthwise
                                                                 : OpType_Convolution;
    op->main.type  = OpParameter_Convolution2D;
    op->main.value = new Convolution2DT;
    auto conv2D = op->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padMode = _convertPadMode(pad);
    if (pads.size() == 2) {
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    conv2D->common->relu6       = relu6;
    conv2D->common->relu        = relu;
    // Weight layout is outputCount x (inputCount/group) x kH x kW.
    MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
    conv2D->weight = std::move(weight);
    MNN_ASSERT(bias.size() == channel[1]);
    conv2D->bias = std::move(bias);
    return Variable::create(Expr::create(op.get(), {x}));
}
2020-03-24 14:11:08 +08:00
// 2-D convolution with quantized (int8) weights stored in an IDST quan
// parameter block (type 3) instead of the float weight array.
// NOTE(review): `nbits` is not consumed in this body — presumably reserved
// for lower-bit packing; confirm against the header/serializer.
VARP _Conv(std::vector<int8_t>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
           PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, bool relu6, int nbits) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = (channel[0] == channel[1] && channel[0] == group) ? OpType_ConvolutionDepthwise
                                                                 : OpType_Convolution;
    op->main.type  = OpParameter_Convolution2D;
    op->main.value = new Convolution2DT;
    auto conv2D = op->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padMode = _convertPadMode(pad);
    if (pads.size() == 2) {
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    conv2D->common->relu6       = relu6;
    conv2D->common->relu        = relu;
    // Two packed values per byte, hence the division by 2.
    MNN_ASSERT(weight.size() / 2 == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
    conv2D->quanParameter.reset(new IDSTQuanT);
    conv2D->quanParameter->type   = 3;
    conv2D->quanParameter->buffer = std::move(weight);
    conv2D->weight.clear();
    MNN_ASSERT(bias.size() == channel[1]);
    conv2D->bias = std::move(bias);
    return Variable::create(Expr::create(op.get(), {x}));
}
2019-12-27 22:16:57 +08:00
// 2-D convolution whose weight and bias tensors are each filled with a
// single scalar value (mostly useful for tests and simple graphs).
VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad, INTS stride, INTS dilate,
           int group) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = (channel[0] == channel[1] && channel[0] == group) ? OpType_ConvolutionDepthwise
                                                                 : OpType_Convolution;
    op->main.type  = OpParameter_Convolution2D;
    op->main.value = new Convolution2DT;
    auto conv2D = op->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padMode     = _convertPadMode(pad);
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    // Splat the scalar weight/bias across the full parameter tensors.
    conv2D->weight.assign(channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1], weight);
    conv2D->bias.assign(channel[1], bias);
    return Variable::create(Expr::create(op.get(), {x}));
}
// Transposed (de)convolution with weight/bias supplied as variables.
// The weight's dim[1]/dim[0] provide {outputCount, inputCount}.
VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Deconvolution;
    auto wInfo = weight->getInfo();
    std::vector<int> channel{wInfo->dim[1], wInfo->dim[0]};
    std::vector<int> kernelSize{wInfo->dim[3], wInfo->dim[2]};
    // NOTE(review): the product test only matches `group` when one channel
    // count is 1 and the other equals group — confirm this is the intended
    // depthwise detection.
    if (channel[1] * channel[0] == group) {
        op->type   = OpType_DeconvolutionDepthwise;
        channel[1] = group;
        channel[0] = group;
    }
    op->main.type  = OpParameter_Convolution2D;
    op->main.value = new Convolution2DT;
    auto conv2D = op->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    if (pads.size() == 2) {
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->padMode     = _convertPadMode(pad);
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[0];
    conv2D->common->inputCount  = channel[1];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    if (nullptr != bias) {
        return Variable::create(Expr::create(std::move(op), {x, weight, bias}));
    }
    return Variable::create(Expr::create(std::move(op), {x, weight}));
}
2020-11-05 16:41:56 +08:00
// Transposed (de)convolution with weight/bias embedded in the op.
// channel = {inputCount, outputCount}; depthwise when both equal `group`.
VARP _Deconv(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
             PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, bool relu6) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = (channel[0] == channel[1] && channel[0] == group) ? OpType_DeconvolutionDepthwise
                                                                 : OpType_Deconvolution;
    op->main.type  = OpParameter_Convolution2D;
    op->main.value = new Convolution2DT;
    auto conv2D = op->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padMode = _convertPadMode(pad);
    if (pads.size() == 2) {
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    conv2D->common->relu6       = relu6;
    conv2D->common->relu        = relu;
    MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
    conv2D->weight = std::move(weight);
    MNN_ASSERT(bias.size() == channel[1]);
    conv2D->bias = std::move(bias);
    return Variable::create(Expr::create(op.get(), {x}));
}
2019-12-27 22:16:57 +08:00
// Shared builder for pooling ops. A {-1, -1} kernel selects global pooling
// over the whole spatial extent.
static VARP _Pool(VARP x, INTS kernel, INTS stride, PoolType type, PaddingMode pad, INTS pads) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Pooling;
    op->main.type  = OpParameter_Pool;
    op->main.value = new PoolT;
    auto poolParam = op->main.AsPool();
    if (kernel[0] == -1 && kernel[1] == -1) {
        poolParam->isGlobal = true;
    }
    poolParam->padX = 0;
    poolParam->padY = 0;
    if (pads.size() >= 2) {
        poolParam->padX = pads[0];
        poolParam->padY = pads[1];
    }
    poolParam->padType = _convertPoollingPadMode(pad);
    poolParam->kernelX = kernel[0];
    poolParam->kernelY = kernel[1];
    poolParam->strideX = stride[0];
    poolParam->strideY = stride[1];
    poolParam->type    = type;
    return Variable::create(Expr::create(op.get(), {x}));
}
// Average pooling; see _Pool for kernel/pad semantics.
VARP _AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) {
    return _Pool(x, kernel, stride, PoolType_AVEPOOL, pad, pads);
}
// Max pooling; see _Pool for kernel/pad semantics.
VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) {
    return _Pool(x, kernel, stride, PoolType_MAXPOOL, pad, pads);
}
2020-01-15 13:33:47 +08:00
/*Reshapes a variable.
Args:
x: A variable.
shape: A vector, the shape of the target variable.
original_format: An enum, only NCHW/NHWC is allowed, NC4HW4 is not allowed,
as it provides additional information (x comes from NCHW or NHWC) when x is NC4HW4.
Returns:
output: A variable with the same type as `x`.
*/
VARP _Reshape(VARP x, INTS shape, Dimensionformat original_format) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Reshape;
    op->main.type  = OpParameter_Reshape;
    op->main.value = new ReshapeT;
    auto param = op->main.AsReshape();
    param->dims    = shape;
    param->dimType = (MNN_DATA_FORMAT)Utils::convertFormat(original_format);
    return Variable::create(Expr::create(op.get(), {x}));
}
2020-01-15 13:33:47 +08:00
/*Reshapes a variable.
Args :
x : A variable .
shape : A variable , the shape of the target variable .
Returns :
output : A variable with the same type as ` x ` .
*/
2019-12-27 22:16:57 +08:00
VARP _Reshape ( VARP x , VARP shape ) {
2020-01-15 13:33:47 +08:00
MNN_ASSERT ( nullptr ! = x ) ;
2019-12-27 22:16:57 +08:00
std : : unique_ptr < OpT > reshape ( new OpT ) ;
reshape - > type = OpType_Reshape ;
reshape - > main . type = OpParameter_Reshape ;
reshape - > main . value = new ReshapeT ;
2021-04-08 15:34:23 +08:00
if ( nullptr ! = x - > getInfo ( ) ) {
reshape - > main . AsReshape ( ) - > dimType = ( MNN_DATA_FORMAT ) Utils : : convertFormat ( x - > getInfo ( ) - > order ) ;
} else {
reshape - > main . AsReshape ( ) - > dimType = MNN_DATA_FORMAT_NHWC ;
}
2019-12-27 22:16:57 +08:00
return ( Variable : : create ( Expr : : create ( reshape . get ( ) , { x , shape } ) ) ) ;
}
// Per-channel affine transform: y = x * scales + bias, with `channels`
// entries in each parameter vector.
VARP _Scale(VARP x, int channels, std::vector<float>&& scales, std::vector<float>&& bias) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Scale;
    op->main.type  = OpParameter_Scale;
    op->main.value = new ScaleT;
    auto param = op->main.AsScale();
    param->channels  = channels;
    param->scaleData = std::move(scales);
    param->biasData  = std::move(bias);
    return Variable::create(Expr::create(std::move(op), {x}));
}
2020-01-15 13:33:47 +08:00
/*Given an input value x, it computes the output as x if x > 0 and slope * x if x <= 0.
Args :
x : A variable .
slope : A float , a positive float value , it leakes the negative part by multiplying with ` slope ` rather than setting it to 0.0f .
Returns :
output : A variable with the same type as ` x ` .
*/
2019-12-27 22:16:57 +08:00
VARP _Relu ( VARP x , float slope ) {
std : : unique_ptr < OpT > relu ( new OpT ) ;
relu - > type = OpType_ReLU ;
relu - > main . type = OpParameter_Relu ;
relu - > main . value = new ReluT ;
relu - > main . AsRelu ( ) - > slope = slope ;
return ( Variable : : create ( Expr : : create ( relu . get ( ) , { x } ) ) ) ;
}
2021-04-08 15:34:23 +08:00
/*Given an input value x, it computes Rectified Linear 6: min(max(x, 0), 6).
2020-01-15 13:33:47 +08:00
Args :
x : A variable .
Returns :
output : A variable with the same type as ` x ` .
*/
2020-11-05 16:41:56 +08:00
VARP _Relu6 ( VARP x , float minValue , float maxValue ) {
2019-12-27 22:16:57 +08:00
std : : unique_ptr < OpT > relu ( new OpT ) ;
relu - > type = OpType_ReLU6 ;
2020-11-05 16:41:56 +08:00
relu - > main . value = new Relu6T ;
relu - > main . type = OpParameter_Relu6 ;
relu - > main . AsRelu6 ( ) - > maxValue = maxValue ;
relu - > main . AsRelu6 ( ) - > minValue = minValue ;
2019-12-27 22:16:57 +08:00
return ( Variable : : create ( Expr : : create ( relu . get ( ) , { x } ) ) ) ;
}
2020-01-15 13:33:47 +08:00
/*Given an input value x, it computes the output as x if x > 0 and slopes * x if x <= 0.
Args :
x : A variable , must be 4 - D with NC4HW4 format .
slopes : A vector , has save size as x .
Returns :
output : A variable with the same type as ` x ` .
*/
2019-12-27 22:16:57 +08:00
VARP _PRelu ( VARP x , std : : vector < float > & & slopes ) {
std : : unique_ptr < OpT > prelu ( new OpT ) ;
prelu - > type = OpType_PReLU ;
prelu - > main . type = OpParameter_PRelu ;
prelu - > main . value = new PReluT ;
prelu - > main . AsPRelu ( ) - > slope = slopes ;
2021-01-06 16:29:37 +08:00
prelu - > main . AsPRelu ( ) - > slopeCount = ( int ) slopes . size ( ) ;
2019-12-27 22:16:57 +08:00
return ( Variable : : create ( Expr : : create ( prelu . get ( ) , { x } ) ) ) ;
}
2020-01-15 13:33:47 +08:00
/*Computes softmax activations.
Args:
logits: A non-empty variable. Must be Halide_Type_Float.
axis: The dimension softmax would be performed on. The default is -1, which indicates the last dimension.
Returns:
output: A variable with the same type as `logits`.
*/
VARP _Softmax(VARP logits, int axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Softmax;
    op->main.type  = OpParameter_Axis;
    op->main.value = new AxisT;
    op->main.AsAxis()->axis = axis;
    return Variable::create(Expr::create(op.get(), {logits}));
}
2020-01-15 13:33:47 +08:00
/*Computes softplus: log(exp(features) + 1).
Args:
features: A variable. Must be Halide_Type_Float.
Returns:
A variable with the same type as `features`.
*/
VARP _Softplus(VARP features) {
    // Composed from primitive ops rather than a dedicated kernel.
    auto expFeatures = _Exp(features);
    return _Log(_Add(expFeatures, _Const(1)));
}
2020-01-15 13:33:47 +08:00
/*Computes softsign: features / (abs(features) + 1).
Args:
features: A variable. Must be Halide_Type_Float.
Returns:
A variable with the same type as `features`.
*/
VARP _Softsign(VARP features) {
    // Composed from primitive ops rather than a dedicated kernel.
    auto denominator = _Add(_Abs(features), _Const(1));
    return _Divide(features, denominator);
}
2020-01-15 13:33:47 +08:00
/*Concatenates variables along one dimension.
Args:
values: A list of variables or a single variable.
axis: An int. Dimension along which to concatenate.
Must be in the range [-rank(values), rank(values)).
As in Python, indexing for axis is 0-based.
Positive axis in the range of [0, rank(values)) refers to the axis-th dimension,
and negative axis refers to the (axis + rank(values))-th dimension.
Returns:
A variable resulting from concatenation of the input variables.
*/
VARP _Concat(VARPS values, int axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Concat;
    op->main.type  = OpParameter_Axis;
    op->main.value = new AxisT;
    op->main.AsAxis()->axis = axis;
    return Variable::create(Expr::create(op.get(), values));
}
2020-01-15 13:33:47 +08:00
/*Convert a variable to another format (possibly added after `input`).
Args:
input: A variable.
format: The target format.
Returns:
A variable. If `input` is already `format`, then return `input` directly; otherwise add a variable after `input` with `format`.
*/
VARP _Convert(VARP input, Dimensionformat format) {
    auto inputInfo = input->getInfo();
    if (nullptr != inputInfo && inputInfo->order == format) {
        // Already in the requested layout — no op needed.
        return input;
    }
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_ConvertTensor;
    op->main.type  = OpParameter_TensorConvertInfo;
    op->main.value = new TensorConvertInfoT;
    op->main.AsTensorConvertInfo()->dest = (MNN_DATA_FORMAT)Utils::convertFormat(format);
    return Variable::create(Expr::create(op.get(), {input}));
}
2020-01-15 13:33:47 +08:00
/*Splits a variable value into a list of sub variables.
Args :
value : The variable to split .
size_splits : A vector , a 1 - D integer containing the sizes of each output variable along axis .
axis : A int , the dimension along which to split . Must be in the range [ - rank ( value ) , rank ( value ) ) . Defaults to 0
Returns :
A list of variables .
*/
std : : vector < VARP > _Split ( VARP value , INTS size_splits , int axis ) {
MNN_ASSERT ( size_splits . size ( ) > = 1 ) ;
2019-12-27 22:16:57 +08:00
std : : unique_ptr < OpT > op ( new OpT ) ;
op - > type = OpType_Slice ;
op - > main . type = OpParameter_Slice ;
op - > main . value = new SliceT ;
op - > main . AsSlice ( ) - > axis = axis ;
op - > main . AsSlice ( ) - > sourceType = NetSource_TENSORFLOW ;
2020-01-15 13:33:47 +08:00
op - > main . AsSlice ( ) - > slicePoints = size_splits ;
2019-12-27 22:16:57 +08:00
2020-01-15 13:33:47 +08:00
int slices = size_splits . size ( ) = = 1 ? size_splits [ 0 ] : ( int ) size_splits . size ( ) ;
EXPRP expr = Expr : : create ( std : : move ( op ) , { value } , slices ) ;
2019-12-27 22:16:57 +08:00
std : : vector < VARP > res ;
for ( int i = 0 ; i < slices ; + + i ) {
res . emplace_back ( Variable : : create ( expr , i ) ) ;
}
return res ;
}
// TensorFlow-style slice: extract `sizes` elements starting at `starts`.
// The op carries no parameters; everything comes from the input variables.
VARP _Slice(VARP x, VARP starts, VARP sizes) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_SliceTf;
    return Variable::create(Expr::create(op.get(), {x, starts, sizes}));
}
2020-03-23 09:32:02 +08:00
// TensorFlow-style strided slice; the mask integers follow tf.strided_slice
// semantics (bit i applies to dimension i).
VARP _StridedSlice(VARP input, VARP begin, VARP end, VARP strided, int32_t beginMask,
                   int32_t endMask, int32_t ellipsisMask, int32_t newAxisMask, int32_t shrinkAxisMask) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_StridedSlice;
    op->main.type  = OpParameter_StridedSliceParam;
    op->main.value = new StridedSliceParamT;
    auto param = op->main.AsStridedSliceParam();
    param->T              = DataType_DT_FLOAT;
    param->beginMask      = beginMask;
    param->endMask        = endMask;
    param->ellipsisMask   = ellipsisMask;
    param->newAxisMask    = newAxisMask;
    param->shrinkAxisMask = shrinkAxisMask;
    return Variable::create(Expr::create(op.get(), {input, begin, end, strided}));
}
2022-02-18 11:30:27 +08:00
// Strided-slice write: like _StridedSlice but with an extra `write` input
// whose values are scattered into the selected region.
VARP _StridedSliceWrite(VARP input, VARP begin, VARP end, VARP strided, VARP write, int32_t beginMask,
                        int32_t endMask, int32_t ellipsisMask, int32_t newAxisMask, int32_t shrinkAxisMask) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_StridedSlice;
    op->main.type  = OpParameter_StridedSliceParam;
    op->main.value = new StridedSliceParamT;
    auto param = op->main.AsStridedSliceParam();
    param->T              = DataType_DT_FLOAT;
    param->beginMask      = beginMask;
    param->endMask        = endMask;
    param->ellipsisMask   = ellipsisMask;
    param->newAxisMask    = newAxisMask;
    param->shrinkAxisMask = shrinkAxisMask;
    return Variable::create(Expr::create(op.get(), {input, begin, end, strided, write}));
}
2020-01-15 13:33:47 +08:00
/*Transposes x.
Args :
x : A variable .
perm : A vector , indicating the permutation of the dimensions of x .
Returns :
A transposed variable .
*/
2019-12-27 22:16:57 +08:00
VARP _Transpose ( VARP x , INTS perm ) {
auto permVar = _Const ( ( const void * ) perm . data ( ) , { static_cast < int > ( perm . size ( ) ) } , NHWC , halide_type_of < int > ( ) ) ;
return _Transpose ( x , permVar ) ;
}
// Transpose with the permutation supplied as a variable (int32 data).
VARP _Transpose(VARP x, VARP perm) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Transpose;
    op->main.type  = OpParameter_Transpose;
    op->main.value = new TransposeT;
    op->main.AsTranspose()->Tperm = DataType_DT_INT32;
    return Variable::create(Expr::create(std::move(op), {x, perm}));
}
// Channel shuffle (as in ShuffleNet): split channels into `group` groups,
// swap the group axis with the per-group channel axis, flatten back, and
// return the result in NC4HW4 layout.
VARP _ChannelShuffle(VARP x, int group) {
    auto shuffled = _Convert(x, NHWC);
    shuffled = _Reshape(shuffled, {0, 0, 0, group, -1}, NHWC);
    shuffled = _Transpose(shuffled, {0, 1, 2, 4, 3});
    shuffled = _Reshape(shuffled, {0, 0, 0, -1}, NHWC);
    return _Convert(shuffled, NC4HW4);
}
// Reverse variable-length slices of x along seqDim, with per-batch lengths
// given by y; batchDim selects the batch axis.
VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_ReverseSequence;
    op->main.type  = OpParameter_ReverseSequenceParam;
    op->main.value = new ReverseSequenceParamT;
    auto param = op->main.AsReverseSequenceParam();
    param->batchDim = batchDim;
    param->seqDim   = seqDim;
    return Variable::create(Expr::create(op.get(), {x, y}));
}
2020-01-15 13:33:47 +08:00
/*Convert a variable to another format (possibly added before `input`).
Args:
input: A variable.
format: The target format.
Returns:
A variable. If `input` is already `format`, then return `input` directly; otherwise add a variable before `input` with `format`.
*/
VARP _ChangeInputFormat(VARP input, Dimensionformat format) {
    if (nullptr == input || nullptr == input->getInfo()) {
        return nullptr;
    }
    auto inputInfo = input->getInfo();
    if (inputInfo->order == format) {
        return input;
    }
    // Build a fresh input in the requested format, convert back to the
    // original order, and splice the converter in where `input` was.
    auto newInput = _Input(inputInfo->dim, format, inputInfo->type);
    auto cvt = _Convert(newInput, inputInfo->order);
    Variable::replace(input, cvt);
    return newInput;
}
// Clone a variable. A shallow clone shares the source expression output;
// a deep copy materializes the source data into a new INPUT variable.
VARP _Clone(VARP source, bool deepCopy) {
    if (nullptr == source || nullptr == source->expr().first) {
        return nullptr;
    }
    if (!deepCopy) {
        return Variable::create(source->expr().first, source->expr().second);
    }
    auto info   = source->getInfo();
    auto srcPtr = source->readMap<void>();
    if (nullptr == info) {
        MNN_ERROR("Source buffer info is not available.\n");
        return nullptr;
    }
    auto cloned = _Input(info->dim, info->order, info->type);
    auto dstPtr = cloned->writeMap<void>();
    // Copy only when there is data on both sides; an unreadable source
    // yields an uninitialized clone rather than a crash.
    if (info->size && dstPtr && srcPtr) {
        ::memcpy(dstPtr, srcPtr, info->size * info->type.bytes());
    }
    return cloned;
}
2020-03-16 13:46:01 +08:00
// Gradient of a 2-D convolution with respect to its filter.
// `input` is the forward input, `inputGrad` the gradient flowing back from
// the forward output; channel counts are read from their dim[1] entries.
// Fix vs. original: removed the unused local `weightDims`, which was
// computed and immediately discarded.
VARP _Conv2DBackPropFilter(VARP input, VARP inputGrad, INTS kernelSize, PaddingMode pad, INTS stride, INTS dilate,
                           int group, INTS pads) {
    std::unique_ptr<OpT> convOp(new OpT);
    convOp->type = OpType_Conv2DBackPropFilter;
    // NOTE(review): srcShape/dstShape are not null-checked; callers must
    // pass variables with resolved info — confirm against call sites.
    auto srcShape = input->getInfo();
    auto dstShape = inputGrad->getInfo();
    auto channel  = std::vector<int>{srcShape->dim[1], dstShape->dim[1]};
    convOp->main.type  = OpParameter_Convolution2D;
    convOp->main.value = new Convolution2DT;
    auto conv2D = convOp->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padX        = pads[0];
    conv2D->common->padY        = pads[1];
    conv2D->common->padMode     = _convertPadMode(pad);
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    return Variable::create(Expr::create(std::move(convOp), {input, inputGrad}));
}
VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type,
               PaddingMode pad, INTS pads) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_PoolGrad;
    op->main.type  = OpParameter_Pool;
    op->main.value = new PoolT;
    auto poolParam = op->main.AsPool();
    // kernel == {-1, -1} is the convention for global pooling.
    if (-1 == kernel[0] && -1 == kernel[1]) {
        poolParam->isGlobal = true;
    }
    poolParam->padX = 0;
    poolParam->padY = 0;
    if (pads.size() >= 2) {
        poolParam->padX = pads[0];
        poolParam->padY = pads[1];
    }
    poolParam->padType = _convertPoollingPadMode(pad);
    poolParam->kernelX = kernel[0];
    poolParam->kernelY = kernel[1];
    poolParam->strideX = stride[0];
    poolParam->strideY = stride[1];
    poolParam->type    = (PoolType)type;
    return Variable::create(Expr::create(std::move(op), {originInput, originOutput, inputGrad}));
}
2020-01-15 13:33:47 +08:00
/*Crop images.
Args :
images : 4 - D variable of NC4HW4 format .
size : A variable . It takes the shape of ` size ` as output cropped variable ' s shape while omits the values / format of ` size ` .
axis: An int indicating the dimension to crop. Must be >= 2. All dimensions up to but excluding `axis` are preserved, while the dimensions including and trailing `axis` are cropped.
offset : A vector of int indicating the offsets . length ( ` offset ` ) must be > = 1 and < = 2. If length ( ` offset ` ) is 1 , then all dimensions are offset by this amount . Otherwise , the number of offsets must equal the number of cropped axes in each dimension accordingly .
Returns :
The cropped 4 - D variable of NC4HW4 format .
*/
VARP _Crop(VARP images, VARP size, int axis, INTS offset) {
    std::unique_ptr<OpT> op(new OpT);
    op->type      = OpType_Crop;
    auto param    = new CropT;
    param->axis   = axis;
    param->offset = offset;
    op->main.type  = OpParameter_Crop;
    op->main.value = param;
    // `size` contributes only its shape to the output; its values are ignored.
    return Variable::create(Expr::create(std::move(op), {images, size}));
}
2020-01-15 13:33:47 +08:00
/*Resize images.
Args :
images : 4 - D variable of NC4HW4 format .
xScale : A float .
yScale : A float .
Returns :
The resized 4 - D variable of NC4HW4 format .
*/
VARP _Resize(VARP images, float xScale, float yScale) {
    std::unique_ptr<OpT> op(new OpT);
    op->type      = OpType_Resize;
    auto param    = new ResizeT;
    param->xScale = xScale;
    param->yScale = yScale;
    op->main.type  = OpParameter_Resize;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {images}));
}
2020-01-15 13:33:47 +08:00
/*Pads a variable.
Args :
x : A variable .
paddings : A variable of type Halide_Type_Int . The shape is [ n , 2 ] where n is the rank of variable .
mode : A enum , One of PadValueMode_CONSTANT , PadValueMode_SYMMETRIC , or PadValueMode_REFLECT .
Returns :
A variable . Has the same type as x .
*/
VARP _Pad(VARP x, VARP paddings, PadValueMode mode) {
    std::unique_ptr<OpT> op(new OpT);
    op->type   = OpType_Padding;
    auto param = new PadParamT;
    // CONSTANT is both the explicit mapping and the fallback for unknown modes.
    param->mode = MNN::PadValueMode_CONSTANT;
    switch (mode) {
        case SYMMETRIC:
            param->mode = MNN::PadValueMode_SYMMETRIC;
            break;
        case REFLECT:
            param->mode = MNN::PadValueMode_REFLECT;
            break;
        case CONSTANT:
        default:
            break;
    }
    op->main.type  = OpParameter_PadParam;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {x, paddings}));
}
2020-01-15 13:33:47 +08:00
/*Returns a variable with an additional dimension inserted at index axis.
Args :
input : A variable .
axis : A int , specifying the dimension index at which to expand the shape of input .
Given an input of D dimensions , axis must be in range [ - ( D + 1 ) , D ] ( inclusive ) .
Returns :
A variable with the same data as input , with an additional dimension inserted at the index specified by axis .
*/
VARP _ExpandDims(VARP input, int axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type    = OpType_ExpandDims;
    auto param  = new ExpandDimsT;
    param->axis = axis;
    op->main.type  = OpParameter_ExpandDims;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {input}));
}
2020-01-15 13:33:47 +08:00
VARP _ExpandDims(VARP input, VARP axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_ExpandDims;
    // Axis arrives as a runtime input here; the parameter table stays at defaults.
    op->main.type  = OpParameter_ExpandDims;
    op->main.value = new ExpandDimsT;
    return Variable::create(Expr::create(std::move(op), {input, axis}));
}
2020-01-15 13:33:47 +08:00
/*Returns the shape of a variable.
Args :
input : A variable .
Returns :
A variable of Halide_Type_Int .
*/
2020-11-05 16:41:56 +08:00
VARP _Shape(VARP input, bool nchw) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Shape;
    // When requested, report the shape in NCHW order regardless of source layout.
    if (nchw) {
        op->defaultDimentionFormat = MNN_DATA_FORMAT_NCHW;
    }
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Stacks a list of rank-R variables into one rank-(R+1) variable.
Packs the list of variables in `values` into a variable with rank one higher than each variable in values,
by packing them along the axis dimension .
Given a list of length N of variables of shape ( A , B , C ) ;
if axis = = 0 then the output variable will have the shape ( N , A , B , C ) .
if axis = = 1 then the output variable will have the shape ( A , N , B , C ) . Etc .
Args :
values : A list of variable objects with the same shape and type .
axis : An int . The axis to stack along . Defaults to the first dimension . Negative values wrap around ,
so the valid range is [ - ( R + 1 ) , R + 1 ) .
Returns :
output : A stacked variable with the same type as ` values ` .
*/
VARP _Stack(VARPS values, int axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type    = OpType_Pack;
    auto param  = new PackParamT;
    param->axis = axis;
    op->main.type  = OpParameter_PackParam;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), values));
}
2020-01-15 13:33:47 +08:00
/*Extracts crops from the input image variable and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change)
to a common output size specified by crop_size .
Returns a variable with crops from the input image at positions defined at the bounding box locations in boxes .
The cropped boxes are all resized ( with bilinear or nearest neighbor interpolation ) to a fixed size = [ crop_height , crop_width ] .
The result is a 4 - D tensor [ num_boxes , crop_height , crop_width , depth ] ( supposing NHWC format ) .
Arguments :
image : A 4 - D variable of shape [ batch , image_height , image_width , depth ] ( supposing NHWC format ) . Both image_height and image_width need to be positive .
boxes : A 2 - D variable of shape [ num_boxes , 4 ] . The i - th row of the variable specifies the coordinates of a box in the box_ind [ i ] image and is specified in normalized coordinates [ y1 , x1 , y2 , x2 ] .
A normalized coordinate value of y is mapped to the image coordinate at y * ( image_height - 1 ) , so as the [ 0 , 1 ] interval of normalized image height is mapped to [ 0 , image_height - 1 ] in image height coordinates . We do allow y1 > y2 , in which case the sampled crop is an up - down flipped version of the original image . The width dimension is treated similarly . Normalized coordinates outside the [ 0 , 1 ] range are allowed , in which case we use extrapolation_value to extrapolate the input image values .
box_ind : A 1 - D variable of shape [ num_boxes ] with int values in [ 0 , batch ) . The value of box_ind [ i ] specifies the image that the i - th box refers to .
crop_size : A 1 - D variable of 2 elements , size = [ crop_height , crop_width ] . All cropped image patches are resized to this size . The aspect ratio of the image content is not preserved . Both crop_height and crop_width need to be positive .
method : A enum , either CropAndResizeMethod_NEAREST , or CropAndResizeMethod_BILINEAR , default to CropAndResizeMethod_BILINEAR .
extrapolation_value : Value used for extrapolation , when applicable .
Returns :
Output : A 4 - D variable of shape [ num_boxes , crop_height , crop_width , depth ] ( supposing NHWC format ) .
*/
VARP _CropAndResize(VARP image, VARP boxes, VARP box_ind, VARP crop_size, InterpolationMethod method, float extrapolation_value) {
    std::unique_ptr<OpT> op(new OpT);
    op->type   = OpType_CropAndResize;
    auto param = new CropAndResizeT;
    param->extrapolationValue = extrapolation_value;
    // BILINEAR is the fallback for any method value other than NEAREST.
    param->method = (NEAREST == method) ? CropAndResizeMethod_NEAREST : CropAndResizeMethod_BILINEAR;
    op->main.type  = OpParameter_CropAndResize;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {image, boxes, box_ind, crop_size}));
}
2020-01-15 13:33:47 +08:00
/*Creates a variable filled with a scalar value.
Args :
dims : A variable . Must be 1 - D Halide_Type_Int . Represents the shape of the output variable .
value : A variable . 0 - D ( scalar ) . Value to fill the returned variable .
Returns :
A variable . Has the same type as value .
*/
VARP _Fill(VARP dims, VARP value) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Fill;
    op->main.type  = OpParameter_Fill;
    op->main.value = new FillT;
    return Variable::create(Expr::create(std::move(op), {dims, value}));
}
2020-01-15 13:33:47 +08:00
/*Constructs a variable by tiling a given variable.
Args :
input : A variable . 1 - D or higher .
multiples : A variable . Must be 1 - D Halide_Type_Int . Length must be the same as the number of dimensions in input .
Returns :
A variable . Has the same type as input .
*/
VARP _Tile(VARP input, VARP multiples) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Tile;
    // Tile carries no parameter table; both operands arrive as inputs.
    return Variable::create(Expr::create(std::move(op), {input, multiples}));
}
2020-01-15 13:33:47 +08:00
/*Gather slices from params according to indices.
Arguments :
params : The variable from which to gather values .
indices : Index variable . Must be Halide_Type_Int in range [ 0 , ndims ( params ) - 1 ] .
Returns :
Output : Values from params gathered from indices given by indices .
*/
VARP _Gather(VARP params, VARP indices) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Gather;
    return Variable::create(Expr::create(std::move(op), {params, indices}));
}
2020-01-15 13:33:47 +08:00
/*Gather slices from params axis according to indices.
Arguments :
params : The variable from which to gather values .
indices : Index variable . Must be Halide_Type_Int in range [ 0 , ndims ( params ) - 1 ] .
axis : A int , the axis in params to gather indices from . Supports negative indexes .
If set to 0 , it ' s same as _Gather . Currently only 0 is supported .
Returns :
Output : Values from params gathered from indices given by indices .
*/
2019-12-27 22:16:57 +08:00
VARP _GatherV2(VARP params, VARP indices, VARP axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_GatherV2;
    op->main.type  = OpParameter_GatherV2;
    op->main.value = new GatherV2T;
    // The axis input is optional; omit it from the expression when absent.
    std::vector<VARP> inputs{params, indices};
    if (axis.get()) {
        inputs.emplace_back(axis);
    }
    return Variable::create(Expr::create(std::move(op), inputs));
}
2020-01-15 13:33:47 +08:00
/*Removes dimensions of size 1 from the shape of a variable.
Args :
input : A variable . The input to squeeze .
axis : A vector , Defaults to { } . If specified , only squeezes the dimensions listed . The dimension index starts at 0.
Must be in the range [ - rank ( input ) , rank ( input ) ) .
Returns :
A variable . Has the same type as input . Contains the same data as input , but has one or more dimensions of size 1 removed .
*/
VARP _Squeeze(VARP input, INTS axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Squeeze;
    auto param = new SqueezeParamT;
    param->squeezeDims = axis;
    op->main.type  = OpParameter_SqueezeParam;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {input}));
}
2020-01-15 13:33:47 +08:00
VARP _Unsqueeze(VARP input, INTS axis) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Unsqueeze;
    // Unsqueeze reuses the squeeze parameter table; the dims name the inserted axes.
    auto param = new SqueezeParamT;
    param->squeezeDims = axis;
    op->main.type  = OpParameter_SqueezeParam;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Computes exponential linear: alpha * (exp(features) - 1) if < 0, features otherwise.
features : A variable of type Halide_Type_Float
alpha : Alpha factor ( positive float )
Returns :
A variable . Has the same type as features .
*/
VARP _Elu(VARP features, float alpha) {
    std::unique_ptr<OpT> op(new OpT);
    op->type     = OpType_ELU;
    auto param   = new ELUT;
    param->alpha = alpha;
    op->main.type  = OpParameter_ELU;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {features}));
}
2020-11-05 16:41:56 +08:00
/*Given an input value x, it computes the output as 1.0 if x > threshold and 0.0 if x <= threshold.
features : A variable of type Halide_Type_Float
threshold : threshold value
Returns :
A variable . Has the same type as features .
*/
VARP _Threshold(VARP features, float threshold) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Threshold;
    // Threshold reuses the ELU parameter table; `alpha` carries the threshold value.
    auto param   = new ELUT;
    param->alpha = threshold;
    op->main.type  = OpParameter_ELU;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {features}));
}
2019-12-27 22:16:57 +08:00
/*Computes the size of the variable
Args :
input : A variable of type Halide_Type_Float or Halide_Type_Int
Returns :
A variable . The shape is ( ) , and type is Halide_Type_Int
*/
VARP _Size(VARP input) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Size;
    // No parameter table: the op only needs its single input.
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Computes scaled exponential linear: scale * alpha * (exp(features) - 1) if < 0, scale * features otherwise.
Args :
features : A variable of type Halide_Type_Float
scale : Scaling factor ( positive float )
alpha : Alpha factor ( positive float )
Returns :
A variable . Has the same type as features .
*/
VARP _Selu(VARP features, float scale, float alpha) {
    std::unique_ptr<OpT> op(new OpT);
    op->type     = OpType_Selu;
    auto param   = new SeluT;
    param->scale = scale;
    param->alpha = alpha;
    op->main.type  = OpParameter_Selu;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {features}));
}
/*Gather slices from params into a variable with shape specified by indices.
Args :
params : A variable . The variables from which to gather values .
indices : A variable . Must be one of the following types : Halide_Type_Int .
Returns :
A variable . Has the same type as params .
*/
VARP _GatherND(VARP params, VARP indices) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_GatherND;
    return Variable::create(Expr::create(std::move(op), {params, indices}));
}
/*BatchToSpace for N-D variables
This operation reshapes the " batch " dimension 0 into M + 1 dimensions of shape block_shape + [ batch ] ,
interleaves these blocks back into the grid defined by the spatial dimensions [ 1 , . . . , M ] ,
to obtain a result with the same rank as the input .
The spatial dimensions of this intermediate result are then optionally cropped according to crops to
produce the output . This is the reverse of SpaceToBatch . See below for a precise description .
Arguments :
input : must be 4 - D with NC4HW4 format . N - D with shape input_shape = [ batch ] + spatial_shape + remaining_shape , where spatial_shape has M dimensions .
block_shape : 1 - D with shape [ M ] , all values must be > = 1.
crops : 2 - D with shape [ M , 2 ] , all values must be > = 0. crops [ i ] = [ crop_start , crop_end ] specifies the amount to crop from input dimension i + 1 ,
which corresponds to spatial dimension i . It is required that crop_start [ i ] + crop_end [ i ] < = block_shape [ i ] * input_shape [ i + 1 ] .
This operation is equivalent to the following steps :
Reshape input to reshaped of shape : [ block_shape [ 0 ] , . . . , block_shape [ M - 1 ] , batch / prod ( block_shape ) ,
input_shape [ 1 ] , . . . , input_shape [ N - 1 ] ]
Permute dimensions of reshaped to produce permuted of shape
[ batch / prod ( block_shape ) , input_shape [ 1 ] , block_shape [ 0 ] , . . . , input_shape [ M ] , block_shape [ M - 1 ] , input_shape [ M + 1 ] , . . . , input_shape [ N - 1 ] ]
Reshape permuted to produce reshaped_permuted of shape
[ batch / prod ( block_shape ) , input_shape [ 1 ] * block_shape [ 0 ] , . . . , input_shape [ M ] * block_shape [ M - 1 ] , input_shape [ M + 1 ] , . . . , input_shape [ N - 1 ] ]
Crop the start and end of dimensions [ 1 , . . . , M ] of reshaped_permuted according to crops to produce the output of shape :
[ batch / prod ( block_shape ) , input_shape [ 1 ] * block_shape [ 0 ] - crops [ 0 , 0 ] - crops [ 0 , 1 ] , . . . , input_shape [ M ] * block_shape [ M - 1 ] - crops [ M - 1 , 0 ] - crops [ M - 1 , 1 ] , input_shape [ M + 1 ] , . . . , input_shape [ N - 1 ] ]
Some examples :
for the following input of shape [ 4 , 1 , 1 , 3 ] , block_shape = [ 2 , 2 ] , and crops = [[0, 0], [0, 0]] :
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
The output variable has shape [ 1 , 2 , 2 , 3 ] and value :
x = [[[[1, 2, 3], [4, 5, 6]] ,
[[7, 8, 9], [10, 11, 12]]]]
Returns :
Output : The output variable
*/
VARP _BatchToSpaceND(VARP input, VARP block_shape, VARP crops) {
    auto blockInfo = block_shape->getInfo();
    auto cropsInfo = crops->getInfo();
    MNN_ASSERT(blockInfo != nullptr);
    MNN_ASSERT(cropsInfo != nullptr);
    MNN_ASSERT(halide_type_int == blockInfo->type.code);
    MNN_ASSERT(halide_type_int == cropsInfo->type.code);
    // Snapshot a constant int variable into a BlobT the serialized op can own.
    auto makeBlob = [](VARP var, const Variable::Info* info) {
        std::unique_ptr<BlobT> blob(new BlobT);
        blob->dims       = info->dim;
        blob->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info->order);
        blob->dataType   = (MNN::DataType)Utils::convertDataType(info->type);
        auto data = var->readMap<int>();
        for (int i = 0; i < info->size; ++i) {
            blob->int32s.emplace_back(data[i]);
        }
        return blob;
    };
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_BatchToSpaceND;
    op->main.type  = OpParameter_SpaceBatch;
    op->main.value = new SpaceBatchT;
    op->main.AsSpaceBatch()->blockShape = makeBlob(block_shape, blockInfo);
    // SpaceBatch's `padding` slot stores the crops for the BatchToSpace direction.
    op->main.AsSpaceBatch()->padding = makeBlob(crops, cropsInfo);
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Copies a variable setting everything outside a central band in each innermost matrix.
Arguments :
input : Rank k variable .
num_lower : Number of subdiagonals to keep . If negative , keep entire lower triangle .
num_upper : Number of superdiagonals to keep . If negative , keep entire upper triangle .
Returns :
Output : Rank k variable of the same shape as input . The extracted banded tensor .
*/
VARP _MatrixBandPart(VARP input, VARP num_lower, VARP num_upper) {
    std::unique_ptr<OpT> op(new OpT);
    op->type      = OpType_MatrixBandPart;
    op->main.type = OpParameter_NONE;
    return Variable::create(Expr::create(std::move(op), {input, num_lower, num_upper}));
}
/*Calculates the mean and variance of x.
Args :
x : A variable . must be 4 - D with NC4HW4 format .
axes : Array of ints . Axes along which to compute mean and variance . Ignored for this implementation : must be { 2 , 3 }
shift : Not used in the current implementation .
keepdims : produce moments with the same dimensionality as the input . Ignored for this implementation : must be true .
Returns :
Two variable objects : mean and variance .
*/
std : : vector < VARP > _Moments ( VARP x , INTS axis , VARP shift , bool keepDims ) {
std : : unique_ptr < OpT > op ( new OpT ) ;
axis = { 2 , 3 } ;
keepDims = true ;
// if axis != {2,3} or keepDims != true, print warning.
// ignore shift.
op - > type = OpType_Moments ;
auto momentsParam = new MomentsParamT ;
op - > main . type = OpParameter_MomentsParam ;
momentsParam - > dim = axis ;
momentsParam - > keepDims = keepDims ;
op - > main . value = momentsParam ;
EXPRP expr = Expr : : create ( std : : move ( op ) , { x } , 2 ) ;
std : : vector < VARP > res ;
res . emplace_back ( Variable : : create ( expr , 0 ) ) ;
res . emplace_back ( Variable : : create ( expr , 1 ) ) ;
return res ;
}
/*Computes the difference between two lists of numbers or strings.
Given a list x and a list y , this operation returns a list out that represents all values that are in x but not in y .
The returned list out is sorted in the same order that the numbers appear in x ( duplicates are preserved ) .
This operation also returns a list idx that represents the position of each out element in x .
Arguments :
x : 1 - D variable of type Halide_Type_Int . Values to keep .
y : 1 - D variable of type Halide_Type_Int . Values to remove .
Returns :
Output out : 1 - D variable of type Halide_Type_Int . Values present in x but not in y .
*/
VARP _SetDiff1D(VARP x, VARP y) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_SetDiff1D;
    op->main.type  = OpParameter_NONE;
    op->main.value = nullptr;
    return Variable::create(Expr::create(std::move(op), {x, y}));
}
/*Rearranges blocks of spatial data, into depth.
More specifically , it outputs a copy of the input variable where values from the height and width dimensions are moved to the depth dimension .
The block_size indicates the input block size .
Non - overlapping blocks of size block_size x block_size are rearranged into depth at each location .
The depth of the output variable is block_size * block_size * input_depth .
The Y , X coordinates within each block of the input become the high order component of the output channel index .
The input variable ' s height and width must be divisible by block_size
Args :
input : A variable .
block_size : An int that is > = 2. The size of the spatial block .
Returns :
A variable . Has the same type as input .
*/
VARP _SpaceToDepth(VARP input, int block_size) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_SpaceToDepth;
    auto param = new DepthSpaceParamT;
    param->blockSize = block_size;
    op->main.type  = OpParameter_DepthSpaceParam;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*This operation divides "spatial" dimensions [1, ..., M] of the input into a grid of blocks of shape block_shape,
and interleaves these blocks with the " batch " dimension
such that in the output , the spatial dimensions [ 1 , . . . , M ] correspond to the position within the grid ,
and the batch dimension combines both the position within a spatial block and the original batch position .
Prior to division into blocks , the spatial dimensions of the input are optionally zero padded according to paddings .
See below for a precise description .
Args :
input : A variable . must be 4 - D with NC4HW4 format . N - D with shape input_shape = [ batch ] + spatial_shape + remaining_shape , where spatial_shape has M dimensions .
block_shape : A variable . Must be one of the following types : int32 , int64 . 1 - D with shape [ M ] , all values must be > = 1.
paddings : A variable . Must be one of the following types : int32 , int64 . 2 - D with shape [ M , 2 ] , all values must be > = 0. paddings [ i ] = [ pad_start , pad_end ] specifies the padding for input dimension i + 1 , which corresponds to spatial dimension i . It is required that block_shape [ i ] divides input_shape [ i + 1 ] + pad_start + pad_end .
Returns :
A variable . Has the same type as input .
*/
VARP _SpaceToBatchND(VARP input, VARP block_shape, VARP paddings) {
    auto blockInfo = block_shape->getInfo();
    auto padInfo   = paddings->getInfo();
    MNN_ASSERT(blockInfo != nullptr);
    MNN_ASSERT(padInfo != nullptr);
    MNN_ASSERT(halide_type_int == blockInfo->type.code);
    MNN_ASSERT(halide_type_int == padInfo->type.code);
    // Snapshot a constant int variable into a BlobT the serialized op can own.
    auto makeBlob = [](VARP var, const Variable::Info* info) {
        std::unique_ptr<BlobT> blob(new BlobT);
        blob->dims       = info->dim;
        blob->dataFormat = (MNN::MNN_DATA_FORMAT)Utils::convertFormat(info->order);
        blob->dataType   = (MNN::DataType)Utils::convertDataType(info->type);
        auto data = var->readMap<int>();
        for (int i = 0; i < info->size; ++i) {
            blob->int32s.emplace_back(data[i]);
        }
        return blob;
    };
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_SpaceToBatchND;
    auto param = new SpaceBatchT;
    param->blockShape = makeBlob(block_shape, blockInfo);
    param->padding    = makeBlob(paddings, padInfo);
    op->main.type  = OpParameter_SpaceBatch;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Creates a variable with all elements set to zero.
Args :
input : A variable .
Returns :
A variable with all elements set to zero .
*/
VARP _ZerosLike(VARP input) {
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_ZerosLike;
    op->main.type  = OpParameter_NONE;
    op->main.value = nullptr;
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Unpacks the given dimension of a rank-R tensor into rank-(R-1) variable.
For example , given a variable of shape ( A , B , C , D ) ;
If axis = = 0 then the i ' th variable in output is the slice value [ i , : , : , : ] and each variable in output will have shape ( B , C , D ) .
( Note that the dimension unpacked along is gone , unlike split ) .
If axis = = 1 then the i ' th variable in output is the slice value [ : , i , : , : ] and each variable in output will have shape ( A , C , D ) .
Args :
value : A rank R > 0 variable to be unstacked .
num : An int . The length of the dimension axis . Automatically inferred if None ( the default ) .
axis : An int . The axis to unstack along . Defaults to the first dimension . Negative values wrap around , so the valid range is [ - R , R ) .
Returns :
The list of variable objects unstacked from value .
*/
std : : vector < VARP > _Unstack ( VARP value , int axis ) {
std : : unique_ptr < OpT > op ( new OpT ) ;
op - > type = OpType_Unpack ;
auto info_value = value - > getInfo ( ) ;
MNN_ASSERT ( info_value ! = nullptr ) ;
auto dims = info_value - > dim ;
auto dimsize = dims . size ( ) ;
2021-06-11 17:17:13 +08:00
MNN_ASSERT ( dimsize > = 1 ) ;
2019-12-27 22:16:57 +08:00
axis = axis % dimsize ;
if ( axis < 0 ) {
axis + = dimsize ;
}
auto size = dims [ axis ] ;
MNN_ASSERT ( size > 0 ) ;
auto axisParam = new AxisT ;
axisParam - > axis = axis ;
op - > main . type = OpParameter_Axis ;
op - > main . value = axisParam ;
EXPRP expr = Expr : : create ( std : : move ( op ) , { value } , size ) ;
std : : vector < VARP > res ;
for ( int i = 0 ; i < size ; + + i ) {
res . emplace_back ( Variable : : create ( expr , i ) ) ;
}
return res ;
}
/*Returns the rank of a variable.
The result is a 0-D int32 variable holding the number of dimensions of input.
Note: the rank of a variable is not the same as the rank of a matrix;
it is the number of indices required to uniquely select each element
(also known as "order", "degree", or "ndims").
Args:
input: A variable.
Returns:
A 0-D variable of type Halide_Type_Int.
*/
VARP _Rank(VARP input) {
    std::unique_ptr<OpT> rankOp(new OpT);
    // Rank has no parameter payload.
    rankOp->main.type  = OpParameter_NONE;
    rankOp->main.value = nullptr;
    rankOp->type       = OpType_Rank;
    return Variable::create(Expr::create(std::move(rankOp), {input}));
}
/*Creates a sequence of numbers.
Args:
start: A 0-D variable (scalar). First entry of the range.
limit: A 0-D variable (scalar). Upper limit of the sequence, exclusive.
delta: A 0-D variable (scalar). Step between consecutive entries.
Returns:
A 1-D variable holding the generated sequence.
*/
VARP _Range(VARP start, VARP limit, VARP delta) {
    std::unique_ptr<OpT> op(new OpT);
    op->type = OpType_Range;
    auto rangeParam = new RangeT;
    // Guard against an unresolved input before dereferencing, consistent
    // with the assertion style used by _Unstack above.
    auto startInfo = start->getInfo();
    MNN_ASSERT(startInfo != nullptr);
    // The index type of the op follows the element type of `start`.
    rangeParam->Tidx = (MNN::DataType)Utils::convertDataType(startInfo->type);
    op->main.type  = OpParameter_Range;
    op->main.value = rangeParam;
    return Variable::create(Expr::create(std::move(op), {start, limit, delta}));
}
2020-01-15 13:33:47 +08:00
/*Rearranges data from depth into blocks of spatial data.
It is the reverse transformation of SpaceToDepth. More specifically,
it outputs a copy of the input variable where values from the depth dimension
are moved in spatial blocks to the height and width dimensions.
Args:
input: A variable.
block_size: An int that is >= 2. The size of the spatial block, same as in Space2Depth.
Returns:
A variable. Has the same type as input.
*/
VARP _DepthToSpace(VARP input, int block_size) {
    auto d2sParam       = new DepthSpaceParamT;
    d2sParam->blockSize = block_size;
    std::unique_ptr<OpT> d2sOp(new OpT);
    d2sOp->type       = OpType_DepthToSpace;
    d2sOp->main.type  = OpParameter_DepthSpaceParam;
    d2sOp->main.value = d2sParam;
    return Variable::create(Expr::create(std::move(d2sOp), {input}));
}
/*SSD network's priorbox layer.
Args:
feature: A variable. Contains the feature map. Namely bottom[0] in caffe.
image: A variable. Contains the image. Namely bottom[1] in caffe.
min_size: Minimum box size (in pixels).
max_size: Maximum box size (in pixels).
aspect_ratio: Various of aspect ratios. Duplicate ratios are ignored. If none is provided, use default 1.0.
flip: If true, flips each aspect ratio. For example, if there is aspect ratio "r", generates aspect ratio "1.0/r" as well. Default true.
clip: If true, clips the prior so that it is within [0, 1]. Default false.
variance: Variance for adjusting the prior bboxes.
img_h: image height. If 0, uses information in image.
img_w: image width. If 0, uses information in image.
step_h: step in height.
step_w: step in width.
offset: Offset to the top left corner of each cell.
Returns:
A variable.
*/
VARP _PriorBox(VARP feature, VARP image, std::vector<float> min_size, std::vector<float> max_size, std::vector<float> aspect_ratio,
               bool flip, bool clip, std::vector<float> variance,
               unsigned int img_h, unsigned int img_w, float step_h, float step_w, float offset) {
    std::unique_ptr<OpT> op(new OpT);
    op->type   = OpType_PriorBox;
    auto param = new PriorBoxT;
    // The vector parameters are taken by value and not used afterwards,
    // so move them into the op instead of copying.
    param->minSizes     = std::move(min_size);
    param->maxSizes     = std::move(max_size);
    param->aspectRatios = std::move(aspect_ratio);
    param->flip         = flip;
    param->clip         = clip;
    param->variances    = std::move(variance);
    param->imageHeight  = img_h;
    param->imageWidth   = img_w;
    param->stepHeight   = step_h;
    param->stepWidth    = step_w;
    param->offset       = offset;
    op->main.type  = OpParameter_PriorBox;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {feature, image}));
}
/*SSD network's permute layer.
Args:
input: A variable. Contains the feature map. Namely bottom[0] in caffe.
dims: A vector. Contains the permutation order of the axes.
Returns:
A variable.
*/
VARP _Permute(VARP input, INTS dims) {
    std::unique_ptr<OpT> op(new OpT);
    op->type   = OpType_Permute;
    auto param = new PermuteT;
    // `dims` is taken by value and not used afterwards; move instead of copy.
    param->dims    = std::move(dims);
    op->main.type  = OpParameter_Permute;
    op->main.value = param;
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*SSD network's detection-output layer.
Args:
location: A variable.
confidence: A variable.
priorbox: A variable.
num_classes: number of classes.
share_location: whether the location is shared between different classes, default true.
background_label_id: default = 0.
nms_threshhold: non-maximum-suppression threshold.
nms_topk: non-maximum-suppression topk.
code_type: the mode used to encode the bbox, default = CORNER.
variance_encoded_in_target: whether the variance is encoded in the target, default false.
keep_top_k: the number of boxes kept, default -1 (all boxes are kept).
confidence_threshold: the threshold for confidence.
visualize_threshold: the threshold used to visualize the detection results.
Returns:
A variable.
*/
VARP _DetectionOutput(VARP location, VARP confidence, VARP priorbox,
                      unsigned int num_classes, bool share_location, int background_label_id,
                      float nms_threshhold, int nms_topk, int code_type,
                      bool variance_encoded_in_target,
                      int keep_top_k, float confidence_threshold, float visualize_threshold) {
    auto detOut = new DetectionOutputT;
    detOut->classCount            = num_classes;
    detOut->shareLocation         = share_location;
    detOut->backgroundLable       = background_label_id;
    detOut->nmsThresholdold       = nms_threshhold;
    detOut->nmsTopK               = nms_topk;
    detOut->codeType              = code_type;
    detOut->varianceEncodedTarget = variance_encoded_in_target;
    detOut->keepTopK              = keep_top_k;
    detOut->confidenceThreshold   = confidence_threshold;
    detOut->objectnessScore       = visualize_threshold;
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_DetectionOutput;
    op->main.type  = OpParameter_DetectionOutput;
    op->main.value = detOut;
    return Variable::create(Expr::create(std::move(op), {location, confidence, priorbox}));
}
2020-06-10 14:08:28 +08:00
/*SSD network's detectionpostprocess layer.
Args :
encode_boxes : A variable .
class_predictions : A variable .
anchors : A variable .
num_classes : number of classes .
max_detections : A int , indicates max detections .
max_class_per_detection : A int , indicates max class per detection .
detections_per_class : A int , indicates detections per class .
nms_threshhold : A float , the threshold for nms .
iou_threshold : A float , the threshold for iou .
use_regular_nms : A bool , indicates whether use regular nms method , only false is implemented currently .
centersize_encoding : A float vector , indicates the centersize encoding .
Returns :
4 variable , detection_boxes , detection_class , detection_scores , num_detections
*/
std : : vector < VARP > _DetectionPostProcess ( VARP encode_boxes , VARP class_predictions , VARP anchors ,
int num_classes , int max_detections ,
int max_class_per_detection , int detections_per_class ,
2020-06-15 09:43:22 +08:00
float nms_threshold , float iou_threshold ,
2020-06-10 14:08:28 +08:00
bool use_regular_nms , std : : vector < float > centersize_encoding ) {
std : : unique_ptr < OpT > op ( new OpT ) ;
op - > type = OpType_DetectionPostProcess ;
auto param = new DetectionPostProcessParamT ;
param - > numClasses = num_classes ;
param - > maxDetections = max_detections ;
param - > maxClassesPerDetection = max_class_per_detection ;
param - > detectionsPerClass = detections_per_class ;
2020-06-15 09:43:22 +08:00
param - > nmsScoreThreshold = nms_threshold ;
2020-06-10 14:08:28 +08:00
param - > iouThreshold = iou_threshold ;
param - > useRegularNMS = use_regular_nms ;
param - > centerSizeEncoding = centersize_encoding ;
op - > main . type = OpParameter_DetectionPostProcessParam ;
op - > main . value = param ;
EXPRP expr = Expr : : create ( std : : move ( op ) , { encode_boxes , class_predictions , anchors } , 4 ) ;
std : : vector < VARP > res ;
for ( int i = 0 ; i < 4 ; + + i ) {
res . emplace_back ( Variable : : create ( expr , i ) ) ;
}
return res ;
}
2019-12-27 22:16:57 +08:00
/*Builds an Interp (resize) op over the given inputs.
Args:
xs: input variables.
widthScale/heightScale: scale factors applied to width/height.
outputWidth/outputHeight: explicit output size.
resizeType: interpolation algorithm selector.
alignCorners: whether corner pixels are aligned when resampling.
Returns:
A variable holding the resized result.
*/
VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners) {
    auto interpParam = new InterpT;
    interpParam->widthScale   = widthScale;
    interpParam->heightScale  = heightScale;
    interpParam->outputWidth  = outputWidth;
    interpParam->outputHeight = outputHeight;
    interpParam->resizeType   = resizeType;
    interpParam->alignCorners = alignCorners;
    std::unique_ptr<OpT> interpOp(new OpT);
    interpOp->type       = OpType_Interp;
    interpOp->main.type  = OpParameter_Interp;
    interpOp->main.value = interpParam;
    return Variable::create(Expr::create(std::move(interpOp), xs));
}
2020-01-15 13:33:47 +08:00
/*Wraps x in a ZeroGrad op (gradient of x is treated as zero).
Args:
x: A variable.
Returns:
A variable forwarding x through the ZeroGrad op.
*/
VARP _ZeroGrad(VARP x) {
    std::unique_ptr<OpT> gradOp(new OpT);
    gradOp->type = OpType_ZeroGrad;
    return Variable::create(Expr::create(std::move(gradOp), {x}));
}
/*Creates an int8 (symmetric-quantized) convolution.
Args:
weight: int8 weights, laid out as outputCount * (inputCount/group) * kh * kw.
bias: int32 bias, one entry per output channel.
scale: float dequantization scales.
x: input variable.
channel: {inputCount, outputCount}.
kernelSize: {kernelX, kernelY}.
pad, stride, dilate, group, pads, relu: common convolution parameters.
nbits: bit width of the quantized weights.
Returns:
A variable holding the convolution result.
NOTE(review): the original body was interleaved with VCS timestamp lines
(merge artifacts) that are not valid C++; they have been removed.
*/
VARP _Conv(std::vector<int8_t>&& weight, std::vector<int>&& bias, std::vector<float>&& scale, VARP x, INTS channel, INTS kernelSize,
           PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, int nbits) {
    std::unique_ptr<OpT> convOp(new OpT);
    convOp->type = OpType_ConvInt8;
    // Depthwise when every input channel maps to exactly one output channel.
    if (channel[0] == channel[1] && channel[0] == group) {
        convOp->type = OpType_DepthwiseConvInt8;
    }
    convOp->main.type  = OpParameter_Convolution2D;
    convOp->main.value = new Convolution2DT;
    auto conv2D        = convOp->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padMode = _convertPadMode(pad);
    // Two pad values mean {padX, padY}; otherwise forward the full pad list.
    if (pads.size() == 2) {
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    conv2D->common->relu        = relu;
    MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
    conv2D->symmetricQuan.reset(new QuantizedFloatParamT);
    conv2D->symmetricQuan->bias   = std::move(bias);
    conv2D->symmetricQuan->scale  = std::move(scale);
    conv2D->symmetricQuan->weight = std::move(weight);
    conv2D->symmetricQuan->nbits  = nbits;
    return (Variable::create(Expr::create(convOp.get(), {x})));
}
2019-12-27 22:16:57 +08:00
2021-01-06 16:29:37 +08:00
VARP _Conv ( std : : vector < int8_t > & & weight , std : : vector < int > & & bias , std : : vector < float > & & scale ,
VARP x , INTS channel , INTS kernelSize ,
PaddingMode pad , INTS stride , INTS dilate , int group , INTS pads , bool relu ,
int8_t inputZeroPoint , int8_t outputZeroPoint ,
int8_t minValue , int8_t maxValue , bool accumulateToInt16 ) {
std : : unique_ptr < OpT > convOp ( new OpT ) ;
convOp - > type = OpType_ConvInt8 ;
if ( channel [ 0 ] = = channel [ 1 ] & & channel [ 0 ] = = group ) {
convOp - > type = OpType_DepthwiseConvInt8 ;
}
convOp - > main . type = OpParameter_Convolution2D ;
convOp - > main . value = new Convolution2DT ;
auto conv2D = convOp - > main . AsConvolution2D ( ) ;
conv2D - > common . reset ( new Convolution2DCommonT ) ;
conv2D - > common - > padMode = _convertPadMode ( pad ) ;
2021-06-11 17:17:13 +08:00
if ( pads . size ( ) = = 2 ) {
conv2D - > common - > padX = pads [ 0 ] ;
conv2D - > common - > padY = pads [ 1 ] ;
} else {
conv2D - > common - > pads = std : : move ( pads ) ;
}
2021-01-06 16:29:37 +08:00
conv2D - > common - > strideX = stride [ 0 ] ;
conv2D - > common - > strideY = stride [ 1 ] ;
conv2D - > common - > group = group ;
conv2D - > common - > outputCount = channel [ 1 ] ;
conv2D - > common - > inputCount = channel [ 0 ] ;
conv2D - > common - > dilateX = dilate [ 0 ] ;
conv2D - > common - > dilateY = dilate [ 1 ] ;
conv2D - > common - > kernelX = kernelSize [ 0 ] ;
conv2D - > common - > kernelY = kernelSize [ 1 ] ;
conv2D - > common - > relu = relu ;
[MNN:Sync] Sync internal github
Commits:
8148ae75c 弗人 bugfix
14cb8ec7f 弗人 [Converter:Bugfix] bugfix for onnx depthwise convtranspose
476fbcd90 雁行 [MNN:Feature] Open AVX cast and bugfix for contentCFG.
5e26b9fd3 雁行 [Test:Feature] Add android test.
37e147b25 雁行 [MNN:Bugfix] Bugfix for floordiv.
144c185f5 tianbu.xsw hangxing fix hiai
b4fd429d6 tianbu.xsw updateCacheFile bugfix -- update cache size
d4ba572a8 雁行 [MNN:Bugfix] Support int8 in AVX2 and some Bugfix.
43061f07e xiaying [MNN:Bugfix] Fix bug for module mode run part of model
398cc5ab6 tianhang.yth refactor demo
736380600 xiaying [Express:Bugfix] Fix memory leak for copy branch
b8dab0a27 tianhang.yth MNNFloat2Int8 sizeQuad=0 crash fix
94b95bfed ghz [BugFix]1.Better method for fast pack valid check
6a921f85e xiaying [Converter:Bugfix] Fix bug for Fuseconsttosubgraph
5f77ae889 tianhang.yth numThread bugfix
a807ef879 tianhang.yth add createSession(configs, runtimeinfo) API, add pymnn demo, pymnn logcat bugfix
ad05409d3 xiaying [MNN:Bugfix] Fix bug for StaticModule's sizecompute overflow, add error print for module mode
9d81b8299 xiaying [MNN:Bugfix] Fix bug for Unique op for output size = 1
03b15e9af xiaying [Test:Feature] Add MatMulBConst Test, Fix bug for single Convert
c944a76ee tianhang.yth add auto backend and getSessionInfo @tianbu
91fa7267b ghz [BugFix]1.fix the error in eP check
bf0041f77 ghz [BugFix]1.Fix the logic error in eP check. 2.Fix the sp align error
693871672 雁行 [CPU:Bugfix] rm adrp instruction for clang compiler bug.
1b8f6b3d8 ghz 1.Fix the wronly use of r13 in arm32 version. 2.Fix the missing callee register save and restore process.
feb7ecc4c 弗人 modify log of python offline quant
040c04811 ghz [BufFix]1.replace platform-related regs. 2.fix the same problem in arm32 version
609f37db8 弗人 add log for python quant, python convert
5511dd30a ghz [BugFix]1.Add testcases in SparseConv to check all functional code branch. 2. Fix the bug in "MNNPackC4ForMatMul_A.S" in arm64, which is caused by the missing check of eReal parameter.
a93ff9280 tianhang.yth add tf.Unique op support
9729ff773 allen.lk [Bugfix] Fix one arm32 instruction syntax that clang works but gcc DOES NOT work. use index instruction instead.
297c1ad14 雁行 [Expr:Bugfix] bugfix for tensor content used by shape compute.
ef8c369e3 弗人 catch exception
07c2dd670 弗人 add dependence to setup, base64 encode url, add time log
177e590c1 弗人 [Python:Feature] add aliyun log for python quant tool
40a7928cf allen.lk [Debug:Sparse] 1.Add group parameter in torchscript converter. 2. Stop split running to avoid memory corruption when check failed in TransformGroupConvolution 3. fix Op split issue in TransformGroupConvolution
3bdea84a1 allen.lk [Debug:Sparse] Fix and warning one kind of segmentfault cause by memory corruption when resize ConvolutionWinograd. Avoid to use some registers as arm restriction.
c3c6fbdbd allen.lk [Debug:Sparse] Fix and warning one kind of segmentfault cause by memory corruption when resize ConvolutionWinograd. Avoid to use some registers as arm restriction.
bc590eee4 雁行 [Converter:Bugfix] bugfix for onnx instancenormalization convert.
d8918593f tianhang.yth add auto backend and getSessionInfo @tianbu
83a198ed7 杭行 update
d0dd3e09b 杭行 update
99540202e xiaying [Converter:Optimize] Opt the tensor convert insert
333d8db82 allen.lk [Debug:Sparse] Fix All platform-register r9 / x18 issue on arm32 and arm64.
db5994672 杭行 merge
6293de7b8 tianbu.xsw fix pymnn updateCacheFile
5c2e11cb1 tianbu.xsw do updateCache in createSession
6e7641ff4 tianbu.xsw do not limit cacheFile for a model
5287a65e4 tianbu.xsw bugfix
52ba53a91 tianbu.xsw revert pymnn api
60284d830 tianbu.xsw bugfix
6d8077490 tianbu.xsw rename updateCacheFile api params
3cb172710 tianhang.yth updateCacheFile API size default value is 0
c5b69aabf tianbu.xsw updateCacheFile python api fix
5d5da7aa5 tianbu.xsw reflector code
5707877a4 雁行 [MNN:Speed] Speedup for softmax in x86 and arm.
2a211825c tianbu.xsw reflector code for updateCacheFile
76db3a835 tianbu.xsw [Cache Feature]: Add updateCacheFile API for increment cache
b06b0fd43 allen.lk [Debug:Sparse] Fix and warning one kind of segmentfault cause by memory corruption when resize ConvolutionWinograd. Avoid to use some registers as arm restriction.
e68bfa495 雁行 [Converter:Feature] Add UUID when model convert.
a9cb935dc xiaying [MNN:Speed] Support c4nhwc for more fastblit
019f40353 xiaying [Converter:Refractor] Reduce memory used by MNNConvert(bert from 5G -> 1G)
d2a6d3d05 xiaying [MNN:Bugfix] Fix bug for identity output not find
604d0801b xiaying [Converter:Bugfix] Fix bug for FuseGeLu
4bada2367 xiaying [MNN:Refractor] SegmentMean rewrite as segment
82070e708 xiaying [MNN:Bugfix] Fix bug for GeometryBinary
e8ea4266e xiaying Fix bug for ShapeTensorConvert compute for dim = 1 error
1f1cf1991 xiaying [Tools:Bugfix] Fix system compability for fastTestOnnx
6f422efe2 xiaying [Tools:Bugfix] Remove color for checkDir for easy to dump
968f7ec88 xiaying [MNN:Speed] Support turn broadcast binary to loop
3e7aaf46f xiaying [MNN:Refractor] Set Convolution1x1Strassen support variable input/output ptr
1f65ab163 xiaying [MNN:Bugfix] Fix bug for mini mnn can't convert model
d65953d47 xiaying [MNN:Bugfix] Fix bug for armv7a - android-14 + ARM82
8b68be45c xiaying [MNN:Feature] Add segment
8a8f264f5 xiaying [Vulkan:Bugfix] Remove unuseful print
025bb0fda xiaying [Converter:Bugfix] Fix bug for oneof don't support
43900251e tianbu.xsw enable setCacheFile python API
ebfb05c74 tianbu.xsw [Metal Feature] support metallib obtain from walle transfer task
9665c0a79 弗人 add check for path in json file
c66fef224 xiaying [Converter:Bugfix] Fix bug for oneof don't support
42f192852 xiaying [MNN:Bugfix] Fix bug for not set output / saveTensor into origin Schedule's outputs
1b95354ff 雁行 [Feature]: Support shape compute for SetDiff1D, and null input for Prod.
83966d043 xiaying [Test:Feature] Add test for static module
42d1be933 xiaying [Converter:Bugfix] Fix bug for mnn convert and static model add more outputs for origin model
9067531c3 xiaying [Converter:Refractor] formatLicence
99558bed9 xiaying [Converter:Bugfix] Count the op for unuseful and controlflow
4f6da0fa7 allen.lk [Feature:GRUMultiOutput] fix multi output dimension type
c6b219bce xiaying [Converter:Feature] Turn torch converter to object
dd4e68a37 xiaying [Converter:Feature] Support dump supported ops
80b6a60a3 xiaying [Converter:Info] If has output name, print output name instead of computed
015278fc3 xiaying [MNN:Refractor] Revert IfModule's debug info
23ac967c4 xiaying Don't transform for multi-input convolution/deconvolution
b02b0d4de xiaying Fix bug for multi-input for conv1d
254d8b1d4 xiaying Fix bug for Conv1dSqueezeMove for multi input convolution 1d
d47d0b9ca xiaying Fix bug for CPURaster's fuse nc4hw4
357c5bd33 xiaying Fix ConvBiasAdd for conv's inputs op > 1
55b1f0c9c xiaying [Converter:Bugfix] Don't transform for multi-input convolution/deconvolution
1902a30f5 xiaying [Converter:Bugfix] Fix bug for Conv1dSqueezeMove for multi input convolution 1d
c23fe617b xiaying [MNN:Bugfix] Fix bug for multi-input for conv1d
8ff018426 xiaying [MNN:Bugfix] Fix bug for CPURaster's fuse nc4hw4
d4e8cd602 xiaying [Converter:Bugfix] Fix ConvBiasAdd for conv's inputs op > 1
846266b42 tianbu.xsw return when program and tune both nullptr
fd67c76a9 xiaying [Converter:Bugfix] DepthwiseConvWeightMerge only valid for tflite
e77a242c4 xiaying [Converter:Feature] Support tflite's half pixel
be054c377 tianbu.xsw [OpenCL Bugfix] do not rewrite cache when binary program is produced
51e65aa35 xiaying [Converter:Feature] Support tflite for fp16 and multi-input convolution
1ccdfdeb5 tianbu.xsw redefine svm macro name
31234d372 tianbu.xsw [OpenCL SVM] add macro for only use wrapper
d739e35da xiaying [MNN:Bugfix] Fix compile bug for grid op
24ab13c79 Joker feat(arm82): add GridSample op support in arm82 backend, AVX(by xiaying)
7b142978e xiaying [AVX512:Speed] Optimize for e <= 8
5f6febe7b tianbu.xsw code refactor
998d91b57 xiaying [Express:Speed] Merge submodule for speed
22c89146f tianhang.yth fix alpha div by zero bug and arm server compile bug
8f829a170 tianbu.xsw [OpenCL Pad] unify conv/deconv pad computing
4a28f603e xiaying [Express:Speed] Shared Const for All Submodule
c74cf28f3 xiaying [MNN:Refractor] Seperate Const init and schedule
2a1eebb7a xiaying [Tools:Bugfix] Fix bug for modelTest.py count size
72f04008c xiaying [MNN:Refractor] Delete unuseful const op
1e735d03c xiaying [Converter:Bugfix] Fix bug for static module gen
4dfadbc6e xiaying [MNN:Refractor] Rewrite const init mode
1fcf0417a xiaying [MNN:Bugfix] Fix bug for deconvolutin multi-input for multi-batch
41d429cfd xiaying [Train:Bugfix] Revert convert NCHW for mnistTrain
f947a5f01 xiaying [Test:Feature] Add testTrain
dad59b6f6 tianbu.xsw move realize code from Backend.hpp to Tensor.cpp
cf4473ad1 xiaying [Train:Bugfix] Support pad for GeometryPoolGrad
91ab13734 xiaying [MNN:Bugfix] Fix compile bug for avx512
742e80f47 xiaying [MNN:Refractor] Opt the logic for checknan judge
12543b841 xiaying [ARM82:Bugfix] Fix compile bug for ios
3a2b0a49f xiaying [ARM82:Speed] Opt Pack / Unpack for armv8
c0f1995cd xiaying [ARM82:Speed] Opt MNNPackC8FP16 and MNNUnpackC8FP16 by asm
e0fc77dcf xiaying [MNN:Speed] Fix bug for DeconvolutionWithStride for C4HW4, open it
584bec578 xiaying [MNN:Bugfix] Fix bug for format set error for onnx
d5bd4148d xiaying [MNN:Bugfix] Fix bug for format set error for onnx
b00265841 xiaying [MNN:Bugfix] Fix bug for SparseConvolutionTiledExecutor
bb09188ac xiaying [Test:Bugfix] Fix bug for run into sparse auto
426d1babd xiaying [MNN:Refractor] Small bugfix for Group convolution and pack
7d0ea1c46 tianbu.xsw [testModel Feature] support testModel.out input resize
4169c54ce xiaying [MNN:Bugfix] Fix bug for checkNAN for origin
412a82222 xiaying [Test:Bugfix] Fix bug for CheckNAN's error of matmul
319b1d425 xiaying [MNN:Bugfix] Fix bug for multi-batch for ConvInt8
050b728a6 xiaying [Test:Bugfix] Use NCHW for ConvInt8Test
7db3423a1 xiaying [OpenCL:Bugfix] Fix bug for opencl::image,opencl::buffer for C4HW4
adcec6a7f xiaying [Vulkan:Bugfix] Fix bug for invalid tensor size limit
d2a7cf4e9 xiaying [Vulkan:Bugfix] Fix bug for onCopyBuffer of nc4hw4
557bebdd3 xiaying [MNN:Bugfix] Fix bug for BF16-ARM32
bbe186649 tianbu.xsw [Update AUTO mode]: fix MNN_FORWARD_AUTO choose priority
6deb23439 xiaying [MNN:Bugfix] Fix bug for GeometryBinary don't care about NC4HW4 same size
b137590e4 xiaying [MNN:Bugfix] Fix bug for GeometryBinary don't care about NC4HW4 same size
7003558ea xiaying [Converter:Bugfix] Fix bug for onnx pad for serveral case
b5f8cae5a xiaying [Converter:Bugfix] Fix bug for onnx pad for serveral case
29b09e125 xiaying [MNN:Bugfix] Fix bug for arm64-bf16
42ce00770 xiaying [MNN:Bugfix] Fix bug for ARM64 - float
a2d89fc18 雁行 [Converter:Feature] Support Binary Unary for Torch.
7f1c0deb1 xiaying [MNN:Bugfix] Fix bug for Raster for Int8
8335a6f18 tianbu.xsw [OpenCL Shared Memory] modify data_format method
b359e031b xiaying [ARM82:Bugfix] Fix bug for arm82 and speed up pack / unpack c8
24bf3fc88 雁行 [Convert:Feature] Support LayerNormFuse without gamma beta.
3e629624b xiaying [MNN:Bugfix] Fix bug for float - armv7a
2b7908ec7 tianbu.xsw modify workItemSize
3cee0d413 xiaying [MNN:Bugfix] test wrong clear
9cbbfb998 xiaying [MNN:Bugfix] fix compile bug for c++ < 14
2d7a44484 xiaying [MNN:Bugfix] fix compile bug for c++ < 14
eb7d0cb53 xiaying [Test:Bugfix] Don't test for NC4HW4 directly
7b40ca8d1 xiaying [MNN:Bugfix] Fix bug for ConvolutionGroup
2694d8a91 xiaying [MNN:Bugfix] Fix bug for CPUGridSample
f89af60f6 xiaying [MNN:Bugfix] Fix compile bug for arm
a151abcdd xiaying [MNN:Bugfix] Fix bug for convert for int8 / int16
b254dbe61 雁行 [MNN:Bugfix] Bugfix for Conv onClone.
d08150631 xiaying [MNN:Bugfix] Fix bug for fast rcnn
e5568a0df xiaying [MNN:Bugfix] Fix bug for CPURaster treat NC4HW4 fast blit
128318933 雁行 [Raster:Bugfix] bugfix for Raster merge onResize.
03caacbea xiaying [MNN:Bugfix] fix bug for CPUDeconvolution and Convolution1x1Strassen for iw != ow
e1e3c245c xiaying [MNN:Bugfix] Fix bug for ConvolutionWinograd
2524cbc6d xiaying [MNN:Bugfix] Fix bug for CPUSoftmax
44ec79b8f xiaying [MNN:Bugfix] Fix bug for CPUConvolutionDepthwise / Scale / DeconvolutionDW
21ae956ce xiaying [MNN:Bugfix] Fix bug for Multi-Batch-TiledExecutor
09a5069c7 xiaying [MNN:Speed] Add offset for src and dst
6776c6784 xiaying [MNN:Bugfix] Fix bug for trainable model
cc83ae30b xiaying [MNN:Bugfix] Fix bug for trainable model
2021-07-29 11:46:59 +08:00
MNN_ASSERT ( weight . size ( ) > = channel [ 1 ] * ( channel [ 0 ] / group ) * kernelSize [ 0 ] * kernelSize [ 1 ] ) ;
2021-01-06 16:29:37 +08:00
conv2D - > symmetricQuan . reset ( new QuantizedFloatParamT ) ;
if ( bias . size ( ) = = 0 ) {
bias . resize ( channel [ 1 ] ) ;
std : : fill ( bias . begin ( ) , bias . end ( ) , 0 ) ;
}
conv2D - > symmetricQuan - > bias = std : : move ( bias ) ;
conv2D - > symmetricQuan - > scale = std : : move ( scale ) ;
conv2D - > symmetricQuan - > zeroPoint = std : : move ( inputZeroPoint ) ;
conv2D - > symmetricQuan - > outputZeroPoint = std : : move ( outputZeroPoint ) ;
MNN_ASSERT ( maxValue > minValue ) ;
conv2D - > symmetricQuan - > clampMin = minValue ;
conv2D - > symmetricQuan - > clampMax = maxValue ;
conv2D - > symmetricQuan - > weight = std : : move ( weight ) ;
if ( accumulateToInt16 ) {
conv2D - > symmetricQuan - > method = MNN : : QuantizeAlgo : : QuantizeAlgo_OVERFLOW_AWARE ;
}
2021-04-08 14:24:07 +08:00
return ( Variable : : create ( Expr : : create ( convOp . get ( ) , { x } ) ) ) ;
}
/*Creates an int8 convolution carrying per-channel weight scales and IO scales.
Args:
weight: int8 weights, laid out as outputCount * (inputCount/group) * kh * kw.
bias: float bias; if empty, an all-zero bias of outputCount entries is used.
weightScale: per-channel dequantization scales for the weights.
x: input variable.
channel: {inputCount, outputCount}.
kernelSize: {kernelX, kernelY}.
pad, stride, dilate, group, pads, relu: common convolution parameters.
scaleIn/scaleOut: quantization scales of the input/output tensors.
inputZeroPoint/outputZeroPoint: zero points of the quantized input/output.
minValue/maxValue: clamp range (max must exceed min).
weightClampValue: weight clamp bound; nbits is derived from it.
accumulateToInt16: if true, select the overflow-aware int16 accumulation method.
Returns:
A variable holding the convolution result.
NOTE(review): the original body was interleaved with VCS timestamp lines
(merge artifacts) that are not valid C++; they have been removed.
*/
VARP _Conv(std::vector<int8_t>&& weight, std::vector<float>&& bias, std::vector<float>&& weightScale,
           VARP x, INTS channel, INTS kernelSize,
           PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu,
           float scaleIn, float scaleOut,
           int8_t inputZeroPoint, int8_t outputZeroPoint,
           int8_t minValue, int8_t maxValue, float weightClampValue, bool accumulateToInt16) {
    std::unique_ptr<OpT> convOp(new OpT);
    convOp->type = OpType_ConvInt8;
    // Depthwise when every input channel maps to exactly one output channel.
    if (channel[0] == channel[1] && channel[0] == group) {
        convOp->type = OpType_DepthwiseConvInt8;
    }
    convOp->main.type  = OpParameter_Convolution2D;
    convOp->main.value = new Convolution2DT;
    auto conv2D        = convOp->main.AsConvolution2D();
    conv2D->common.reset(new Convolution2DCommonT);
    conv2D->common->padMode = _convertPadMode(pad);
    // Two pad values mean {padX, padY}; otherwise forward the full pad list.
    if (pads.size() == 2) {
        conv2D->common->padX = pads[0];
        conv2D->common->padY = pads[1];
    } else {
        conv2D->common->pads = std::move(pads);
    }
    conv2D->common->strideX     = stride[0];
    conv2D->common->strideY     = stride[1];
    conv2D->common->group       = group;
    conv2D->common->outputCount = channel[1];
    conv2D->common->inputCount  = channel[0];
    conv2D->common->dilateX     = dilate[0];
    conv2D->common->dilateY     = dilate[1];
    conv2D->common->kernelX     = kernelSize[0];
    conv2D->common->kernelY     = kernelSize[1];
    conv2D->common->relu        = relu;
    MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]);
    conv2D->symmetricQuan.reset(new QuantizedFloatParamT);
    if (bias.size() == 0) {
        // Callers may omit the bias; substitute an all-zero bias.
        bias.resize(channel[1]);
        std::fill(bias.begin(), bias.end(), 0);
    }
    // `bias` is not used after this point; move instead of copying.
    conv2D->bias = std::move(bias);
    conv2D->symmetricQuan->weight          = std::move(weight);
    conv2D->symmetricQuan->zeroPoint       = inputZeroPoint;
    conv2D->symmetricQuan->outputZeroPoint = outputZeroPoint;
    MNN_ASSERT(maxValue > minValue);
    conv2D->symmetricQuan->clampMin = minValue;
    conv2D->symmetricQuan->clampMax = maxValue;
    // Recover the bit width from the clamp bound: clamp = 2^(nbits-1) - 1.
    conv2D->symmetricQuan->nbits = int(std::log(weightClampValue * 2 + 2) / std::log(2.0f));

    // const int kn = conv2D->common->outputCount;
    // const int ks = weight.size() / kn;
    // std::vector<float> scales(kn, 1.0f);
    // std::vector<float> weightFloat;
    // for (int i = 0; i < weight.size(); i++) {
    //     weightFloat.emplace_back(weight[i] * weightScale[i / ks]);
    // }
    // conv2D->quanParameter = IDSTEncoder::encode(weightFloat, weightScale, ks, kn, false, weight.data(), -int(weightClampValue));

    conv2D->quanParameter.reset(new IDSTQuanT);
    conv2D->quanParameter->alpha    = std::move(weightScale);
    conv2D->quanParameter->scaleIn  = scaleIn;
    conv2D->quanParameter->scaleOut = scaleOut;
    conv2D->quanParameter->aMin     = -int(weightClampValue);
    if (accumulateToInt16) {
        conv2D->symmetricQuan->method = MNN::QuantizeAlgo::QuantizeAlgo_OVERFLOW_AWARE;
    }
    return (Variable::create(Expr::create(convOp.get(), {x})));
}
2020-11-05 16:41:56 +08:00
/*Builds a CosineSimilarity op over two inputs and a dimension selector.
Args:
input0, input1: variables to compare.
inputDim: variable selecting the dimension along which to compute.
Returns:
A variable holding the cosine similarity.
*/
VARP _CosineSimilarity(VARP input0, VARP input1, VARP inputDim) {
    std::unique_ptr<MNN::OpT> cosOp(new MNN::OpT);
    cosOp->type = MNN::OpType_CosineSimilarity;
    return (Variable::create(Expr::create(std::move(cosOp), {input0, input1, inputDim})));
}
2021-04-08 15:34:23 +08:00
/*Builds a GridSample op that samples `input` at the locations given by `grid`.
Args:
input: the variable to sample from.
grid: sampling coordinates.
mode: interpolation method (NEAREST or BILINEAR; BILINEAR is the default).
paddingMode: how out-of-range coordinates are handled (ZEROS is the default).
alignCorners: whether corner pixels are aligned when mapping coordinates.
Returns:
A variable holding the sampled result.
*/
VARP _GridSample(VARP input, VARP grid, InterpolationMethod mode, GridSamplePaddingMode paddingMode, bool alignCorners) {
    auto gridSample = new GridSampleT;
    // Map the public interpolation enum onto the schema's sample mode.
    switch (mode) {
        case NEAREST:
            gridSample->mode = SampleMode_NEAREST;
            break;
        case BILINEAR:
        default:
            gridSample->mode = SampleMode_BILINEAR;
            break;
    }
    // Map the public padding enum onto the schema's border mode.
    switch (paddingMode) {
        case GRID_SAMPLE_PADDING_BORDER:
            gridSample->paddingMode = BorderMode_CLAMP;
            break;
        case GRID_SAMPLE_PADDING_REFLECTION:
            gridSample->paddingMode = BorderMode_REFLECTION;
            break;
        case GRID_SAMPLE_PADDING_ZEROS:
        default:
            gridSample->paddingMode = BorderMode_ZEROS;
            break;
    }
    gridSample->alignCorners = alignCorners;
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_GridSample;
    op->main.type  = OpParameter_GridSample;
    op->main.value = gridSample;
    return (Variable::create(Expr::create(std::move(op), {input, grid})));
}
2020-02-26 09:57:17 +08:00
/*Quantizes a float NC4HW4 variable to int8 using per-channel (or single) scales.
Args:
x: input variable; must be NC4HW4 and float, otherwise nullptr is returned.
scale: scale variable; its size must equal x's channel count or be 1.
minValue/maxValue: reserved for future use.
Returns:
The quantized variable, or nullptr when the inputs are not ready/valid.
NOTE(review): the original body was interleaved with VCS timestamp lines
(merge artifacts) that are not valid C++; they have been removed.
*/
VARP _FloatToInt8(VARP x, VARP scale, char minValue /*For future*/, char maxValue /*For future*/) {
    auto xInfo     = x->getInfo();
    auto scaleInfo = scale->getInfo();
    auto scalePtr  = scale->readMap<float>();
    if (nullptr == scalePtr || nullptr == xInfo || nullptr == scaleInfo) {
        MNN_ERROR("Error for FloatToInt8 because var not ready\n");
        return nullptr;
    }
    if (xInfo->order != NC4HW4 || xInfo->type.code != halide_type_float) {
        MNN_ERROR("Not Support Input for FloatToInt8 because var not NC4HW4 or not float\n");
        return nullptr;
    }
    if ((scaleInfo->size != xInfo->dim[1]) && (scaleInfo->size != 1)) {
        MNN_ERROR("Scale's size not match input's channel: %d - %d\n", scaleInfo->size, xInfo->dim[1]);
        return nullptr;
    }
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_FloatToInt8;
    op->main.type  = OpParameter_QuantizedFloatParam;
    op->main.value = new QuantizedFloatParamT;
    op->main.AsQuantizedFloatParam()->tensorScale.resize(scaleInfo->size);
    ::memcpy(op->main.AsQuantizedFloatParam()->tensorScale.data(), scalePtr, scaleInfo->size * sizeof(float));
    return Variable::create(Expr::create(op.get(), {x}));
}
2021-01-06 16:29:37 +08:00
/*Create a float-to-int8 quantize variable with explicit clamp range and zero point.
Args:
x: A variable. Must be NC4HW4 and of float type, with shape info available.
scale: A variable holding per-channel (or a single shared) quantization scales.
minValue, maxValue: Clamp bounds applied after quantization; maxValue must exceed minValue.
zeroPoint: The int8 zero point.
Returns:
output: A variable holding the quantized result, or nullptr on invalid input.
*/
VARP _FloatToInt8(VARP x, VARP scale, int8_t minValue, int8_t maxValue, int8_t zeroPoint) {
    auto xInfo     = x->getInfo();
    auto scaleInfo = scale->getInfo();
    auto scalePtr  = scale->readMap<float>();
    if (nullptr == scalePtr || nullptr == xInfo || nullptr == scaleInfo) {
        MNN_ERROR("Error for FloatToInt8 because var not ready\n");
        return nullptr;
    }
    if (xInfo->order != NC4HW4 || xInfo->type.code != halide_type_float) {
        MNN_ERROR("Not Support Input for FloatToInt8 because var not NC4HW4 or not float\n");
        return nullptr;
    }
    // The scale count must either match the channel count or be a single shared scale.
    if ((scaleInfo->size != xInfo->dim[1]) && (scaleInfo->size != 1)) {
        MNN_ERROR("Scale's size not match input's channel: %d - %d\n", scaleInfo->size, xInfo->dim[1]);
        return nullptr;
    }
    auto param = new QuantizedFloatParamT;
    param->tensorScale.assign(scalePtr, scalePtr + scaleInfo->size);
    param->zeroPoint = zeroPoint;
    MNN_ASSERT(maxValue > minValue);
    param->clampMin = int8_t(minValue);
    param->clampMax = int8_t(maxValue);
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_FloatToInt8;
    op->main.type  = OpParameter_QuantizedFloatParam;
    op->main.value = param;
    return Variable::create(Expr::create(op.get(), {x}));
}
2020-02-26 09:57:17 +08:00
/*Create an int8-to-float dequantize variable.
Args:
x: A variable. Must be NC4HW4 and of int type, with shape info available.
scale: A variable holding per-channel (or a single shared) dequantization scales.
Returns:
output: A variable holding the dequantized result, or nullptr on invalid input.
*/
VARP _Int8ToFloat(VARP x, VARP scale) {
    auto xInfo     = x->getInfo();
    auto scaleInfo = scale->getInfo();
    auto scalePtr  = scale->readMap<float>();
    if (nullptr == scalePtr || nullptr == xInfo || nullptr == scaleInfo) {
        MNN_ERROR("Error for _Int8ToFloat because var not ready\n");
        return nullptr;
    }
    if (xInfo->order != NC4HW4 || xInfo->type.code != halide_type_int) {
        MNN_ERROR("Not Support Input for _Int8ToFloat because var not NC4HW4 or not int8\n");
        return nullptr;
    }
    // The scale count must either match the channel count or be a single shared scale.
    if ((scaleInfo->size != xInfo->dim[1]) && (scaleInfo->size != 1)) {
        MNN_ERROR("_Int8ToFloat Scale's size not match input's channel\n");
        return nullptr;
    }
    auto param = new QuantizedFloatParamT;
    param->tensorScale.assign(scalePtr, scalePtr + scaleInfo->size);
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Int8ToFloat;
    op->main.type  = OpParameter_QuantizedFloatParam;
    op->main.value = param;
    return Variable::create(Expr::create(op.get(), {x}));
}
2021-01-06 16:29:37 +08:00
/*Create an int8-to-float dequantize variable with an explicit zero point.
Args:
x: A variable. Must be NC4HW4 and of int type, with shape info available.
scale: A variable holding per-channel (or a single shared) dequantization scales.
zeroPoint: The int8 zero point subtracted before scaling.
Returns:
output: A variable holding the dequantized result, or nullptr on invalid input.
*/
VARP _Int8ToFloat(VARP x, VARP scale, int8_t zeroPoint) {
    auto xInfo     = x->getInfo();
    auto scaleInfo = scale->getInfo();
    auto scalePtr  = scale->readMap<float>();
    if (nullptr == scalePtr || nullptr == xInfo || nullptr == scaleInfo) {
        MNN_ERROR("Error for _Int8ToFloat because var not ready\n");
        return nullptr;
    }
    if (xInfo->order != NC4HW4 || xInfo->type.code != halide_type_int) {
        MNN_ERROR("Not Support Input for _Int8ToFloat because var not NC4HW4 or not int8\n");
        return nullptr;
    }
    // The scale count must either match the channel count or be a single shared scale.
    if ((scaleInfo->size != xInfo->dim[1]) && (scaleInfo->size != 1)) {
        MNN_ERROR("_Int8ToFloat Scale's size not match input's channel\n");
        return nullptr;
    }
    auto param = new QuantizedFloatParamT;
    param->tensorScale.assign(scalePtr, scalePtr + scaleInfo->size);
    param->zeroPoint = zeroPoint;
    std::unique_ptr<OpT> op(new OpT);
    op->type       = OpType_Int8ToFloat;
    op->main.type  = OpParameter_QuantizedFloatParam;
    op->main.value = param;
    return Variable::create(Expr::create(op.get(), {x}));
}
2020-05-15 20:39:07 +08:00
/*Create a select variable: element-wise choose from input0 where `select` is true, else input1.
Args:
select: A variable, the condition.
input0: A variable, values taken where the condition holds.
input1: A variable, values taken otherwise.
Returns:
output: A variable holding the selected result.
*/
VARP _Select(VARP select, VARP input0, VARP input1) {
    std::unique_ptr<MNN::OpT> op(new MNN::OpT);
    op->type = MNN::OpType_Select;
    return Variable::create(Expr::create(std::move(op), {select, input0, input1}));
}
2021-04-08 15:34:23 +08:00
std : : vector < VARP > _TopKV2 ( VARP input0 , VARP input1 ) {
std : : unique_ptr < OpT > op ( new OpT ) ;
op - > type = OpType_TopKV2 ;
auto expr = Expr : : create ( op . get ( ) , { input0 , input1 } , 2 ) ;
std : : vector < VARP > res ( 2 ) ;
res [ 0 ] = Variable : : create ( expr , 0 ) ;
res [ 1 ] = Variable : : create ( expr , 1 ) ;
return res ;
}
2021-11-30 10:10:53 +08:00
/*Create an image-process variable (format conversion, resize and affine transform in one op).
Args:
input: A variable, the source image.
config: Image-process configuration (formats, filter, wrap, mean/normal).
matrix: A 3x3 affine transform, read element-wise via matrix.get(i).
oh, ow, oc: Output height, width and channel count.
dtype: Output data type (cast to MNN DataType).
padVal: Padding value for out-of-range samples.
Returns:
output: A variable holding the processed image.
*/
VARP _ImageProcess(VARP input, CV::ImageProcess::Config config, CV::Matrix matrix, int oh, int ow, int oc, int dtype, uint8_t padVal) {
    auto process          = new ImageProcessParamT;
    process->destFormat   = (MNN::ImageFormatType)config.destFormat;
    process->sourceFormat = (MNN::ImageFormatType)config.sourceFormat;
    process->filterType   = (MNN::FilterType)config.filterType;
    process->wrap         = (MNN::WrapType)config.wrap;
    // Output shape is fixed to batch 1, NCHW order.
    process->shape        = {1, oc, oh, ow};
    process->outputType   = (DataType)dtype;
    process->paddingValue = padVal;
    // mean/normal carry 4 entries each; transform is the flattened 3x3 matrix.
    process->mean.assign(config.mean, config.mean + 4);
    process->normal.assign(config.normal, config.normal + 4);
    process->transform.resize(9);
    for (int i = 0; i < 9; ++i) {
        process->transform[i] = matrix.get(i);
    }
    std::unique_ptr<MNN::OpT> op(new MNN::OpT);
    op->type       = MNN::OpType_ImageProcess;
    op->main.type  = OpParameter_ImageProcessParam;
    op->main.value = process;
    return Variable::create(Expr::create(std::move(op), {input}));
}
/*Create a where variable returning the coordinates of non-zero elements of x.
Args:
x: A variable.
Returns:
output: A variable holding the indices where x is non-zero.
*/
VARP _Where(VARP x) {
    std::unique_ptr<MNN::OpT> op(new MNN::OpT);
    op->type       = MNN::OpType_Where;
    op->main.type  = OpParameter_Extra;
    op->main.value = new ExtraT;
    return Variable::create(Expr::create(std::move(op), {x}));
}
2021-04-08 15:34:23 +08:00
2022-02-18 11:30:27 +08:00
/*Sort a variable along the given axis, implemented via TopKV2 with k = the full axis length.
Args:
x: A variable. Its shape info must be available (getInfo() != nullptr).
axis: The axis to sort along; negative values count from the end.
arg: If true, return the indices of the sorted order (argsort); otherwise the sorted values.
descend: If true, sort in descending order; otherwise ascending.
Returns:
output: A variable holding the sorted values or indices, or nullptr on invalid input.
*/
VARP _Sort(VARP x, int axis, bool arg, bool descend) {
    // Guard getInfo() like the other creators in this file do, instead of
    // dereferencing a possibly-null info (original code crashed here).
    auto xInfo = x->getInfo();
    if (nullptr == xInfo) {
        MNN_ERROR("Error for Sort because var not ready\n");
        return nullptr;
    }
    const auto& shape = xInfo->dim;
    const int rank    = static_cast<int>(shape.size());
    axis = axis < 0 ? rank + axis : axis;
    // Reject out-of-range axes rather than indexing dim[] out of bounds.
    if (axis < 0 || axis >= rank) {
        MNN_ERROR("Error for Sort because axis out of range\n");
        return nullptr;
    }
    std::unique_ptr<OpT> op(new OpT);
    op->type      = OpType_TopKV2;
    op->main.type = OpParameter_TopKV2;
    auto topk     = new TopKV2T;
    topk->largest = descend;
    op->main.value = topk;
    // Taking k equal to the full axis length turns top-k into a complete sort.
    int k = shape[axis];
    std::vector<VARP> inputs {x, _Scalar(k)};
    if (axis + 1 != rank) {
        // TopKV2 operates on the last axis by default; pass the axis explicitly otherwise.
        inputs.push_back(_Scalar(axis));
    }
    // Output 0 holds the sorted values, output 1 the indices.
    auto expr = Expr::create(op.get(), inputs, 2);
    return Variable::create(expr, arg ? 1 : 0);
}
/*Create a raster variable that rearranges the inputs according to a region description.
Args:
vars: The input variables.
region: Flattened region descriptors, stored in the op's "region" attribute.
shape: The output shape, stored in the op's "shape" attribute.
Returns:
output: A variable holding the rastered result.
*/
VARP _Raster(const std::vector<VARP>& vars, const std::vector<int>& region, const std::vector<int>& shape) {
    // Both attributes share the same int-list layout; build them with one helper.
    auto makeIntListAttr = [](const char* key, const std::vector<int>& values) {
        std::unique_ptr<AttributeT> attr(new AttributeT);
        attr->key = key;
        attr->list.reset(new ListValueT);
        attr->list->i = values;
        return attr;
    };
    auto extra = new ExtraT;
    extra->attr.push_back(makeIntListAttr("shape", shape));
    extra->attr.push_back(makeIntListAttr("region", region));
    std::unique_ptr<MNN::OpT> op(new MNN::OpT);
    op->type       = OpType_Raster;
    op->main.type  = OpParameter_Extra;
    op->main.value = extra;
    return Variable::create(Expr::create(std::move(op), vars));
}
/*Create a non-maximum-suppression variable.
Args:
boxes: A variable holding candidate boxes.
scores: A variable holding per-box scores.
maxDetections: Maximum number of boxes to keep.
iouThreshold: IoU threshold; only passed to the op when non-negative.
scoreThreshold: Score threshold; only passed to the op when non-negative.
Returns:
output: A variable holding the indices of the kept boxes.
*/
VARP _Nms(VARP boxes, VARP scores, int maxDetections, float iouThreshold, float scoreThreshold) {
    std::vector<VARP> inputs;
    inputs.reserve(5);
    inputs.push_back(boxes);
    inputs.push_back(scores);
    inputs.push_back(_Scalar(maxDetections));
    // Negative thresholds mean "use the backend default": omit the optional inputs.
    if (iouThreshold >= 0) {
        inputs.push_back(_Scalar(iouThreshold));
    }
    if (scoreThreshold >= 0) {
        inputs.push_back(_Scalar(scoreThreshold));
    }
    std::unique_ptr<MNN::OpT> nmsOp(new MNN::OpT);
    nmsOp->type = OpType_NonMaxSuppressionV2;
    return Variable::create(Expr::create(std::move(nmsOp), inputs));
}
2019-12-27 22:16:57 +08:00
} // namespace Express
} // namespace MNN