// MNN/schema/current/TFQuantizeOp_generated.h
// Snapshot metadata: 2980 lines, 109 KiB, captured 2019-12-27 22:16:57 +08:00
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_
#define FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_
#include "flatbuffers/flatbuffers.h"
#include "CaffeOp_generated.h"
#include "Tensor_generated.h"
#include "Type_generated.h"
namespace MNN {
// Forward declarations: one flat-table type and one object-API ("T",
// flatbuffers::NativeTable) companion type per quantized TF/TFLite op.
struct QuantizedParam;
struct QuantizedParamT;
struct QuantizedAdd;
struct QuantizedAddT;
struct Dequantize;
struct DequantizeT;
struct QuantizedAvgPool;
struct QuantizedAvgPoolT;
struct QuantizedBiasAdd;
struct QuantizedBiasAddT;
struct QuantizedConcat;
struct QuantizedConcatT;
struct QuantizedLogistic;
struct QuantizedLogisticT;
struct QuantizedMatMul;
struct QuantizedMatMulT;
struct QuantizedMaxPool;
struct QuantizedMaxPoolT;
struct QuantizedRelu;
struct QuantizedReluT;
struct QuantizedRelu6;
struct QuantizedRelu6T;
struct QuantizedReshape;
struct QuantizedReshapeT;
struct QuantizedSoftmax;
struct QuantizedSoftmaxT;
struct QuantizeV2;
struct QuantizeV2T;
struct RequantizationRange;
struct RequantizationRangeT;
struct Requantize;
struct RequantizeT;
struct TfQuantizedConv2D;
struct TfQuantizedConv2DT;
// Mini-reflection type-table accessors used by flatbuffers' minireflect
// utilities; definitions appear later in this generated header.
inline const flatbuffers::TypeTable *QuantizedParamTypeTable();
inline const flatbuffers::TypeTable *QuantizedAddTypeTable();
inline const flatbuffers::TypeTable *DequantizeTypeTable();
inline const flatbuffers::TypeTable *QuantizedAvgPoolTypeTable();
inline const flatbuffers::TypeTable *QuantizedBiasAddTypeTable();
inline const flatbuffers::TypeTable *QuantizedConcatTypeTable();
inline const flatbuffers::TypeTable *QuantizedLogisticTypeTable();
inline const flatbuffers::TypeTable *QuantizedMatMulTypeTable();
inline const flatbuffers::TypeTable *QuantizedMaxPoolTypeTable();
inline const flatbuffers::TypeTable *QuantizedReluTypeTable();
inline const flatbuffers::TypeTable *QuantizedRelu6TypeTable();
inline const flatbuffers::TypeTable *QuantizedReshapeTypeTable();
inline const flatbuffers::TypeTable *QuantizedSoftmaxTypeTable();
inline const flatbuffers::TypeTable *QuantizeV2TypeTable();
inline const flatbuffers::TypeTable *RequantizationRangeTypeTable();
inline const flatbuffers::TypeTable *RequantizeTypeTable();
inline const flatbuffers::TypeTable *TfQuantizedConv2DTypeTable();
// Fused-activation kinds (mirrors TFLite's TfLiteFusedActivation) that can be
// attached to quantized ops. Values are contiguous, 0..6.
enum FusedActivation {
  FusedActivation_kTfLiteActNone = 0,
  FusedActivation_kTfLiteActRelu = 1,
  FusedActivation_kTfLiteActRelu1 = 2,
  FusedActivation_kTfLiteActRelu6 = 3,
  FusedActivation_kTfLiteActTanh = 4,
  FusedActivation_kTfLiteActSignBit = 5,
  FusedActivation_kTfLiteActSigmoid = 6,
  FusedActivation_MIN = FusedActivation_kTfLiteActNone,
  FusedActivation_MAX = FusedActivation_kTfLiteActSigmoid
};

/// All FusedActivation enumerators, in declaration (and numeric) order.
inline const FusedActivation (&EnumValuesFusedActivation())[7] {
  static const FusedActivation kValues[] = {
    FusedActivation_kTfLiteActNone,
    FusedActivation_kTfLiteActRelu,
    FusedActivation_kTfLiteActRelu1,
    FusedActivation_kTfLiteActRelu6,
    FusedActivation_kTfLiteActTanh,
    FusedActivation_kTfLiteActSignBit,
    FusedActivation_kTfLiteActSigmoid
  };
  return kValues;
}

/// nullptr-terminated table of enumerator names, indexed by enum value.
inline const char * const *EnumNamesFusedActivation() {
  static const char * const kNames[] = {
    "kTfLiteActNone",
    "kTfLiteActRelu",
    "kTfLiteActRelu1",
    "kTfLiteActRelu6",
    "kTfLiteActTanh",
    "kTfLiteActSignBit",
    "kTfLiteActSigmoid",
    nullptr
  };
  return kNames;
}

/// Human-readable name for `e`; returns "" for out-of-range values.
inline const char *EnumNameFusedActivation(FusedActivation e) {
  if (e < FusedActivation_MIN || e > FusedActivation_MAX) return "";
  return EnumNamesFusedActivation()[static_cast<size_t>(e)];
}
// Source framework of a quantized model: plain TensorFlow vs. TFLite.
enum ModeFormat {
  ModeFormat_TENSORFLOW = 0,
  ModeFormat_TFLITE = 1,
  ModeFormat_MIN = ModeFormat_TENSORFLOW,
  ModeFormat_MAX = ModeFormat_TFLITE
};

/// Both ModeFormat enumerators, in numeric order.
inline const ModeFormat (&EnumValuesModeFormat())[2] {
  static const ModeFormat kValues[] = {
    ModeFormat_TENSORFLOW,
    ModeFormat_TFLITE
  };
  return kValues;
}

/// nullptr-terminated table of enumerator names, indexed by enum value.
inline const char * const *EnumNamesModeFormat() {
  static const char * const kNames[] = {
    "TENSORFLOW",
    "TFLITE",
    nullptr
  };
  return kNames;
}

/// Human-readable name for `e`; returns "" for out-of-range values.
inline const char *EnumNameModeFormat(ModeFormat e) {
  switch (e) {
    case ModeFormat_TENSORFLOW: return "TENSORFLOW";
    case ModeFormat_TFLITE: return "TFLITE";
    default: return "";
  }
}
// Quantization mode (mirrors TensorFlow's QuantizeV2 `mode` attribute).
enum QuantizeMode {
  QuantizeMode_MIN_COMBINED = 0,
  QuantizeMode_MIN_FIRST = 1,
  QuantizeMode_SCALED = 2,
  QuantizeMode_MIN = QuantizeMode_MIN_COMBINED,
  QuantizeMode_MAX = QuantizeMode_SCALED
};

/// All QuantizeMode enumerators, in numeric order.
inline const QuantizeMode (&EnumValuesQuantizeMode())[3] {
  static const QuantizeMode kValues[] = {
    QuantizeMode_MIN_COMBINED,
    QuantizeMode_MIN_FIRST,
    QuantizeMode_SCALED
  };
  return kValues;
}

/// nullptr-terminated table of enumerator names, indexed by enum value.
inline const char * const *EnumNamesQuantizeMode() {
  static const char * const kNames[] = {
    "MIN_COMBINED",
    "MIN_FIRST",
    "SCALED",
    nullptr
  };
  return kNames;
}

/// Human-readable name for `e`; returns "" for out-of-range values.
inline const char *EnumNameQuantizeMode(QuantizeMode e) {
  const bool inRange = (e >= QuantizeMode_MIN) && (e <= QuantizeMode_MAX);
  return inRange ? EnumNamesQuantizeMode()[static_cast<size_t>(e)] : "";
}
// Rounding rule applied when quantizing floating-point values.
enum QuantizeRoundMode {
  QuantizeRoundMode_HALF_AWAY_FROM_ZERO = 0,
  QuantizeRoundMode_HALF_TO_EVEN = 1,
  QuantizeRoundMode_MIN = QuantizeRoundMode_HALF_AWAY_FROM_ZERO,
  QuantizeRoundMode_MAX = QuantizeRoundMode_HALF_TO_EVEN
};

/// Both QuantizeRoundMode enumerators, in numeric order.
inline const QuantizeRoundMode (&EnumValuesQuantizeRoundMode())[2] {
  static const QuantizeRoundMode kValues[] = {
    QuantizeRoundMode_HALF_AWAY_FROM_ZERO,
    QuantizeRoundMode_HALF_TO_EVEN
  };
  return kValues;
}

/// nullptr-terminated table of enumerator names, indexed by enum value.
inline const char * const *EnumNamesQuantizeRoundMode() {
  static const char * const kNames[] = {
    "HALF_AWAY_FROM_ZERO",
    "HALF_TO_EVEN",
    nullptr
  };
  return kNames;
}

/// Human-readable name for `e`; returns "" for out-of-range values.
inline const char *EnumNameQuantizeRoundMode(QuantizeRoundMode e) {
  if (e >= QuantizeRoundMode_MIN && e <= QuantizeRoundMode_MAX) {
    return EnumNamesQuantizeRoundMode()[static_cast<size_t>(e)];
  }
  return "";
}
// Object API (mutable, heap-owning) mirror of the QuantizedParam table.
struct QuantizedParamT : public flatbuffers::NativeTable {
  typedef QuantizedParam TableType;
  int32_t zeroPoint;  // quantization zero point
  float scale;        // quantization scale
  QuantizedParamT()
      : zeroPoint(0),
        scale(0.0f) {
  }
};
// Flat (read-only, in-buffer) quantization parameter pair (zeroPoint, scale).
struct QuantizedParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedParamT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedParamTypeTable();
  }
  // vtable offsets assigned by the flatbuffers compiler; do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ZEROPOINT = 4,
    VT_SCALE = 6
  };
  int32_t zeroPoint() const {
    return GetField<int32_t>(VT_ZEROPOINT, 0);
  }
  float scale() const {
    return GetField<float>(VT_SCALE, 0.0f);
  }
  // Structural validation for untrusted buffers.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_ZEROPOINT) &&
           VerifyField<float>(verifier, VT_SCALE) &&
           verifier.EndTable();
  }
  QuantizedParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedParam> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for QuantizedParam tables.
struct QuantizedParamBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_zeroPoint(int32_t zeroPoint) {
    fbb_.AddElement<int32_t>(QuantizedParam::VT_ZEROPOINT, zeroPoint, 0);
  }
  void add_scale(float scale) {
    fbb_.AddElement<float>(QuantizedParam::VT_SCALE, scale, 0.0f);
  }
  explicit QuantizedParamBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedParamBuilder &operator=(const QuantizedParamBuilder &);  // deliberately undefined: non-assignable
  flatbuffers::Offset<QuantizedParam> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedParam>(end);
    return o;
  }
};
// One-shot creator; the add_* order is the compiler-chosen serialization order.
inline flatbuffers::Offset<QuantizedParam> CreateQuantizedParam(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t zeroPoint = 0,
    float scale = 0.0f) {
  QuantizedParamBuilder builder_(_fbb);
  builder_.add_scale(scale);
  builder_.add_zeroPoint(zeroPoint);
  return builder_.Finish();
}
// Object-API pack entry point; defined with the other Pack/UnPack bodies.
flatbuffers::Offset<QuantizedParam> CreateQuantizedParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object API mirror of QuantizedAdd: elementwise add with per-tensor
// quantization params and an optional fused activation.
struct QuantizedAddT : public flatbuffers::NativeTable {
  typedef QuantizedAdd TableType;
  FusedActivation activationType;
  std::unique_ptr<QuantizedParamT> input1QuantizedParam;
  std::unique_ptr<QuantizedParamT> input2QuantizedParam;
  std::unique_ptr<QuantizedParamT> outputQuantizedParam;
  QuantizedAddT()
      : activationType(FusedActivation_kTfLiteActNone) {
  }
};
// Flat table for QuantizedAdd.
struct QuantizedAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedAddT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedAddTypeTable();
  }
  // vtable offsets assigned by the flatbuffers compiler; do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ACTIVATIONTYPE = 4,
    VT_INPUT1QUANTIZEDPARAM = 6,
    VT_INPUT2QUANTIZEDPARAM = 8,
    VT_OUTPUTQUANTIZEDPARAM = 10
  };
  FusedActivation activationType() const {
    return static_cast<FusedActivation>(GetField<int8_t>(VT_ACTIVATIONTYPE, 0));
  }
  // Sub-table accessors return nullptr when the field is absent.
  const QuantizedParam *input1QuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_INPUT1QUANTIZEDPARAM);
  }
  const QuantizedParam *input2QuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_INPUT2QUANTIZEDPARAM);
  }
  const QuantizedParam *outputQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_OUTPUTQUANTIZEDPARAM);
  }
  // Structural validation for untrusted buffers (recurses into sub-tables).
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int8_t>(verifier, VT_ACTIVATIONTYPE) &&
           VerifyOffset(verifier, VT_INPUT1QUANTIZEDPARAM) &&
           verifier.VerifyTable(input1QuantizedParam()) &&
           VerifyOffset(verifier, VT_INPUT2QUANTIZEDPARAM) &&
           verifier.VerifyTable(input2QuantizedParam()) &&
           VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) &&
           verifier.VerifyTable(outputQuantizedParam()) &&
           verifier.EndTable();
  }
  QuantizedAddT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedAddT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedAdd> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for QuantizedAdd tables.
struct QuantizedAddBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_activationType(FusedActivation activationType) {
    fbb_.AddElement<int8_t>(QuantizedAdd::VT_ACTIVATIONTYPE, static_cast<int8_t>(activationType), 0);
  }
  void add_input1QuantizedParam(flatbuffers::Offset<QuantizedParam> input1QuantizedParam) {
    fbb_.AddOffset(QuantizedAdd::VT_INPUT1QUANTIZEDPARAM, input1QuantizedParam);
  }
  void add_input2QuantizedParam(flatbuffers::Offset<QuantizedParam> input2QuantizedParam) {
    fbb_.AddOffset(QuantizedAdd::VT_INPUT2QUANTIZEDPARAM, input2QuantizedParam);
  }
  void add_outputQuantizedParam(flatbuffers::Offset<QuantizedParam> outputQuantizedParam) {
    fbb_.AddOffset(QuantizedAdd::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam);
  }
  explicit QuantizedAddBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedAddBuilder &operator=(const QuantizedAddBuilder &);  // deliberately undefined: non-assignable
  flatbuffers::Offset<QuantizedAdd> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedAdd>(end);
    return o;
  }
};
// One-shot creator; the add_* order is the compiler-chosen serialization order.
inline flatbuffers::Offset<QuantizedAdd> CreateQuantizedAdd(
    flatbuffers::FlatBufferBuilder &_fbb,
    FusedActivation activationType = FusedActivation_kTfLiteActNone,
    flatbuffers::Offset<QuantizedParam> input1QuantizedParam = 0,
    flatbuffers::Offset<QuantizedParam> input2QuantizedParam = 0,
    flatbuffers::Offset<QuantizedParam> outputQuantizedParam = 0) {
  QuantizedAddBuilder builder_(_fbb);
  builder_.add_outputQuantizedParam(outputQuantizedParam);
  builder_.add_input2QuantizedParam(input2QuantizedParam);
  builder_.add_input1QuantizedParam(input1QuantizedParam);
  builder_.add_activationType(activationType);
  return builder_.Finish();
}
// Object-API pack entry point; defined with the other Pack/UnPack bodies.
flatbuffers::Offset<QuantizedAdd> CreateQuantizedAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object API mirror of Dequantize: converts quantized data back to float.
// NOTE(review): field is spelled "modelFormat" in the schema (likely intended
// as "modeFormat") — name is load-bearing for serialized models; do not fix.
struct DequantizeT : public flatbuffers::NativeTable {
  typedef Dequantize TableType;
  std::unique_ptr<QuantizedParamT> inputQuantizedParam;
  QuantizeMode mode;
  ModeFormat modelFormat;
  DataType type;  // DataType comes from Type_generated.h
  DequantizeT()
      : mode(QuantizeMode_MIN_COMBINED),
        modelFormat(ModeFormat_TENSORFLOW),
        type(DataType_DT_INVALID) {
  }
};
// Flat table for Dequantize.
struct Dequantize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef DequantizeT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return DequantizeTypeTable();
  }
  // vtable offsets assigned by the flatbuffers compiler; do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INPUTQUANTIZEDPARAM = 4,
    VT_MODE = 6,
    VT_MODELFORMAT = 8,
    VT_TYPE = 10
  };
  const QuantizedParam *inputQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_INPUTQUANTIZEDPARAM);
  }
  QuantizeMode mode() const {
    return static_cast<QuantizeMode>(GetField<int8_t>(VT_MODE, 0));
  }
  ModeFormat modelFormat() const {
    return static_cast<ModeFormat>(GetField<int8_t>(VT_MODELFORMAT, 0));
  }
  DataType type() const {
    return static_cast<DataType>(GetField<int32_t>(VT_TYPE, 0));
  }
  // Structural validation for untrusted buffers.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) &&
           verifier.VerifyTable(inputQuantizedParam()) &&
           VerifyField<int8_t>(verifier, VT_MODE) &&
           VerifyField<int8_t>(verifier, VT_MODELFORMAT) &&
           VerifyField<int32_t>(verifier, VT_TYPE) &&
           verifier.EndTable();
  }
  DequantizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(DequantizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Dequantize> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for Dequantize tables.
struct DequantizeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_inputQuantizedParam(flatbuffers::Offset<QuantizedParam> inputQuantizedParam) {
    fbb_.AddOffset(Dequantize::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam);
  }
  void add_mode(QuantizeMode mode) {
    fbb_.AddElement<int8_t>(Dequantize::VT_MODE, static_cast<int8_t>(mode), 0);
  }
  void add_modelFormat(ModeFormat modelFormat) {
    fbb_.AddElement<int8_t>(Dequantize::VT_MODELFORMAT, static_cast<int8_t>(modelFormat), 0);
  }
  void add_type(DataType type) {
    fbb_.AddElement<int32_t>(Dequantize::VT_TYPE, static_cast<int32_t>(type), 0);
  }
  explicit DequantizeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  DequantizeBuilder &operator=(const DequantizeBuilder &);  // deliberately undefined: non-assignable
  flatbuffers::Offset<Dequantize> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Dequantize>(end);
    return o;
  }
};
// One-shot creator; the add_* order is the compiler-chosen serialization order.
inline flatbuffers::Offset<Dequantize> CreateDequantize(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<QuantizedParam> inputQuantizedParam = 0,
    QuantizeMode mode = QuantizeMode_MIN_COMBINED,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW,
    DataType type = DataType_DT_INVALID) {
  DequantizeBuilder builder_(_fbb);
  builder_.add_type(type);
  builder_.add_inputQuantizedParam(inputQuantizedParam);
  builder_.add_modelFormat(modelFormat);
  builder_.add_mode(mode);
  return builder_.Finish();
}
// Object-API pack entry point; defined with the other Pack/UnPack bodies.
flatbuffers::Offset<Dequantize> CreateDequantize(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object API mirror of QuantizedAvgPool: average pooling over quantized
// tensors (kernel/stride/pad geometry plus output activation clamp range).
struct QuantizedAvgPoolT : public flatbuffers::NativeTable {
  typedef QuantizedAvgPool TableType;
  int32_t kernelX;
  int32_t kernelY;
  ModeFormat modelFormat;
  int32_t outputActivationMax;  // clamp ceiling applied to the pooled output
  int32_t outputActivationMin;  // clamp floor applied to the pooled output
  PoolPadType padType;  // PoolPadType comes from CaffeOp_generated.h
  int32_t padX;
  int32_t padY;
  int32_t strideX;
  int32_t strideY;
  DataType type;
  QuantizedAvgPoolT()
      : kernelX(0),
        kernelY(0),
        modelFormat(ModeFormat_TENSORFLOW),
        outputActivationMax(0),
        outputActivationMin(0),
        padType(PoolPadType_CAFFE),
        padX(0),
        padY(0),
        strideX(0),
        strideY(0),
        type(DataType_DT_INVALID) {
  }
};
// Flat table for QuantizedAvgPool.
struct QuantizedAvgPool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedAvgPoolT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedAvgPoolTypeTable();
  }
  // vtable offsets assigned by the flatbuffers compiler; do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_KERNELX = 4,
    VT_KERNELY = 6,
    VT_MODELFORMAT = 8,
    VT_OUTPUTACTIVATIONMAX = 10,
    VT_OUTPUTACTIVATIONMIN = 12,
    VT_PADTYPE = 14,
    VT_PADX = 16,
    VT_PADY = 18,
    VT_STRIDEX = 20,
    VT_STRIDEY = 22,
    VT_TYPE = 24
  };
  int32_t kernelX() const {
    return GetField<int32_t>(VT_KERNELX, 0);
  }
  int32_t kernelY() const {
    return GetField<int32_t>(VT_KERNELY, 0);
  }
  ModeFormat modelFormat() const {
    return static_cast<ModeFormat>(GetField<int8_t>(VT_MODELFORMAT, 0));
  }
  int32_t outputActivationMax() const {
    return GetField<int32_t>(VT_OUTPUTACTIVATIONMAX, 0);
  }
  int32_t outputActivationMin() const {
    return GetField<int32_t>(VT_OUTPUTACTIVATIONMIN, 0);
  }
  PoolPadType padType() const {
    return static_cast<PoolPadType>(GetField<int8_t>(VT_PADTYPE, 0));
  }
  int32_t padX() const {
    return GetField<int32_t>(VT_PADX, 0);
  }
  int32_t padY() const {
    return GetField<int32_t>(VT_PADY, 0);
  }
  int32_t strideX() const {
    return GetField<int32_t>(VT_STRIDEX, 0);
  }
  int32_t strideY() const {
    return GetField<int32_t>(VT_STRIDEY, 0);
  }
  DataType type() const {
    return static_cast<DataType>(GetField<int32_t>(VT_TYPE, 0));
  }
  // Structural validation for untrusted buffers.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_KERNELX) &&
           VerifyField<int32_t>(verifier, VT_KERNELY) &&
           VerifyField<int8_t>(verifier, VT_MODELFORMAT) &&
           VerifyField<int32_t>(verifier, VT_OUTPUTACTIVATIONMAX) &&
           VerifyField<int32_t>(verifier, VT_OUTPUTACTIVATIONMIN) &&
           VerifyField<int8_t>(verifier, VT_PADTYPE) &&
           VerifyField<int32_t>(verifier, VT_PADX) &&
           VerifyField<int32_t>(verifier, VT_PADY) &&
           VerifyField<int32_t>(verifier, VT_STRIDEX) &&
           VerifyField<int32_t>(verifier, VT_STRIDEY) &&
           VerifyField<int32_t>(verifier, VT_TYPE) &&
           verifier.EndTable();
  }
  QuantizedAvgPoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedAvgPoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedAvgPool> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for QuantizedAvgPool tables.
struct QuantizedAvgPoolBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_kernelX(int32_t kernelX) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_KERNELX, kernelX, 0);
  }
  void add_kernelY(int32_t kernelY) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_KERNELY, kernelY, 0);
  }
  void add_modelFormat(ModeFormat modelFormat) {
    fbb_.AddElement<int8_t>(QuantizedAvgPool::VT_MODELFORMAT, static_cast<int8_t>(modelFormat), 0);
  }
  void add_outputActivationMax(int32_t outputActivationMax) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_OUTPUTACTIVATIONMAX, outputActivationMax, 0);
  }
  void add_outputActivationMin(int32_t outputActivationMin) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_OUTPUTACTIVATIONMIN, outputActivationMin, 0);
  }
  void add_padType(PoolPadType padType) {
    fbb_.AddElement<int8_t>(QuantizedAvgPool::VT_PADTYPE, static_cast<int8_t>(padType), 0);
  }
  void add_padX(int32_t padX) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_PADX, padX, 0);
  }
  void add_padY(int32_t padY) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_PADY, padY, 0);
  }
  void add_strideX(int32_t strideX) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_STRIDEX, strideX, 0);
  }
  void add_strideY(int32_t strideY) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_STRIDEY, strideY, 0);
  }
  void add_type(DataType type) {
    fbb_.AddElement<int32_t>(QuantizedAvgPool::VT_TYPE, static_cast<int32_t>(type), 0);
  }
  explicit QuantizedAvgPoolBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedAvgPoolBuilder &operator=(const QuantizedAvgPoolBuilder &);  // deliberately undefined: non-assignable
  flatbuffers::Offset<QuantizedAvgPool> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedAvgPool>(end);
    return o;
  }
};
// One-shot creator; the add_* order is the compiler-chosen serialization order
// (4-byte scalars first, then 1-byte enums).
inline flatbuffers::Offset<QuantizedAvgPool> CreateQuantizedAvgPool(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t kernelX = 0,
    int32_t kernelY = 0,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW,
    int32_t outputActivationMax = 0,
    int32_t outputActivationMin = 0,
    PoolPadType padType = PoolPadType_CAFFE,
    int32_t padX = 0,
    int32_t padY = 0,
    int32_t strideX = 0,
    int32_t strideY = 0,
    DataType type = DataType_DT_INVALID) {
  QuantizedAvgPoolBuilder builder_(_fbb);
  builder_.add_type(type);
  builder_.add_strideY(strideY);
  builder_.add_strideX(strideX);
  builder_.add_padY(padY);
  builder_.add_padX(padX);
  builder_.add_outputActivationMin(outputActivationMin);
  builder_.add_outputActivationMax(outputActivationMax);
  builder_.add_kernelY(kernelY);
  builder_.add_kernelX(kernelX);
  builder_.add_padType(padType);
  builder_.add_modelFormat(modelFormat);
  return builder_.Finish();
}
// Object-API pack entry point; defined with the other Pack/UnPack bodies.
flatbuffers::Offset<QuantizedAvgPool> CreateQuantizedAvgPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object API mirror of QuantizedBiasAdd: adds a per-channel bias vector to a
// quantized tensor, with input/output element types and a clamp range.
struct QuantizedBiasAddT : public flatbuffers::NativeTable {
  typedef QuantizedBiasAdd TableType;
  std::vector<int32_t> bias;
  DataType inputType;
  int32_t max;
  int32_t min;
  DataType outputType;
  QuantizedBiasAddT()
      : inputType(DataType_DT_INVALID),
        max(0),
        min(0),
        outputType(DataType_DT_INVALID) {
  }
};
// Flat table for QuantizedBiasAdd.
struct QuantizedBiasAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedBiasAddT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedBiasAddTypeTable();
  }
  // vtable offsets assigned by the flatbuffers compiler; do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_BIAS = 4,
    VT_INPUTTYPE = 6,
    VT_MAX = 8,
    VT_MIN = 10,
    VT_OUTPUTTYPE = 12
  };
  // Returns nullptr when the bias vector is absent.
  const flatbuffers::Vector<int32_t> *bias() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BIAS);
  }
  DataType inputType() const {
    return static_cast<DataType>(GetField<int32_t>(VT_INPUTTYPE, 0));
  }
  int32_t max() const {
    return GetField<int32_t>(VT_MAX, 0);
  }
  int32_t min() const {
    return GetField<int32_t>(VT_MIN, 0);
  }
  DataType outputType() const {
    return static_cast<DataType>(GetField<int32_t>(VT_OUTPUTTYPE, 0));
  }
  // Structural validation for untrusted buffers (checks the vector bounds too).
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_BIAS) &&
           verifier.VerifyVector(bias()) &&
           VerifyField<int32_t>(verifier, VT_INPUTTYPE) &&
           VerifyField<int32_t>(verifier, VT_MAX) &&
           VerifyField<int32_t>(verifier, VT_MIN) &&
           VerifyField<int32_t>(verifier, VT_OUTPUTTYPE) &&
           verifier.EndTable();
  }
  QuantizedBiasAddT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedBiasAddT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedBiasAdd> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for QuantizedBiasAdd tables.
struct QuantizedBiasAddBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_bias(flatbuffers::Offset<flatbuffers::Vector<int32_t>> bias) {
    fbb_.AddOffset(QuantizedBiasAdd::VT_BIAS, bias);
  }
  void add_inputType(DataType inputType) {
    fbb_.AddElement<int32_t>(QuantizedBiasAdd::VT_INPUTTYPE, static_cast<int32_t>(inputType), 0);
  }
  void add_max(int32_t max) {
    fbb_.AddElement<int32_t>(QuantizedBiasAdd::VT_MAX, max, 0);
  }
  void add_min(int32_t min) {
    fbb_.AddElement<int32_t>(QuantizedBiasAdd::VT_MIN, min, 0);
  }
  void add_outputType(DataType outputType) {
    fbb_.AddElement<int32_t>(QuantizedBiasAdd::VT_OUTPUTTYPE, static_cast<int32_t>(outputType), 0);
  }
  explicit QuantizedBiasAddBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedBiasAddBuilder &operator=(const QuantizedBiasAddBuilder &);  // deliberately undefined: non-assignable
  flatbuffers::Offset<QuantizedBiasAdd> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedBiasAdd>(end);
    return o;
  }
};
// One-shot creator; the add_* order is the compiler-chosen serialization order.
inline flatbuffers::Offset<QuantizedBiasAdd> CreateQuantizedBiasAdd(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> bias = 0,
    DataType inputType = DataType_DT_INVALID,
    int32_t max = 0,
    int32_t min = 0,
    DataType outputType = DataType_DT_INVALID) {
  QuantizedBiasAddBuilder builder_(_fbb);
  builder_.add_outputType(outputType);
  builder_.add_min(min);
  builder_.add_max(max);
  builder_.add_inputType(inputType);
  builder_.add_bias(bias);
  return builder_.Finish();
}
// Convenience creator taking a std::vector; serializes the vector first.
inline flatbuffers::Offset<QuantizedBiasAdd> CreateQuantizedBiasAddDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int32_t> *bias = nullptr,
    DataType inputType = DataType_DT_INVALID,
    int32_t max = 0,
    int32_t min = 0,
    DataType outputType = DataType_DT_INVALID) {
  auto bias__ = bias ? _fbb.CreateVector<int32_t>(*bias) : 0;
  return MNN::CreateQuantizedBiasAdd(
      _fbb,
      bias__,
      inputType,
      max,
      min,
      outputType);
}
// Object-API pack entry point; defined with the other Pack/UnPack bodies.
flatbuffers::Offset<QuantizedBiasAdd> CreateQuantizedBiasAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object API mirror of QuantizedConcat: concatenation along `axis` with
// per-input scale/zero-point vectors and one set of output quant params.
struct QuantizedConcatT : public flatbuffers::NativeTable {
  typedef QuantizedConcat TableType;
  FusedActivation activationType;
  int32_t axis;
  std::vector<float> inputScale;       // parallel to inputZeroPoint, one entry per input
  std::vector<int32_t> inputZeroPoint;
  std::unique_ptr<QuantizedParamT> outputQuantizedParam;
  QuantizedConcatT()
      : activationType(FusedActivation_kTfLiteActNone),
        axis(0) {
  }
};
// Flat table for QuantizedConcat.
struct QuantizedConcat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedConcatT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedConcatTypeTable();
  }
  // vtable offsets assigned by the flatbuffers compiler; do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ACTIVATIONTYPE = 4,
    VT_AXIS = 6,
    VT_INPUTSCALE = 8,
    VT_INPUTZEROPOINT = 10,
    VT_OUTPUTQUANTIZEDPARAM = 12
  };
  FusedActivation activationType() const {
    return static_cast<FusedActivation>(GetField<int8_t>(VT_ACTIVATIONTYPE, 0));
  }
  int32_t axis() const {
    return GetField<int32_t>(VT_AXIS, 0);
  }
  // Vector/table accessors return nullptr when the field is absent.
  const flatbuffers::Vector<float> *inputScale() const {
    return GetPointer<const flatbuffers::Vector<float> *>(VT_INPUTSCALE);
  }
  const flatbuffers::Vector<int32_t> *inputZeroPoint() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTZEROPOINT);
  }
  const QuantizedParam *outputQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_OUTPUTQUANTIZEDPARAM);
  }
  // Structural validation for untrusted buffers.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int8_t>(verifier, VT_ACTIVATIONTYPE) &&
           VerifyField<int32_t>(verifier, VT_AXIS) &&
           VerifyOffset(verifier, VT_INPUTSCALE) &&
           verifier.VerifyVector(inputScale()) &&
           VerifyOffset(verifier, VT_INPUTZEROPOINT) &&
           verifier.VerifyVector(inputZeroPoint()) &&
           VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) &&
           verifier.VerifyTable(outputQuantizedParam()) &&
           verifier.EndTable();
  }
  QuantizedConcatT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedConcatT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedConcat> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for QuantizedConcat tables.
struct QuantizedConcatBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_activationType(FusedActivation activationType) {
    fbb_.AddElement<int8_t>(QuantizedConcat::VT_ACTIVATIONTYPE, static_cast<int8_t>(activationType), 0);
  }
  void add_axis(int32_t axis) {
    fbb_.AddElement<int32_t>(QuantizedConcat::VT_AXIS, axis, 0);
  }
  void add_inputScale(flatbuffers::Offset<flatbuffers::Vector<float>> inputScale) {
    fbb_.AddOffset(QuantizedConcat::VT_INPUTSCALE, inputScale);
  }
  void add_inputZeroPoint(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputZeroPoint) {
    fbb_.AddOffset(QuantizedConcat::VT_INPUTZEROPOINT, inputZeroPoint);
  }
  void add_outputQuantizedParam(flatbuffers::Offset<QuantizedParam> outputQuantizedParam) {
    fbb_.AddOffset(QuantizedConcat::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam);
  }
  explicit QuantizedConcatBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedConcatBuilder &operator=(const QuantizedConcatBuilder &);  // deliberately undefined: non-assignable
  flatbuffers::Offset<QuantizedConcat> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedConcat>(end);
    return o;
  }
};
// One-shot creator; the add_* order is the compiler-chosen serialization order.
inline flatbuffers::Offset<QuantizedConcat> CreateQuantizedConcat(
    flatbuffers::FlatBufferBuilder &_fbb,
    FusedActivation activationType = FusedActivation_kTfLiteActNone,
    int32_t axis = 0,
    flatbuffers::Offset<flatbuffers::Vector<float>> inputScale = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputZeroPoint = 0,
    flatbuffers::Offset<QuantizedParam> outputQuantizedParam = 0) {
  QuantizedConcatBuilder builder_(_fbb);
  builder_.add_outputQuantizedParam(outputQuantizedParam);
  builder_.add_inputZeroPoint(inputZeroPoint);
  builder_.add_inputScale(inputScale);
  builder_.add_axis(axis);
  builder_.add_activationType(activationType);
  return builder_.Finish();
}
// Convenience creator taking std::vectors; serializes the vectors first.
inline flatbuffers::Offset<QuantizedConcat> CreateQuantizedConcatDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    FusedActivation activationType = FusedActivation_kTfLiteActNone,
    int32_t axis = 0,
    const std::vector<float> *inputScale = nullptr,
    const std::vector<int32_t> *inputZeroPoint = nullptr,
    flatbuffers::Offset<QuantizedParam> outputQuantizedParam = 0) {
  auto inputScale__ = inputScale ? _fbb.CreateVector<float>(*inputScale) : 0;
  auto inputZeroPoint__ = inputZeroPoint ? _fbb.CreateVector<int32_t>(*inputZeroPoint) : 0;
  return MNN::CreateQuantizedConcat(
      _fbb,
      activationType,
      axis,
      inputScale__,
      inputZeroPoint__,
      outputQuantizedParam);
}
// Object-API pack entry point; defined with the other Pack/UnPack bodies.
flatbuffers::Offset<QuantizedConcat> CreateQuantizedConcat(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
struct QuantizedLogisticT : public flatbuffers::NativeTable {
typedef QuantizedLogistic TableType;
std::unique_ptr<QuantizedParamT> inputQuantizedParam;
std::unique_ptr<QuantizedParamT> outputQuantizedParam;
QuantizedLogisticT() {
}
};
struct QuantizedLogistic FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef QuantizedLogisticT NativeTableType;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return QuantizedLogisticTypeTable();
}
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_INPUTQUANTIZEDPARAM = 4,
VT_OUTPUTQUANTIZEDPARAM = 6
};
const QuantizedParam *inputQuantizedParam() const {
return GetPointer<const QuantizedParam *>(VT_INPUTQUANTIZEDPARAM);
}
const QuantizedParam *outputQuantizedParam() const {
return GetPointer<const QuantizedParam *>(VT_OUTPUTQUANTIZEDPARAM);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) &&
verifier.VerifyTable(inputQuantizedParam()) &&
VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) &&
verifier.VerifyTable(outputQuantizedParam()) &&
verifier.EndTable();
}
QuantizedLogisticT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(QuantizedLogisticT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<QuantizedLogistic> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedLogistic table; StartTable in the ctor,
// field adds in any order, EndTable via Finish().
struct QuantizedLogisticBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_inputQuantizedParam(flatbuffers::Offset<QuantizedParam> inputQuantizedParam) {
    fbb_.AddOffset(QuantizedLogistic::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam);
  }
  void add_outputQuantizedParam(flatbuffers::Offset<QuantizedParam> outputQuantizedParam) {
    fbb_.AddOffset(QuantizedLogistic::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam);
  }
  explicit QuantizedLogisticBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedLogisticBuilder &operator=(const QuantizedLogisticBuilder &);  // non-assignable
  flatbuffers::Offset<QuantizedLogistic> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedLogistic>(end);
    return o;
  }
};
// Convenience one-shot constructor for a QuantizedLogistic table.
// Field adds follow flatc's emitted order (decreasing field size).
inline flatbuffers::Offset<QuantizedLogistic> CreateQuantizedLogistic(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<QuantizedParam> inputQuantizedParam = 0,
    flatbuffers::Offset<QuantizedParam> outputQuantizedParam = 0) {
  QuantizedLogisticBuilder builder_(_fbb);
  builder_.add_outputQuantizedParam(outputQuantizedParam);
  builder_.add_inputQuantizedParam(inputQuantizedParam);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizedLogistic> CreateQuantizedLogistic(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizedMatMul: transpose flags for the two operands.
struct QuantizedMatMulT : public flatbuffers::NativeTable {
  typedef QuantizedMatMul TableType;
  bool transposeA;  // transpose left operand before multiply
  bool transposeB;  // transpose right operand before multiply
  QuantizedMatMulT()
      : transposeA(false),
        transposeB(false) {
  }
};
// Flat accessor for the QuantizedMatMul table (quantized matrix multiply).
// Bools are stored as uint8_t in the buffer, default 0 (false).
struct QuantizedMatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedMatMulT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedMatMulTypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TRANSPOSEA = 4,
    VT_TRANSPOSEB = 6
  };
  bool transposeA() const {
    return GetField<uint8_t>(VT_TRANSPOSEA, 0) != 0;
  }
  bool transposeB() const {
    return GetField<uint8_t>(VT_TRANSPOSEB, 0) != 0;
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSEA) &&
           VerifyField<uint8_t>(verifier, VT_TRANSPOSEB) &&
           verifier.EndTable();
  }
  QuantizedMatMulT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedMatMulT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedMatMul> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedMatMul table.
struct QuantizedMatMulBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_transposeA(bool transposeA) {
    fbb_.AddElement<uint8_t>(QuantizedMatMul::VT_TRANSPOSEA, static_cast<uint8_t>(transposeA), 0);
  }
  void add_transposeB(bool transposeB) {
    fbb_.AddElement<uint8_t>(QuantizedMatMul::VT_TRANSPOSEB, static_cast<uint8_t>(transposeB), 0);
  }
  explicit QuantizedMatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedMatMulBuilder &operator=(const QuantizedMatMulBuilder &);  // non-assignable
  flatbuffers::Offset<QuantizedMatMul> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedMatMul>(end);
    return o;
  }
};
// Convenience one-shot constructor for a QuantizedMatMul table.
inline flatbuffers::Offset<QuantizedMatMul> CreateQuantizedMatMul(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool transposeA = false,
    bool transposeB = false) {
  QuantizedMatMulBuilder builder_(_fbb);
  builder_.add_transposeB(transposeB);
  builder_.add_transposeA(transposeA);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizedMatMul> CreateQuantizedMatMul(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizedMaxPool: kernel/stride/pad geometry plus
// activation clamp bounds and the element data type.
struct QuantizedMaxPoolT : public flatbuffers::NativeTable {
  typedef QuantizedMaxPool TableType;
  int32_t kernelX;              // pooling window width
  int32_t kernelY;              // pooling window height
  ModeFormat modelFormat;       // source framework layout convention
  int32_t outputActivationMax;  // quantized output clamp upper bound
  int32_t outputActivationMin;  // quantized output clamp lower bound
  PoolPadType padType;
  int32_t padX;
  int32_t padY;
  int32_t strideX;
  int32_t strideY;
  DataType type;                // element data type of the tensors
  QuantizedMaxPoolT()
      : kernelX(0),
        kernelY(0),
        modelFormat(ModeFormat_TENSORFLOW),
        outputActivationMax(0),
        outputActivationMin(0),
        padType(PoolPadType_CAFFE),
        padX(0),
        padY(0),
        strideX(0),
        strideY(0),
        type(DataType_DT_INVALID) {
  }
};
// Flat accessor for the QuantizedMaxPool table. Enums are stored as their
// underlying integer width (int8_t for ModeFormat/PoolPadType, int32_t for
// DataType); all scalar defaults are 0.
struct QuantizedMaxPool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedMaxPoolT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedMaxPoolTypeTable();
  }
  // Byte offsets of each field's entry in the table vtable (flatc-assigned).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_KERNELX = 4,
    VT_KERNELY = 6,
    VT_MODELFORMAT = 8,
    VT_OUTPUTACTIVATIONMAX = 10,
    VT_OUTPUTACTIVATIONMIN = 12,
    VT_PADTYPE = 14,
    VT_PADX = 16,
    VT_PADY = 18,
    VT_STRIDEX = 20,
    VT_STRIDEY = 22,
    VT_TYPE = 24
  };
  int32_t kernelX() const {
    return GetField<int32_t>(VT_KERNELX, 0);
  }
  int32_t kernelY() const {
    return GetField<int32_t>(VT_KERNELY, 0);
  }
  ModeFormat modelFormat() const {
    return static_cast<ModeFormat>(GetField<int8_t>(VT_MODELFORMAT, 0));
  }
  int32_t outputActivationMax() const {
    return GetField<int32_t>(VT_OUTPUTACTIVATIONMAX, 0);
  }
  int32_t outputActivationMin() const {
    return GetField<int32_t>(VT_OUTPUTACTIVATIONMIN, 0);
  }
  PoolPadType padType() const {
    return static_cast<PoolPadType>(GetField<int8_t>(VT_PADTYPE, 0));
  }
  int32_t padX() const {
    return GetField<int32_t>(VT_PADX, 0);
  }
  int32_t padY() const {
    return GetField<int32_t>(VT_PADY, 0);
  }
  int32_t strideX() const {
    return GetField<int32_t>(VT_STRIDEX, 0);
  }
  int32_t strideY() const {
    return GetField<int32_t>(VT_STRIDEY, 0);
  }
  DataType type() const {
    return static_cast<DataType>(GetField<int32_t>(VT_TYPE, 0));
  }
  // Structural bounds-check of every scalar field.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_KERNELX) &&
           VerifyField<int32_t>(verifier, VT_KERNELY) &&
           VerifyField<int8_t>(verifier, VT_MODELFORMAT) &&
           VerifyField<int32_t>(verifier, VT_OUTPUTACTIVATIONMAX) &&
           VerifyField<int32_t>(verifier, VT_OUTPUTACTIVATIONMIN) &&
           VerifyField<int8_t>(verifier, VT_PADTYPE) &&
           VerifyField<int32_t>(verifier, VT_PADX) &&
           VerifyField<int32_t>(verifier, VT_PADY) &&
           VerifyField<int32_t>(verifier, VT_STRIDEX) &&
           VerifyField<int32_t>(verifier, VT_STRIDEY) &&
           VerifyField<int32_t>(verifier, VT_TYPE) &&
           verifier.EndTable();
  }
  QuantizedMaxPoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedMaxPoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedMaxPool> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedMaxPool table; one add_* per field.
struct QuantizedMaxPoolBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_kernelX(int32_t kernelX) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_KERNELX, kernelX, 0);
  }
  void add_kernelY(int32_t kernelY) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_KERNELY, kernelY, 0);
  }
  void add_modelFormat(ModeFormat modelFormat) {
    fbb_.AddElement<int8_t>(QuantizedMaxPool::VT_MODELFORMAT, static_cast<int8_t>(modelFormat), 0);
  }
  void add_outputActivationMax(int32_t outputActivationMax) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_OUTPUTACTIVATIONMAX, outputActivationMax, 0);
  }
  void add_outputActivationMin(int32_t outputActivationMin) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_OUTPUTACTIVATIONMIN, outputActivationMin, 0);
  }
  void add_padType(PoolPadType padType) {
    fbb_.AddElement<int8_t>(QuantizedMaxPool::VT_PADTYPE, static_cast<int8_t>(padType), 0);
  }
  void add_padX(int32_t padX) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_PADX, padX, 0);
  }
  void add_padY(int32_t padY) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_PADY, padY, 0);
  }
  void add_strideX(int32_t strideX) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_STRIDEX, strideX, 0);
  }
  void add_strideY(int32_t strideY) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_STRIDEY, strideY, 0);
  }
  void add_type(DataType type) {
    fbb_.AddElement<int32_t>(QuantizedMaxPool::VT_TYPE, static_cast<int32_t>(type), 0);
  }
  explicit QuantizedMaxPoolBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedMaxPoolBuilder &operator=(const QuantizedMaxPoolBuilder &);  // non-assignable
  flatbuffers::Offset<QuantizedMaxPool> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedMaxPool>(end);
    return o;
  }
};
// Convenience one-shot constructor for a QuantizedMaxPool table.
// 32-bit fields are added before the 8-bit enum fields — flatc emits adds in
// decreasing field-size order to minimize alignment padding in the table.
inline flatbuffers::Offset<QuantizedMaxPool> CreateQuantizedMaxPool(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t kernelX = 0,
    int32_t kernelY = 0,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW,
    int32_t outputActivationMax = 0,
    int32_t outputActivationMin = 0,
    PoolPadType padType = PoolPadType_CAFFE,
    int32_t padX = 0,
    int32_t padY = 0,
    int32_t strideX = 0,
    int32_t strideY = 0,
    DataType type = DataType_DT_INVALID) {
  QuantizedMaxPoolBuilder builder_(_fbb);
  builder_.add_type(type);
  builder_.add_strideY(strideY);
  builder_.add_strideX(strideX);
  builder_.add_padY(padY);
  builder_.add_padX(padX);
  builder_.add_outputActivationMin(outputActivationMin);
  builder_.add_outputActivationMax(outputActivationMax);
  builder_.add_kernelY(kernelY);
  builder_.add_kernelX(kernelX);
  builder_.add_padType(padType);
  builder_.add_modelFormat(modelFormat);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizedMaxPool> CreateQuantizedMaxPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizedRelu: only the element data type.
struct QuantizedReluT : public flatbuffers::NativeTable {
  typedef QuantizedRelu TableType;
  DataType type;  // element data type of the tensor
  QuantizedReluT()
      : type(DataType_DT_INVALID) {
  }
};
// Flat accessor for the QuantizedRelu table (quantized ReLU activation).
struct QuantizedRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedReluT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedReluTypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TYPE = 4
  };
  DataType type() const {
    return static_cast<DataType>(GetField<int32_t>(VT_TYPE, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_TYPE) &&
           verifier.EndTable();
  }
  QuantizedReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedRelu> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedRelu table.
struct QuantizedReluBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_type(DataType type) {
    fbb_.AddElement<int32_t>(QuantizedRelu::VT_TYPE, static_cast<int32_t>(type), 0);
  }
  explicit QuantizedReluBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedReluBuilder &operator=(const QuantizedReluBuilder &);  // non-assignable
  flatbuffers::Offset<QuantizedRelu> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedRelu>(end);
    return o;
  }
};
// Convenience one-shot constructor for a QuantizedRelu table.
inline flatbuffers::Offset<QuantizedRelu> CreateQuantizedRelu(
    flatbuffers::FlatBufferBuilder &_fbb,
    DataType type = DataType_DT_INVALID) {
  QuantizedReluBuilder builder_(_fbb);
  builder_.add_type(type);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizedRelu> CreateQuantizedRelu(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizedRelu6: only the element data type.
struct QuantizedRelu6T : public flatbuffers::NativeTable {
  typedef QuantizedRelu6 TableType;
  DataType type;  // element data type of the tensor
  QuantizedRelu6T()
      : type(DataType_DT_INVALID) {
  }
};
// Flat accessor for the QuantizedRelu6 table (quantized ReLU6 activation).
struct QuantizedRelu6 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedRelu6T NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedRelu6TypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TYPE = 4
  };
  DataType type() const {
    return static_cast<DataType>(GetField<int32_t>(VT_TYPE, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_TYPE) &&
           verifier.EndTable();
  }
  QuantizedRelu6T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedRelu6T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedRelu6> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedRelu6 table.
struct QuantizedRelu6Builder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_type(DataType type) {
    fbb_.AddElement<int32_t>(QuantizedRelu6::VT_TYPE, static_cast<int32_t>(type), 0);
  }
  explicit QuantizedRelu6Builder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedRelu6Builder &operator=(const QuantizedRelu6Builder &);  // non-assignable
  flatbuffers::Offset<QuantizedRelu6> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedRelu6>(end);
    return o;
  }
};
// Convenience one-shot constructor for a QuantizedRelu6 table.
inline flatbuffers::Offset<QuantizedRelu6> CreateQuantizedRelu6(
    flatbuffers::FlatBufferBuilder &_fbb,
    DataType type = DataType_DT_INVALID) {
  QuantizedRelu6Builder builder_(_fbb);
  builder_.add_type(type);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizedRelu6> CreateQuantizedRelu6(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizedReshape: target dims plus source framework.
struct QuantizedReshapeT : public flatbuffers::NativeTable {
  typedef QuantizedReshape TableType;
  std::vector<int32_t> dims;  // target shape for the reshape
  ModeFormat modelFormat;     // source framework layout convention
  QuantizedReshapeT()
      : modelFormat(ModeFormat_TENSORFLOW) {
  }
};
// Flat accessor for the QuantizedReshape table.
struct QuantizedReshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedReshapeT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedReshapeTypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DIMS = 4,
    VT_MODELFORMAT = 6
  };
  const flatbuffers::Vector<int32_t> *dims() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_DIMS);
  }
  ModeFormat modelFormat() const {
    return static_cast<ModeFormat>(GetField<int8_t>(VT_MODELFORMAT, 0));
  }
  // Structural bounds-check: verifies the dims vector and the enum scalar.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DIMS) &&
           verifier.VerifyVector(dims()) &&
           VerifyField<int8_t>(verifier, VT_MODELFORMAT) &&
           verifier.EndTable();
  }
  QuantizedReshapeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedReshapeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedReshape> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedReshape table.
struct QuantizedReshapeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims) {
    fbb_.AddOffset(QuantizedReshape::VT_DIMS, dims);
  }
  void add_modelFormat(ModeFormat modelFormat) {
    fbb_.AddElement<int8_t>(QuantizedReshape::VT_MODELFORMAT, static_cast<int8_t>(modelFormat), 0);
  }
  explicit QuantizedReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedReshapeBuilder &operator=(const QuantizedReshapeBuilder &);  // non-assignable
  flatbuffers::Offset<QuantizedReshape> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedReshape>(end);
    return o;
  }
};
// Convenience one-shot constructor; the dims vector must already be
// serialized into _fbb (use CreateQuantizedReshapeDirect for a std::vector).
inline flatbuffers::Offset<QuantizedReshape> CreateQuantizedReshape(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims = 0,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW) {
  QuantizedReshapeBuilder builder_(_fbb);
  builder_.add_dims(dims);
  builder_.add_modelFormat(modelFormat);
  return builder_.Finish();
}
// "Direct" variant: serializes the std::vector of dims into the builder
// first (null pointer produces an absent field), then delegates.
inline flatbuffers::Offset<QuantizedReshape> CreateQuantizedReshapeDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int32_t> *dims = nullptr,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW) {
  auto dims__ = dims ? _fbb.CreateVector<int32_t>(*dims) : 0;
  return MNN::CreateQuantizedReshape(
      _fbb,
      dims__,
      modelFormat);
}
flatbuffers::Offset<QuantizedReshape> CreateQuantizedReshape(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizedSoftmax: softmax beta and input scale.
struct QuantizedSoftmaxT : public flatbuffers::NativeTable {
  typedef QuantizedSoftmax TableType;
  float beta;        // softmax temperature/beta coefficient
  float inputScale;  // quantization scale of the input tensor
  QuantizedSoftmaxT()
      : beta(0.0f),
        inputScale(0.0f) {
  }
};
// Flat accessor for the QuantizedSoftmax table; both fields default to 0.0f.
struct QuantizedSoftmax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizedSoftmaxT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizedSoftmaxTypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_BETA = 4,
    VT_INPUTSCALE = 6
  };
  float beta() const {
    return GetField<float>(VT_BETA, 0.0f);
  }
  float inputScale() const {
    return GetField<float>(VT_INPUTSCALE, 0.0f);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<float>(verifier, VT_BETA) &&
           VerifyField<float>(verifier, VT_INPUTSCALE) &&
           verifier.EndTable();
  }
  QuantizedSoftmaxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizedSoftmaxT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizedSoftmax> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizedSoftmax table.
struct QuantizedSoftmaxBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_beta(float beta) {
    fbb_.AddElement<float>(QuantizedSoftmax::VT_BETA, beta, 0.0f);
  }
  void add_inputScale(float inputScale) {
    fbb_.AddElement<float>(QuantizedSoftmax::VT_INPUTSCALE, inputScale, 0.0f);
  }
  explicit QuantizedSoftmaxBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizedSoftmaxBuilder &operator=(const QuantizedSoftmaxBuilder &);  // non-assignable
  flatbuffers::Offset<QuantizedSoftmax> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizedSoftmax>(end);
    return o;
  }
};
// Convenience one-shot constructor for a QuantizedSoftmax table.
inline flatbuffers::Offset<QuantizedSoftmax> CreateQuantizedSoftmax(
    flatbuffers::FlatBufferBuilder &_fbb,
    float beta = 0.0f,
    float inputScale = 0.0f) {
  QuantizedSoftmaxBuilder builder_(_fbb);
  builder_.add_inputScale(inputScale);
  builder_.add_beta(beta);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizedSoftmax> CreateQuantizedSoftmax(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of QuantizeV2 (TF QuantizeV2-style quantize op):
// target data type, quantization mode, and rounding mode.
struct QuantizeV2T : public flatbuffers::NativeTable {
  typedef QuantizeV2 TableType;
  DataType type;
  QuantizeMode mode;
  QuantizeRoundMode roundMode;
  QuantizeV2T()
      : type(DataType_DT_INVALID),
        mode(QuantizeMode_MIN_COMBINED),
        roundMode(QuantizeRoundMode_HALF_AWAY_FROM_ZERO) {
  }
};
// Flat accessor for the QuantizeV2 table. Mode enums are stored as int8_t,
// the data type as int32_t; all default to 0.
struct QuantizeV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef QuantizeV2T NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return QuantizeV2TypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TYPE = 4,
    VT_MODE = 6,
    VT_ROUNDMODE = 8
  };
  DataType type() const {
    return static_cast<DataType>(GetField<int32_t>(VT_TYPE, 0));
  }
  QuantizeMode mode() const {
    return static_cast<QuantizeMode>(GetField<int8_t>(VT_MODE, 0));
  }
  QuantizeRoundMode roundMode() const {
    return static_cast<QuantizeRoundMode>(GetField<int8_t>(VT_ROUNDMODE, 0));
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_TYPE) &&
           VerifyField<int8_t>(verifier, VT_MODE) &&
           VerifyField<int8_t>(verifier, VT_ROUNDMODE) &&
           verifier.EndTable();
  }
  QuantizeV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(QuantizeV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<QuantizeV2> Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a QuantizeV2 table.
struct QuantizeV2Builder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_type(DataType type) {
    fbb_.AddElement<int32_t>(QuantizeV2::VT_TYPE, static_cast<int32_t>(type), 0);
  }
  void add_mode(QuantizeMode mode) {
    fbb_.AddElement<int8_t>(QuantizeV2::VT_MODE, static_cast<int8_t>(mode), 0);
  }
  void add_roundMode(QuantizeRoundMode roundMode) {
    fbb_.AddElement<int8_t>(QuantizeV2::VT_ROUNDMODE, static_cast<int8_t>(roundMode), 0);
  }
  explicit QuantizeV2Builder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  QuantizeV2Builder &operator=(const QuantizeV2Builder &);  // non-assignable
  flatbuffers::Offset<QuantizeV2> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<QuantizeV2>(end);
    return o;
  }
};
// Convenience one-shot constructor; the 32-bit type field is added before
// the 8-bit mode fields (flatc's decreasing-size add order).
inline flatbuffers::Offset<QuantizeV2> CreateQuantizeV2(
    flatbuffers::FlatBufferBuilder &_fbb,
    DataType type = DataType_DT_INVALID,
    QuantizeMode mode = QuantizeMode_MIN_COMBINED,
    QuantizeRoundMode roundMode = QuantizeRoundMode_HALF_AWAY_FROM_ZERO) {
  QuantizeV2Builder builder_(_fbb);
  builder_.add_type(type);
  builder_.add_roundMode(roundMode);
  builder_.add_mode(mode);
  return builder_.Finish();
}
flatbuffers::Offset<QuantizeV2> CreateQuantizeV2(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of RequantizationRange — a field-less marker table
// (the op takes no serialized parameters).
struct RequantizationRangeT : public flatbuffers::NativeTable {
  typedef RequantizationRange TableType;
  RequantizationRangeT() {
  }
};
// Flat accessor for the field-less RequantizationRange table.
struct RequantizationRange FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef RequantizationRangeT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return RequantizationRangeTypeTable();
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           verifier.EndTable();
  }
  RequantizationRangeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(RequantizationRangeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<RequantizationRange> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for the field-less RequantizationRange table (no add_* methods).
struct RequantizationRangeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  explicit RequantizationRangeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  RequantizationRangeBuilder &operator=(const RequantizationRangeBuilder &);  // non-assignable
  flatbuffers::Offset<RequantizationRange> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<RequantizationRange>(end);
    return o;
  }
};
// Serializes an empty RequantizationRange table.
inline flatbuffers::Offset<RequantizationRange> CreateRequantizationRange(
    flatbuffers::FlatBufferBuilder &_fbb) {
  RequantizationRangeBuilder builder_(_fbb);
  return builder_.Finish();
}
flatbuffers::Offset<RequantizationRange> CreateRequantizationRange(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of Requantize — a field-less marker table.
struct RequantizeT : public flatbuffers::NativeTable {
  typedef Requantize TableType;
  RequantizeT() {
  }
};
// Flat accessor for the field-less Requantize table.
struct Requantize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef RequantizeT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return RequantizeTypeTable();
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           verifier.EndTable();
  }
  RequantizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(RequantizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Requantize> Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Builder for the field-less Requantize table (no add_* methods).
struct RequantizeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  explicit RequantizeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  RequantizeBuilder &operator=(const RequantizeBuilder &);  // non-assignable
  flatbuffers::Offset<Requantize> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Requantize>(end);
    return o;
  }
};
// Serializes an empty Requantize table.
inline flatbuffers::Offset<Requantize> CreateRequantize(
    flatbuffers::FlatBufferBuilder &_fbb) {
  RequantizeBuilder builder_(_fbb);
  return builder_.Finish();
}
flatbuffers::Offset<Requantize> CreateRequantize(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of TfQuantizedConv2D: a TF/TFLite-style quantized conv,
// carrying weights/bias, the common conv geometry, fixed-point requantization
// parameters (multiplier/shift), output clamp bounds, and per-tensor
// quantization params for input/filter/bias/output.
struct TfQuantizedConv2DT : public flatbuffers::NativeTable {
  typedef TfQuantizedConv2D TableType;
  std::vector<int32_t> bias;                             // quantized bias values
  bool biasflag;                                         // whether bias is present
  std::unique_ptr<Convolution2DCommonT> common;          // shared conv geometry (kernel/stride/pad/...)
  std::vector<uint8_t> weight;                           // quantized weight bytes
  FusedActivation activationType;                        // fused activation applied after conv
  int32_t multiplier;                                    // fixed-point requantization multiplier
  int32_t outMax;                                        // output clamp upper bound
  int32_t outMin;                                        // output clamp lower bound
  int32_t shift;                                         // fixed-point requantization shift
  std::unique_ptr<QuantizedParamT> biasQuantizedParam;
  int32_t depthMultiplier;                               // depthwise-conv channel multiplier
  std::unique_ptr<QuantizedParamT> filterQuantizedParam;
  std::unique_ptr<QuantizedParamT> inputQuantizedParam;
  ModeFormat modelFormat;                                // source framework layout convention
  std::unique_ptr<QuantizedParamT> outputQuantizedParam;
  TfQuantizedConv2DT()
      : biasflag(false),
        activationType(FusedActivation_kTfLiteActNone),
        multiplier(0),
        outMax(0),
        outMin(0),
        shift(0),
        depthMultiplier(0),
        modelFormat(ModeFormat_TENSORFLOW) {
  }
};
// Flat accessor for the TfQuantizedConv2D table. Vectors and sub-tables are
// reached through offsets; Verify walks every field including the four
// nested QuantizedParam tables.
struct TfQuantizedConv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TfQuantizedConv2DT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return TfQuantizedConv2DTypeTable();
  }
  // Byte offsets of each field's entry in the table vtable (flatc-assigned).
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_BIAS = 4,
    VT_BIASFLAG = 6,
    VT_COMMON = 8,
    VT_WEIGHT = 10,
    VT_ACTIVATIONTYPE = 12,
    VT_MULTIPLIER = 14,
    VT_OUTMAX = 16,
    VT_OUTMIN = 18,
    VT_SHIFT = 20,
    VT_BIASQUANTIZEDPARAM = 22,
    VT_DEPTHMULTIPLIER = 24,
    VT_FILTERQUANTIZEDPARAM = 26,
    VT_INPUTQUANTIZEDPARAM = 28,
    VT_MODELFORMAT = 30,
    VT_OUTPUTQUANTIZEDPARAM = 32
  };
  const flatbuffers::Vector<int32_t> *bias() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_BIAS);
  }
  bool biasflag() const {
    return GetField<uint8_t>(VT_BIASFLAG, 0) != 0;
  }
  const Convolution2DCommon *common() const {
    return GetPointer<const Convolution2DCommon *>(VT_COMMON);
  }
  const flatbuffers::Vector<uint8_t> *weight() const {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_WEIGHT);
  }
  FusedActivation activationType() const {
    return static_cast<FusedActivation>(GetField<int8_t>(VT_ACTIVATIONTYPE, 0));
  }
  int32_t multiplier() const {
    return GetField<int32_t>(VT_MULTIPLIER, 0);
  }
  int32_t outMax() const {
    return GetField<int32_t>(VT_OUTMAX, 0);
  }
  int32_t outMin() const {
    return GetField<int32_t>(VT_OUTMIN, 0);
  }
  int32_t shift() const {
    return GetField<int32_t>(VT_SHIFT, 0);
  }
  const QuantizedParam *biasQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_BIASQUANTIZEDPARAM);
  }
  int32_t depthMultiplier() const {
    return GetField<int32_t>(VT_DEPTHMULTIPLIER, 0);
  }
  const QuantizedParam *filterQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_FILTERQUANTIZEDPARAM);
  }
  const QuantizedParam *inputQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_INPUTQUANTIZEDPARAM);
  }
  ModeFormat modelFormat() const {
    return static_cast<ModeFormat>(GetField<int8_t>(VT_MODELFORMAT, 0));
  }
  const QuantizedParam *outputQuantizedParam() const {
    return GetPointer<const QuantizedParam *>(VT_OUTPUTQUANTIZEDPARAM);
  }
  // Structural bounds-check of all scalars, vectors, and nested tables.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_BIAS) &&
           verifier.VerifyVector(bias()) &&
           VerifyField<uint8_t>(verifier, VT_BIASFLAG) &&
           VerifyOffset(verifier, VT_COMMON) &&
           verifier.VerifyTable(common()) &&
           VerifyOffset(verifier, VT_WEIGHT) &&
           verifier.VerifyVector(weight()) &&
           VerifyField<int8_t>(verifier, VT_ACTIVATIONTYPE) &&
           VerifyField<int32_t>(verifier, VT_MULTIPLIER) &&
           VerifyField<int32_t>(verifier, VT_OUTMAX) &&
           VerifyField<int32_t>(verifier, VT_OUTMIN) &&
           VerifyField<int32_t>(verifier, VT_SHIFT) &&
           VerifyOffset(verifier, VT_BIASQUANTIZEDPARAM) &&
           verifier.VerifyTable(biasQuantizedParam()) &&
           VerifyField<int32_t>(verifier, VT_DEPTHMULTIPLIER) &&
           VerifyOffset(verifier, VT_FILTERQUANTIZEDPARAM) &&
           verifier.VerifyTable(filterQuantizedParam()) &&
           VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) &&
           verifier.VerifyTable(inputQuantizedParam()) &&
           VerifyField<int8_t>(verifier, VT_MODELFORMAT) &&
           VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) &&
           verifier.VerifyTable(outputQuantizedParam()) &&
           verifier.EndTable();
  }
  TfQuantizedConv2DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(TfQuantizedConv2DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<TfQuantizedConv2D> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a TfQuantizedConv2D table; one add_* per field.
struct TfQuantizedConv2DBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // table start returned by StartTable()
  void add_bias(flatbuffers::Offset<flatbuffers::Vector<int32_t>> bias) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_BIAS, bias);
  }
  void add_biasflag(bool biasflag) {
    fbb_.AddElement<uint8_t>(TfQuantizedConv2D::VT_BIASFLAG, static_cast<uint8_t>(biasflag), 0);
  }
  void add_common(flatbuffers::Offset<Convolution2DCommon> common) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_COMMON, common);
  }
  void add_weight(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> weight) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_WEIGHT, weight);
  }
  void add_activationType(FusedActivation activationType) {
    fbb_.AddElement<int8_t>(TfQuantizedConv2D::VT_ACTIVATIONTYPE, static_cast<int8_t>(activationType), 0);
  }
  void add_multiplier(int32_t multiplier) {
    fbb_.AddElement<int32_t>(TfQuantizedConv2D::VT_MULTIPLIER, multiplier, 0);
  }
  void add_outMax(int32_t outMax) {
    fbb_.AddElement<int32_t>(TfQuantizedConv2D::VT_OUTMAX, outMax, 0);
  }
  void add_outMin(int32_t outMin) {
    fbb_.AddElement<int32_t>(TfQuantizedConv2D::VT_OUTMIN, outMin, 0);
  }
  void add_shift(int32_t shift) {
    fbb_.AddElement<int32_t>(TfQuantizedConv2D::VT_SHIFT, shift, 0);
  }
  void add_biasQuantizedParam(flatbuffers::Offset<QuantizedParam> biasQuantizedParam) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_BIASQUANTIZEDPARAM, biasQuantizedParam);
  }
  void add_depthMultiplier(int32_t depthMultiplier) {
    fbb_.AddElement<int32_t>(TfQuantizedConv2D::VT_DEPTHMULTIPLIER, depthMultiplier, 0);
  }
  void add_filterQuantizedParam(flatbuffers::Offset<QuantizedParam> filterQuantizedParam) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_FILTERQUANTIZEDPARAM, filterQuantizedParam);
  }
  void add_inputQuantizedParam(flatbuffers::Offset<QuantizedParam> inputQuantizedParam) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam);
  }
  void add_modelFormat(ModeFormat modelFormat) {
    fbb_.AddElement<int8_t>(TfQuantizedConv2D::VT_MODELFORMAT, static_cast<int8_t>(modelFormat), 0);
  }
  void add_outputQuantizedParam(flatbuffers::Offset<QuantizedParam> outputQuantizedParam) {
    fbb_.AddOffset(TfQuantizedConv2D::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam);
  }
  explicit TfQuantizedConv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  TfQuantizedConv2DBuilder &operator=(const TfQuantizedConv2DBuilder &);  // non-assignable
  flatbuffers::Offset<TfQuantizedConv2D> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TfQuantizedConv2D>(end);
    return o;
  }
};
// Convenience one-shot constructor for a TfQuantizedConv2D table. Vectors and
// sub-tables must already be serialized into _fbb. Offset/32-bit fields are
// added before the 8-bit ones — flatc's decreasing-size add order minimizes
// alignment padding in the table.
inline flatbuffers::Offset<TfQuantizedConv2D> CreateTfQuantizedConv2D(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> bias = 0,
    bool biasflag = false,
    flatbuffers::Offset<Convolution2DCommon> common = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> weight = 0,
    FusedActivation activationType = FusedActivation_kTfLiteActNone,
    int32_t multiplier = 0,
    int32_t outMax = 0,
    int32_t outMin = 0,
    int32_t shift = 0,
    flatbuffers::Offset<QuantizedParam> biasQuantizedParam = 0,
    int32_t depthMultiplier = 0,
    flatbuffers::Offset<QuantizedParam> filterQuantizedParam = 0,
    flatbuffers::Offset<QuantizedParam> inputQuantizedParam = 0,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW,
    flatbuffers::Offset<QuantizedParam> outputQuantizedParam = 0) {
  TfQuantizedConv2DBuilder builder_(_fbb);
  builder_.add_outputQuantizedParam(outputQuantizedParam);
  builder_.add_inputQuantizedParam(inputQuantizedParam);
  builder_.add_filterQuantizedParam(filterQuantizedParam);
  builder_.add_depthMultiplier(depthMultiplier);
  builder_.add_biasQuantizedParam(biasQuantizedParam);
  builder_.add_shift(shift);
  builder_.add_outMin(outMin);
  builder_.add_outMax(outMax);
  builder_.add_multiplier(multiplier);
  builder_.add_weight(weight);
  builder_.add_common(common);
  builder_.add_bias(bias);
  builder_.add_modelFormat(modelFormat);
  builder_.add_activationType(activationType);
  builder_.add_biasflag(biasflag);
  return builder_.Finish();
}
// "Direct" variant: accepts std::vector pointers, serializes them into flatbuffer
// vectors first (null pointers become absent fields), then delegates to
// CreateTfQuantizedConv2D above.
inline flatbuffers::Offset<TfQuantizedConv2D> CreateTfQuantizedConv2DDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int32_t> *bias = nullptr,
    bool biasflag = false,
    flatbuffers::Offset<Convolution2DCommon> common = 0,
    const std::vector<uint8_t> *weight = nullptr,
    FusedActivation activationType = FusedActivation_kTfLiteActNone,
    int32_t multiplier = 0,
    int32_t outMax = 0,
    int32_t outMin = 0,
    int32_t shift = 0,
    flatbuffers::Offset<QuantizedParam> biasQuantizedParam = 0,
    int32_t depthMultiplier = 0,
    flatbuffers::Offset<QuantizedParam> filterQuantizedParam = 0,
    flatbuffers::Offset<QuantizedParam> inputQuantizedParam = 0,
    ModeFormat modelFormat = ModeFormat_TENSORFLOW,
    flatbuffers::Offset<QuantizedParam> outputQuantizedParam = 0) {
  auto bias__ = bias ? _fbb.CreateVector<int32_t>(*bias) : 0;
  auto weight__ = weight ? _fbb.CreateVector<uint8_t>(*weight) : 0;
  return MNN::CreateTfQuantizedConv2D(
      _fbb,
      bias__,
      biasflag,
      common,
      weight__,
      activationType,
      multiplier,
      outMax,
      outMin,
      shift,
      biasQuantizedParam,
      depthMultiplier,
      filterQuantizedParam,
      inputQuantizedParam,
      modelFormat,
      outputQuantizedParam);
}
// Forward declaration of the object-API pack helper defined near the end of this section.
flatbuffers::Offset<TfQuantizedConv2D> CreateTfQuantizedConv2D(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Allocates a native QuantizedParamT and fills it from this table.
// The object is held in a unique_ptr so it is not leaked if UnPackTo throws
// (matches the exception-safe form emitted by newer FlatBuffers compilers).
inline QuantizedParamT *QuantizedParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedParamT> _o(new QuantizedParamT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Field-by-field copy from the flatbuffer table into the native object _o.
inline void QuantizedParam::UnPackTo(QuantizedParamT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = zeroPoint(); _o->zeroPoint = _e; };
  { auto _e = scale(); _o->scale = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedParam> QuantizedParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedParam(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): reads each field of _o and forwards them
// to the builder-based CreateQuantizedParam overload. _va exists only for
// vector-building lambdas in other generated tables and is unused here.
inline flatbuffers::Offset<QuantizedParam> CreateQuantizedParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _zeroPoint = _o->zeroPoint;
  auto _scale = _o->scale;
  return MNN::CreateQuantizedParam(
      _fbb,
      _zeroPoint,
      _scale);
}
// Allocates a native QuantizedAddT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws while unpacking sub-tables.
inline QuantizedAddT *QuantizedAdd::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedAddT> _o(new QuantizedAddT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Field-by-field copy into _o; nested QuantizedParam sub-tables are recursively
// unpacked into owned native objects when present.
inline void QuantizedAdd::UnPackTo(QuantizedAddT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = activationType(); _o->activationType = _e; };
  { auto _e = input1QuantizedParam(); if (_e) _o->input1QuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = input2QuantizedParam(); if (_e) _o->input2QuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedAdd> QuantizedAdd::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedAdd(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): packs each present QuantizedParam sub-object
// (absent ones serialize as offset 0) and forwards all fields to the builder overload.
inline flatbuffers::Offset<QuantizedAdd> CreateQuantizedAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedAddT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _activationType = _o->activationType;
  auto _input1QuantizedParam = _o->input1QuantizedParam ? CreateQuantizedParam(_fbb, _o->input1QuantizedParam.get(), _rehasher) : 0;
  auto _input2QuantizedParam = _o->input2QuantizedParam ? CreateQuantizedParam(_fbb, _o->input2QuantizedParam.get(), _rehasher) : 0;
  auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0;
  return MNN::CreateQuantizedAdd(
      _fbb,
      _activationType,
      _input1QuantizedParam,
      _input2QuantizedParam,
      _outputQuantizedParam);
}
// Allocates a native DequantizeT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws while unpacking sub-tables.
inline DequantizeT *Dequantize::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<DequantizeT> _o(new DequantizeT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Field-by-field copy into _o; the nested QuantizedParam is recursively unpacked.
inline void Dequantize::UnPackTo(DequantizeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = mode(); _o->mode = _e; };
  { auto _e = modelFormat(); _o->modelFormat = _e; };
  { auto _e = type(); _o->type = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<Dequantize> Dequantize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateDequantize(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): packs the optional sub-table then forwards
// all scalar/enum fields to the builder-based CreateDequantize overload.
inline flatbuffers::Offset<Dequantize> CreateDequantize(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0;
  auto _mode = _o->mode;
  auto _modelFormat = _o->modelFormat;
  auto _type = _o->type;
  return MNN::CreateDequantize(
      _fbb,
      _inputQuantizedParam,
      _mode,
      _modelFormat,
      _type);
}
// Allocates a native QuantizedAvgPoolT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedAvgPoolT *QuantizedAvgPool::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedAvgPoolT> _o(new QuantizedAvgPoolT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Field-by-field copy of every pooling parameter into the native object _o.
inline void QuantizedAvgPool::UnPackTo(QuantizedAvgPoolT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = kernelX(); _o->kernelX = _e; };
  { auto _e = kernelY(); _o->kernelY = _e; };
  { auto _e = modelFormat(); _o->modelFormat = _e; };
  { auto _e = outputActivationMax(); _o->outputActivationMax = _e; };
  { auto _e = outputActivationMin(); _o->outputActivationMin = _e; };
  { auto _e = padType(); _o->padType = _e; };
  { auto _e = padX(); _o->padX = _e; };
  { auto _e = padY(); _o->padY = _e; };
  { auto _e = strideX(); _o->strideX = _e; };
  { auto _e = strideY(); _o->strideY = _e; };
  { auto _e = type(); _o->type = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedAvgPool> QuantizedAvgPool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedAvgPool(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards every pooling parameter from _o
// to the builder-based CreateQuantizedAvgPool overload.
inline flatbuffers::Offset<QuantizedAvgPool> CreateQuantizedAvgPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedAvgPoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _kernelX = _o->kernelX;
  auto _kernelY = _o->kernelY;
  auto _modelFormat = _o->modelFormat;
  auto _outputActivationMax = _o->outputActivationMax;
  auto _outputActivationMin = _o->outputActivationMin;
  auto _padType = _o->padType;
  auto _padX = _o->padX;
  auto _padY = _o->padY;
  auto _strideX = _o->strideX;
  auto _strideY = _o->strideY;
  auto _type = _o->type;
  return MNN::CreateQuantizedAvgPool(
      _fbb,
      _kernelX,
      _kernelY,
      _modelFormat,
      _outputActivationMax,
      _outputActivationMin,
      _padType,
      _padX,
      _padY,
      _strideX,
      _strideY,
      _type);
}
// Allocates a native QuantizedBiasAddT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws (the bias vector resize can throw).
inline QuantizedBiasAddT *QuantizedBiasAdd::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedBiasAddT> _o(new QuantizedBiasAddT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies the bias vector element-by-element and each scalar field into _o.
inline void QuantizedBiasAdd::UnPackTo(QuantizedBiasAddT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } };
  { auto _e = inputType(); _o->inputType = _e; };
  { auto _e = max(); _o->max = _e; };
  { auto _e = min(); _o->min = _e; };
  { auto _e = outputType(); _o->outputType = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedBiasAdd> QuantizedBiasAdd::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedBiasAdd(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): an empty bias vector serializes as an
// absent field (offset 0); all other fields forward to the builder overload.
inline flatbuffers::Offset<QuantizedBiasAdd> CreateQuantizedBiasAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedBiasAddT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0;
  auto _inputType = _o->inputType;
  auto _max = _o->max;
  auto _min = _o->min;
  auto _outputType = _o->outputType;
  return MNN::CreateQuantizedBiasAdd(
      _fbb,
      _bias,
      _inputType,
      _max,
      _min,
      _outputType);
}
// Allocates a native QuantizedConcatT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws while resizing vectors.
inline QuantizedConcatT *QuantizedConcat::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedConcatT> _o(new QuantizedConcatT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies per-input scale/zero-point vectors, scalars, and the nested output
// QuantizedParam sub-table into the native object _o.
inline void QuantizedConcat::UnPackTo(QuantizedConcatT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = activationType(); _o->activationType = _e; };
  { auto _e = axis(); _o->axis = _e; };
  { auto _e = inputScale(); if (_e) { _o->inputScale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputScale[_i] = _e->Get(_i); } } };
  { auto _e = inputZeroPoint(); if (_e) { _o->inputZeroPoint.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputZeroPoint[_i] = _e->Get(_i); } } };
  { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedConcat> QuantizedConcat::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedConcat(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): empty vectors / absent sub-objects serialize
// as offset 0; everything else forwards to the builder overload.
inline flatbuffers::Offset<QuantizedConcat> CreateQuantizedConcat(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedConcatT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _activationType = _o->activationType;
  auto _axis = _o->axis;
  auto _inputScale = _o->inputScale.size() ? _fbb.CreateVector(_o->inputScale) : 0;
  auto _inputZeroPoint = _o->inputZeroPoint.size() ? _fbb.CreateVector(_o->inputZeroPoint) : 0;
  auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0;
  return MNN::CreateQuantizedConcat(
      _fbb,
      _activationType,
      _axis,
      _inputScale,
      _inputZeroPoint,
      _outputQuantizedParam);
}
// Allocates a native QuantizedLogisticT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedLogisticT *QuantizedLogistic::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedLogisticT> _o(new QuantizedLogisticT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Recursively unpacks the input/output QuantizedParam sub-tables into _o.
inline void QuantizedLogistic::UnPackTo(QuantizedLogisticT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedLogistic> QuantizedLogistic::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedLogistic(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): packs the two optional QuantizedParam
// sub-objects (absent → offset 0) and forwards them to the builder overload.
inline flatbuffers::Offset<QuantizedLogistic> CreateQuantizedLogistic(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedLogisticT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0;
  auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0;
  return MNN::CreateQuantizedLogistic(
      _fbb,
      _inputQuantizedParam,
      _outputQuantizedParam);
}
// Allocates a native QuantizedMatMulT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedMatMulT *QuantizedMatMul::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedMatMulT> _o(new QuantizedMatMulT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies the two transpose flags into the native object _o.
inline void QuantizedMatMul::UnPackTo(QuantizedMatMulT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = transposeA(); _o->transposeA = _e; };
  { auto _e = transposeB(); _o->transposeB = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedMatMul> QuantizedMatMul::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedMatMul(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards the transpose flags to the builder overload.
inline flatbuffers::Offset<QuantizedMatMul> CreateQuantizedMatMul(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedMatMulT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _transposeA = _o->transposeA;
  auto _transposeB = _o->transposeB;
  return MNN::CreateQuantizedMatMul(
      _fbb,
      _transposeA,
      _transposeB);
}
// Allocates a native QuantizedMaxPoolT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedMaxPoolT *QuantizedMaxPool::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedMaxPoolT> _o(new QuantizedMaxPoolT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Field-by-field copy of every pooling parameter into the native object _o.
inline void QuantizedMaxPool::UnPackTo(QuantizedMaxPoolT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = kernelX(); _o->kernelX = _e; };
  { auto _e = kernelY(); _o->kernelY = _e; };
  { auto _e = modelFormat(); _o->modelFormat = _e; };
  { auto _e = outputActivationMax(); _o->outputActivationMax = _e; };
  { auto _e = outputActivationMin(); _o->outputActivationMin = _e; };
  { auto _e = padType(); _o->padType = _e; };
  { auto _e = padX(); _o->padX = _e; };
  { auto _e = padY(); _o->padY = _e; };
  { auto _e = strideX(); _o->strideX = _e; };
  { auto _e = strideY(); _o->strideY = _e; };
  { auto _e = type(); _o->type = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedMaxPool> QuantizedMaxPool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedMaxPool(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards every pooling parameter from _o
// to the builder-based CreateQuantizedMaxPool overload.
inline flatbuffers::Offset<QuantizedMaxPool> CreateQuantizedMaxPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedMaxPoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _kernelX = _o->kernelX;
  auto _kernelY = _o->kernelY;
  auto _modelFormat = _o->modelFormat;
  auto _outputActivationMax = _o->outputActivationMax;
  auto _outputActivationMin = _o->outputActivationMin;
  auto _padType = _o->padType;
  auto _padX = _o->padX;
  auto _padY = _o->padY;
  auto _strideX = _o->strideX;
  auto _strideY = _o->strideY;
  auto _type = _o->type;
  return MNN::CreateQuantizedMaxPool(
      _fbb,
      _kernelX,
      _kernelY,
      _modelFormat,
      _outputActivationMax,
      _outputActivationMin,
      _padType,
      _padX,
      _padY,
      _strideX,
      _strideY,
      _type);
}
// Allocates a native QuantizedReluT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedReluT *QuantizedRelu::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedReluT> _o(new QuantizedReluT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies the element data type into the native object _o.
inline void QuantizedRelu::UnPackTo(QuantizedReluT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = type(); _o->type = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedRelu> QuantizedRelu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedRelu(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards the data type to the builder overload.
inline flatbuffers::Offset<QuantizedRelu> CreateQuantizedRelu(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _type = _o->type;
  return MNN::CreateQuantizedRelu(
      _fbb,
      _type);
}
// Allocates a native QuantizedRelu6T and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedRelu6T *QuantizedRelu6::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedRelu6T> _o(new QuantizedRelu6T());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies the element data type into the native object _o.
inline void QuantizedRelu6::UnPackTo(QuantizedRelu6T *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = type(); _o->type = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedRelu6> QuantizedRelu6::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedRelu6(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards the data type to the builder overload.
inline flatbuffers::Offset<QuantizedRelu6> CreateQuantizedRelu6(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedRelu6T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _type = _o->type;
  return MNN::CreateQuantizedRelu6(
      _fbb,
      _type);
}
// Allocates a native QuantizedReshapeT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws while resizing dims.
inline QuantizedReshapeT *QuantizedReshape::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedReshapeT> _o(new QuantizedReshapeT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies the target dims vector and model-format enum into the native object _o.
inline void QuantizedReshape::UnPackTo(QuantizedReshapeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } };
  { auto _e = modelFormat(); _o->modelFormat = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedReshape> QuantizedReshape::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedReshape(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): an empty dims vector serializes as an
// absent field (offset 0); everything forwards to the builder overload.
inline flatbuffers::Offset<QuantizedReshape> CreateQuantizedReshape(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedReshapeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _dims = _o->dims.size() ? _fbb.CreateVector(_o->dims) : 0;
  auto _modelFormat = _o->modelFormat;
  return MNN::CreateQuantizedReshape(
      _fbb,
      _dims,
      _modelFormat);
}
// Allocates a native QuantizedSoftmaxT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizedSoftmaxT *QuantizedSoftmax::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizedSoftmaxT> _o(new QuantizedSoftmaxT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies beta and the input scale into the native object _o.
inline void QuantizedSoftmax::UnPackTo(QuantizedSoftmaxT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = beta(); _o->beta = _e; };
  { auto _e = inputScale(); _o->inputScale = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizedSoftmax> QuantizedSoftmax::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizedSoftmax(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards beta and inputScale to the builder overload.
inline flatbuffers::Offset<QuantizedSoftmax> CreateQuantizedSoftmax(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedSoftmaxT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _beta = _o->beta;
  auto _inputScale = _o->inputScale;
  return MNN::CreateQuantizedSoftmax(
      _fbb,
      _beta,
      _inputScale);
}
// Allocates a native QuantizeV2T and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws.
inline QuantizeV2T *QuantizeV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<QuantizeV2T> _o(new QuantizeV2T());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies the data type, quantize mode, and rounding mode into the native object _o.
inline void QuantizeV2::UnPackTo(QuantizeV2T *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = type(); _o->type = _e; };
  { auto _e = mode(); _o->mode = _e; };
  { auto _e = roundMode(); _o->roundMode = _e; };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<QuantizeV2> QuantizeV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateQuantizeV2(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): forwards type/mode/roundMode to the builder overload.
inline flatbuffers::Offset<QuantizeV2> CreateQuantizeV2(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _type = _o->type;
  auto _mode = _o->mode;
  auto _roundMode = _o->roundMode;
  return MNN::CreateQuantizeV2(
      _fbb,
      _type,
      _mode,
      _roundMode);
}
// Allocates a native RequantizationRangeT (the table has no fields).
// unique_ptr keeps the pattern consistent and leak-free with the other UnPack helpers.
inline RequantizationRangeT *RequantizationRange::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<RequantizationRangeT> _o(new RequantizationRangeT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// No fields to copy: RequantizationRange is an empty marker table.
inline void RequantizationRange::UnPackTo(RequantizationRangeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
}
// Serializes the (empty) native object _o back into a flatbuffer table.
inline flatbuffers::Offset<RequantizationRange> RequantizationRange::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateRequantizationRange(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): builds the empty table.
inline flatbuffers::Offset<RequantizationRange> CreateRequantizationRange(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RequantizationRangeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  return MNN::CreateRequantizationRange(
      _fbb);
}
// Allocates a native RequantizeT (the table has no fields).
// unique_ptr keeps the pattern consistent and leak-free with the other UnPack helpers.
inline RequantizeT *Requantize::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<RequantizeT> _o(new RequantizeT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// No fields to copy: Requantize is an empty marker table.
inline void Requantize::UnPackTo(RequantizeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
}
// Serializes the (empty) native object _o back into a flatbuffer table.
inline flatbuffers::Offset<Requantize> Requantize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateRequantize(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): builds the empty table.
inline flatbuffers::Offset<Requantize> CreateRequantize(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RequantizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  return MNN::CreateRequantize(
      _fbb);
}
// Allocates a native TfQuantizedConv2DT and fills it from this table.
// unique_ptr guards against a leak if UnPackTo throws while copying the
// bias/weight vectors or unpacking nested sub-tables.
inline TfQuantizedConv2DT *TfQuantizedConv2D::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<TfQuantizedConv2DT> _o(new TfQuantizedConv2DT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies vectors element-by-element, scalars directly, and recursively unpacks
// the Convolution2DCommon and QuantizedParam sub-tables into the native object _o.
inline void TfQuantizedConv2D::UnPackTo(TfQuantizedConv2DT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } };
  { auto _e = biasflag(); _o->biasflag = _e; };
  { auto _e = common(); if (_e) _o->common = std::unique_ptr<Convolution2DCommonT>(_e->UnPack(_resolver)); };
  { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } };
  { auto _e = activationType(); _o->activationType = _e; };
  { auto _e = multiplier(); _o->multiplier = _e; };
  { auto _e = outMax(); _o->outMax = _e; };
  { auto _e = outMin(); _o->outMin = _e; };
  { auto _e = shift(); _o->shift = _e; };
  { auto _e = biasQuantizedParam(); if (_e) _o->biasQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = depthMultiplier(); _o->depthMultiplier = _e; };
  { auto _e = filterQuantizedParam(); if (_e) _o->filterQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
  { auto _e = modelFormat(); _o->modelFormat = _e; };
  { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr<QuantizedParamT>(_e->UnPack(_resolver)); };
}
// Serializes the native object _o back into a flatbuffer table.
inline flatbuffers::Offset<TfQuantizedConv2D> TfQuantizedConv2D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateTfQuantizedConv2D(_fbb, _o, _rehasher);
}
// Object-API helper used by Pack(): empty vectors and absent sub-objects
// serialize as offset 0; everything forwards to the builder-based overload.
inline flatbuffers::Offset<TfQuantizedConv2D> CreateTfQuantizedConv2D(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TfQuantizedConv2DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0;
  auto _biasflag = _o->biasflag;
  auto _common = _o->common ? CreateConvolution2DCommon(_fbb, _o->common.get(), _rehasher) : 0;
  auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0;
  auto _activationType = _o->activationType;
  auto _multiplier = _o->multiplier;
  auto _outMax = _o->outMax;
  auto _outMin = _o->outMin;
  auto _shift = _o->shift;
  auto _biasQuantizedParam = _o->biasQuantizedParam ? CreateQuantizedParam(_fbb, _o->biasQuantizedParam.get(), _rehasher) : 0;
  auto _depthMultiplier = _o->depthMultiplier;
  auto _filterQuantizedParam = _o->filterQuantizedParam ? CreateQuantizedParam(_fbb, _o->filterQuantizedParam.get(), _rehasher) : 0;
  auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0;
  auto _modelFormat = _o->modelFormat;
  auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0;
  return MNN::CreateTfQuantizedConv2D(
      _fbb,
      _bias,
      _biasflag,
      _common,
      _weight,
      _activationType,
      _multiplier,
      _outMax,
      _outMin,
      _shift,
      _biasQuantizedParam,
      _depthMultiplier,
      _filterQuantizedParam,
      _inputQuantizedParam,
      _modelFormat,
      _outputQuantizedParam);
}
// Mini-reflection metadata for the FusedActivation enum: one int8 entry per
// value. type_refs points back at this function because enum values reference
// their own type (index 0 in each TypeCode).
inline const flatbuffers::TypeTable *FusedActivationTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    FusedActivationTypeTable
  };
  static const char * const names[] = {
    "kTfLiteActNone",
    "kTfLiteActRelu",
    "kTfLiteActRelu1",
    "kTfLiteActRelu6",
    "kTfLiteActTanh",
    "kTfLiteActSignBit",
    "kTfLiteActSigmoid"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 7, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Mini-reflection metadata for the ModeFormat enum (TENSORFLOW / TFLITE).
inline const flatbuffers::TypeTable *ModeFormatTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    ModeFormatTypeTable
  };
  static const char * const names[] = {
    "TENSORFLOW",
    "TFLITE"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Mini-reflection metadata for the QuantizeMode enum.
inline const flatbuffers::TypeTable *QuantizeModeTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    QuantizeModeTypeTable
  };
  static const char * const names[] = {
    "MIN_COMBINED",
    "MIN_FIRST",
    "SCALED"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Mini-reflection metadata for the QuantizeRoundMode enum.
inline const flatbuffers::TypeTable *QuantizeRoundModeTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    QuantizeRoundModeTypeTable
  };
  static const char * const names[] = {
    "HALF_AWAY_FROM_ZERO",
    "HALF_TO_EVEN"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Mini-reflection metadata for the QuantizedParam table: an int32 zeroPoint
// and a float scale; no referenced types, so type_refs is null.
inline const flatbuffers::TypeTable *QuantizedParamTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, -1 },
    { flatbuffers::ET_FLOAT, 0, -1 }
  };
  static const char * const names[] = {
    "zeroPoint",
    "scale"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names
  };
  return &tt;
}
// Mini-reflection metadata for the QuantizedAdd table: a FusedActivation enum
// (ref index 0) plus three QuantizedParam sub-tables (ref index 1).
inline const flatbuffers::TypeTable *QuantizedAddTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_SEQUENCE, 0, 1 },
    { flatbuffers::ET_SEQUENCE, 0, 1 },
    { flatbuffers::ET_SEQUENCE, 0, 1 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    FusedActivationTypeTable,
    QuantizedParamTypeTable
  };
  static const char * const names[] = {
    "activationType",
    "input1QuantizedParam",
    "input2QuantizedParam",
    "outputQuantizedParam"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for Dequantize: quantization params plus mode/format/type
// enums. Each sequence_ref indexes into type_refs below.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *DequantizeTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 0, 0 },  // inputQuantizedParam -> QuantizedParam
    { flatbuffers::ET_CHAR, 0, 1 },      // mode -> QuantizeMode
    { flatbuffers::ET_CHAR, 0, 2 },      // modelFormat -> ModeFormat
    { flatbuffers::ET_INT, 0, 3 }        // type -> DataType
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    QuantizedParamTypeTable,
    QuantizeModeTypeTable,
    ModeFormatTypeTable,
    DataTypeTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "inputQuantizedParam",
    "mode",
    "modelFormat",
    "type"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedAvgPool: kernel/stride/pad scalars plus
// format, pad-type and data-type enums (11 fields total).
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedAvgPoolTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, -1 },   // kernelX
    { flatbuffers::ET_INT, 0, -1 },   // kernelY
    { flatbuffers::ET_CHAR, 0, 0 },   // modelFormat -> ModeFormat
    { flatbuffers::ET_INT, 0, -1 },   // outputActivationMax
    { flatbuffers::ET_INT, 0, -1 },   // outputActivationMin
    { flatbuffers::ET_CHAR, 0, 1 },   // padType -> PoolPadType
    { flatbuffers::ET_INT, 0, -1 },   // padX
    { flatbuffers::ET_INT, 0, -1 },   // padY
    { flatbuffers::ET_INT, 0, -1 },   // strideX
    { flatbuffers::ET_INT, 0, -1 },   // strideY
    { flatbuffers::ET_INT, 0, 2 }     // type -> DataType
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    ModeFormatTypeTable,
    PoolPadTypeTypeTable,
    DataTypeTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "kernelX",
    "kernelY",
    "modelFormat",
    "outputActivationMax",
    "outputActivationMin",
    "padType",
    "padX",
    "padY",
    "strideX",
    "strideY",
    "type"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedBiasAdd. The second TypeCode column (1)
// marks a vector field; sequence_ref 0 points at DataTypeTypeTable.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedBiasAddTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 1, -1 },  // bias: vector of int32
    { flatbuffers::ET_INT, 0, 0 },   // inputType -> DataType
    { flatbuffers::ET_INT, 0, -1 },  // max
    { flatbuffers::ET_INT, 0, -1 },  // min
    { flatbuffers::ET_INT, 0, 0 }    // outputType -> DataType
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    DataTypeTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "bias",
    "inputType",
    "max",
    "min",
    "outputType"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedConcat: activation enum, concat axis,
// per-input scale/zero-point vectors, and the output's QuantizedParam.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedConcatTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },      // activationType -> FusedActivation
    { flatbuffers::ET_INT, 0, -1 },      // axis
    { flatbuffers::ET_FLOAT, 1, -1 },    // inputScale: vector of float
    { flatbuffers::ET_INT, 1, -1 },      // inputZeroPoint: vector of int32
    { flatbuffers::ET_SEQUENCE, 0, 1 }   // outputQuantizedParam -> QuantizedParam
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    FusedActivationTypeTable,
    QuantizedParamTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "activationType",
    "axis",
    "inputScale",
    "inputZeroPoint",
    "outputQuantizedParam"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedLogistic: input and output QuantizedParam
// sub-tables only.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedLogisticTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 0, 0 },  // inputQuantizedParam -> QuantizedParam
    { flatbuffers::ET_SEQUENCE, 0, 0 }   // outputQuantizedParam -> QuantizedParam
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    QuantizedParamTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "inputQuantizedParam",
    "outputQuantizedParam"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedMatMul: two bool scalars, no sub-table refs.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedMatMulTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_BOOL, 0, -1 },  // transposeA
    { flatbuffers::ET_BOOL, 0, -1 }   // transposeB
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "transposeA",
    "transposeB"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedMaxPool: identical field layout to
// QuantizedAvgPool (kernel/stride/pad scalars plus three enum refs).
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedMaxPoolTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, -1 },   // kernelX
    { flatbuffers::ET_INT, 0, -1 },   // kernelY
    { flatbuffers::ET_CHAR, 0, 0 },   // modelFormat -> ModeFormat
    { flatbuffers::ET_INT, 0, -1 },   // outputActivationMax
    { flatbuffers::ET_INT, 0, -1 },   // outputActivationMin
    { flatbuffers::ET_CHAR, 0, 1 },   // padType -> PoolPadType
    { flatbuffers::ET_INT, 0, -1 },   // padX
    { flatbuffers::ET_INT, 0, -1 },   // padY
    { flatbuffers::ET_INT, 0, -1 },   // strideX
    { flatbuffers::ET_INT, 0, -1 },   // strideY
    { flatbuffers::ET_INT, 0, 2 }     // type -> DataType
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    ModeFormatTypeTable,
    PoolPadTypeTypeTable,
    DataTypeTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "kernelX",
    "kernelY",
    "modelFormat",
    "outputActivationMax",
    "outputActivationMin",
    "padType",
    "padX",
    "padY",
    "strideX",
    "strideY",
    "type"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedRelu: a single DataType field.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedReluTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, 0 }  // type -> DataType
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    DataTypeTypeTable
  };
  static const char * const names[] = {
    "type"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedRelu6: a single DataType field
// (same layout as QuantizedRelu).
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedRelu6TypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, 0 }  // type -> DataType
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    DataTypeTypeTable
  };
  static const char * const names[] = {
    "type"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedReshape: target-dims vector plus a
// ModeFormat enum.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedReshapeTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 1, -1 },  // dims: vector of int32
    { flatbuffers::ET_CHAR, 0, 0 }   // modelFormat -> ModeFormat
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    ModeFormatTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "dims",
    "modelFormat"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizedSoftmax: two float scalars, no sub-table refs.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizedSoftmaxTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_FLOAT, 0, -1 },  // beta
    { flatbuffers::ET_FLOAT, 0, -1 }   // inputScale
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "beta",
    "inputScale"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names
  };
  return &tt;
}
// Reflection table for QuantizeV2: data type plus quantize mode and
// rounding mode enums.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *QuantizeV2TypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, 0 },   // type -> DataType
    { flatbuffers::ET_CHAR, 0, 1 },  // mode -> QuantizeMode
    { flatbuffers::ET_CHAR, 0, 2 }   // roundMode -> QuantizeRoundMode
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    DataTypeTypeTable,
    QuantizeModeTypeTable,
    QuantizeRoundModeTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "type",
    "mode",
    "roundMode"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Reflection table for RequantizationRange: the schema table has no fields,
// so all metadata arrays are null.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *RequantizationRangeTypeTable() {
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr
  };
  return &tt;
}
// Reflection table for Requantize: the schema table has no fields,
// so all metadata arrays are null.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *RequantizeTypeTable() {
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr
  };
  return &tt;
}
// Reflection table for TfQuantizedConv2D (15 fields): conv common params,
// weights/bias data, activation, and per-tensor QuantizedParam sub-tables.
// Each sequence_ref indexes into type_refs below; -1 means plain scalar/vector.
// NOTE: FlatBuffers-generated code; regenerate from the schema, do not hand-edit.
inline const flatbuffers::TypeTable *TfQuantizedConv2DTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 1, -1 },      // bias: vector of int32
    { flatbuffers::ET_BOOL, 0, -1 },     // biasflag
    { flatbuffers::ET_SEQUENCE, 0, 0 },  // common -> Convolution2DCommon
    { flatbuffers::ET_UCHAR, 1, -1 },    // weight: vector of uint8
    { flatbuffers::ET_CHAR, 0, 1 },      // activationType -> FusedActivation
    { flatbuffers::ET_INT, 0, -1 },      // multiplier
    { flatbuffers::ET_INT, 0, -1 },      // outMax
    { flatbuffers::ET_INT, 0, -1 },      // outMin
    { flatbuffers::ET_INT, 0, -1 },      // shift
    { flatbuffers::ET_SEQUENCE, 0, 2 },  // biasQuantizedParam -> QuantizedParam
    { flatbuffers::ET_INT, 0, -1 },      // depthMultiplier
    { flatbuffers::ET_SEQUENCE, 0, 2 },  // filterQuantizedParam -> QuantizedParam
    { flatbuffers::ET_SEQUENCE, 0, 2 },  // inputQuantizedParam -> QuantizedParam
    { flatbuffers::ET_CHAR, 0, 3 },      // modelFormat -> ModeFormat
    { flatbuffers::ET_SEQUENCE, 0, 2 }   // outputQuantizedParam -> QuantizedParam
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    Convolution2DCommonTypeTable,
    FusedActivationTypeTable,
    QuantizedParamTypeTable,
    ModeFormatTypeTable
  };
  // Field names, positionally aligned with type_codes.
  static const char * const names[] = {
    "bias",
    "biasflag",
    "common",
    "weight",
    "activationType",
    "multiplier",
    "outMax",
    "outMin",
    "shift",
    "biasQuantizedParam",
    "depthMultiplier",
    "filterQuantizedParam",
    "inputQuantizedParam",
    "modelFormat",
    "outputQuantizedParam"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 15, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
} // namespace MNN
#endif // FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_