MNN/schema/current/BasicOptimizer_generated.h

// automatically generated by the FlatBuffers compiler, do not modify

#ifndef FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_
#define FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_

#include "flatbuffers/flatbuffers.h"

#include "CaffeOp_generated.h"
#include "GpuLibrary_generated.h"
#include "MNN_generated.h"
#include "TFQuantizeOp_generated.h"
#include "Tensor_generated.h"
#include "TensorflowOp_generated.h"
#include "Type_generated.h"
#include "UserDefine_generated.h"

namespace MNN {
namespace Optimizer {

struct BackendConfig;
struct BackendConfigT;

struct Merge;
struct MergeT;

inline const flatbuffers::TypeTable *BackendConfigTypeTable();

inline const flatbuffers::TypeTable *MergeTypeTable();
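
// Editorial note (not emitted by flatc): this header defines the two tables
// used by MNN's basic graph optimizer. BackendConfig carries per-backend
// execution settings (the "memroy" spelling comes verbatim from the .fbs
// schema field and must stay as-is, since it is part of the generated API).
// Merge describes a fused subgraph: its output/input tensor indices, tensor
// count, backend config, and the ops it bundles. Each table offers a builder
// API (the Create* helpers) and an object API (the *T types with Pack/UnPack).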

struct BackendConfigT : public flatbuffers::NativeTable {
  typedef BackendConfig TableType;
  int32_t memroy;
  MNN::ForwardType type;
  int32_t precision;
  int32_t power;
  int32_t numberThread;
  BackendConfigT()
      : memroy(0),
        type(MNN::ForwardType_CPU),
        precision(0),
        power(0),
        numberThread(1) {
  }
};

struct BackendConfig FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BackendConfigT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return BackendConfigTypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_MEMROY = 4,
    VT_TYPE = 6,
    VT_PRECISION = 8,
    VT_POWER = 10,
    VT_NUMBERTHREAD = 12
  };
  int32_t memroy() const {
    return GetField<int32_t>(VT_MEMROY, 0);
  }
  MNN::ForwardType type() const {
    return static_cast<MNN::ForwardType>(GetField<int8_t>(VT_TYPE, 0));
  }
  int32_t precision() const {
    return GetField<int32_t>(VT_PRECISION, 0);
  }
  int32_t power() const {
    return GetField<int32_t>(VT_POWER, 0);
  }
  int32_t numberThread() const {
    return GetField<int32_t>(VT_NUMBERTHREAD, 1);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_MEMROY) &&
           VerifyField<int8_t>(verifier, VT_TYPE) &&
           VerifyField<int32_t>(verifier, VT_PRECISION) &&
           VerifyField<int32_t>(verifier, VT_POWER) &&
           VerifyField<int32_t>(verifier, VT_NUMBERTHREAD) &&
           verifier.EndTable();
  }
  BackendConfigT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BackendConfigT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BackendConfig> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
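
// Illustrative sketch (not part of the generated file): verifying and reading
// a serialized BackendConfig, assuming `buf`/`size` hold a buffer whose root
// table is a BackendConfig.
//
//   flatbuffers::Verifier verifier(buf, size);
//   if (verifier.VerifyBuffer<MNN::Optimizer::BackendConfig>(nullptr)) {
//     auto cfg = flatbuffers::GetRoot<MNN::Optimizer::BackendConfig>(buf);
//     int32_t threads = cfg->numberThread();  // returns 1 if the field is absent
//   }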

struct BackendConfigBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_memroy(int32_t memroy) {
    fbb_.AddElement<int32_t>(BackendConfig::VT_MEMROY, memroy, 0);
  }
  void add_type(MNN::ForwardType type) {
    fbb_.AddElement<int8_t>(BackendConfig::VT_TYPE, static_cast<int8_t>(type), 0);
  }
  void add_precision(int32_t precision) {
    fbb_.AddElement<int32_t>(BackendConfig::VT_PRECISION, precision, 0);
  }
  void add_power(int32_t power) {
    fbb_.AddElement<int32_t>(BackendConfig::VT_POWER, power, 0);
  }
  void add_numberThread(int32_t numberThread) {
    fbb_.AddElement<int32_t>(BackendConfig::VT_NUMBERTHREAD, numberThread, 1);
  }
  explicit BackendConfigBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  BackendConfigBuilder &operator=(const BackendConfigBuilder &);
  flatbuffers::Offset<BackendConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BackendConfig>(end);
    return o;
  }
};

inline flatbuffers::Offset<BackendConfig> CreateBackendConfig(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t memroy = 0,
    MNN::ForwardType type = MNN::ForwardType_CPU,
    int32_t precision = 0,
    int32_t power = 0,
    int32_t numberThread = 1) {
  BackendConfigBuilder builder_(_fbb);
  builder_.add_numberThread(numberThread);
  builder_.add_power(power);
  builder_.add_precision(precision);
  builder_.add_memroy(memroy);
  builder_.add_type(type);
  return builder_.Finish();
}
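
// Illustrative sketch (not part of the generated file): building a
// BackendConfig buffer with the helper above.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   auto cfg = MNN::Optimizer::CreateBackendConfig(
//       fbb, /*memroy=*/0, MNN::ForwardType_CPU,
//       /*precision=*/0, /*power=*/0, /*numberThread=*/4);
//   fbb.Finish(cfg);
//   // fbb.GetBufferPointer() / fbb.GetSize() now hold the serialized table.
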
flatbuffers::Offset<BackendConfig> CreateBackendConfig(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

struct MergeT : public flatbuffers::NativeTable {
  typedef Merge TableType;
  std::vector<int32_t> outputIndexes;
  std::vector<int32_t> inputIndexes;
  int32_t tensorNumber;
  std::unique_ptr<BackendConfigT> backend;
  std::vector<std::unique_ptr<MNN::OpT>> oplists;
  MergeT()
      : tensorNumber(0) {
  }
};

struct Merge FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef MergeT NativeTableType;
  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
    return MergeTypeTable();
  }
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_OUTPUTINDEXES = 4,
    VT_INPUTINDEXES = 6,
    VT_TENSORNUMBER = 8,
    VT_BACKEND = 10,
    VT_OPLISTS = 12
  };
  const flatbuffers::Vector<int32_t> *outputIndexes() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTINDEXES);
  }
  const flatbuffers::Vector<int32_t> *inputIndexes() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTINDEXES);
  }
  int32_t tensorNumber() const {
    return GetField<int32_t>(VT_TENSORNUMBER, 0);
  }
  const BackendConfig *backend() const {
    return GetPointer<const BackendConfig *>(VT_BACKEND);
  }
  const flatbuffers::Vector<flatbuffers::Offset<MNN::Op>> *oplists() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<MNN::Op>> *>(VT_OPLISTS);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_OUTPUTINDEXES) &&
           verifier.VerifyVector(outputIndexes()) &&
           VerifyOffset(verifier, VT_INPUTINDEXES) &&
           verifier.VerifyVector(inputIndexes()) &&
           VerifyField<int32_t>(verifier, VT_TENSORNUMBER) &&
           VerifyOffset(verifier, VT_BACKEND) &&
           verifier.VerifyTable(backend()) &&
           VerifyOffset(verifier, VT_OPLISTS) &&
           verifier.VerifyVector(oplists()) &&
           verifier.VerifyVectorOfTables(oplists()) &&
           verifier.EndTable();
  }
  MergeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(MergeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<Merge> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MergeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};

struct MergeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_outputIndexes(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputIndexes) {
    fbb_.AddOffset(Merge::VT_OUTPUTINDEXES, outputIndexes);
  }
  void add_inputIndexes(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputIndexes) {
    fbb_.AddOffset(Merge::VT_INPUTINDEXES, inputIndexes);
  }
  void add_tensorNumber(int32_t tensorNumber) {
    fbb_.AddElement<int32_t>(Merge::VT_TENSORNUMBER, tensorNumber, 0);
  }
  void add_backend(flatbuffers::Offset<BackendConfig> backend) {
    fbb_.AddOffset(Merge::VT_BACKEND, backend);
  }
  void add_oplists(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MNN::Op>>> oplists) {
    fbb_.AddOffset(Merge::VT_OPLISTS, oplists);
  }
  explicit MergeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  MergeBuilder &operator=(const MergeBuilder &);
  flatbuffers::Offset<Merge> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<Merge>(end);
    return o;
  }
};

inline flatbuffers::Offset<Merge> CreateMerge(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputIndexes = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputIndexes = 0,
    int32_t tensorNumber = 0,
    flatbuffers::Offset<BackendConfig> backend = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<MNN::Op>>> oplists = 0) {
  MergeBuilder builder_(_fbb);
  builder_.add_oplists(oplists);
  builder_.add_backend(backend);
  builder_.add_tensorNumber(tensorNumber);
  builder_.add_inputIndexes(inputIndexes);
  builder_.add_outputIndexes(outputIndexes);
  return builder_.Finish();
}

inline flatbuffers::Offset<Merge> CreateMergeDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int32_t> *outputIndexes = nullptr,
    const std::vector<int32_t> *inputIndexes = nullptr,
    int32_t tensorNumber = 0,
    flatbuffers::Offset<BackendConfig> backend = 0,
    const std::vector<flatbuffers::Offset<MNN::Op>> *oplists = nullptr) {
  auto outputIndexes__ = outputIndexes ? _fbb.CreateVector<int32_t>(*outputIndexes) : 0;
  auto inputIndexes__ = inputIndexes ? _fbb.CreateVector<int32_t>(*inputIndexes) : 0;
  auto oplists__ = oplists ? _fbb.CreateVector<flatbuffers::Offset<MNN::Op>>(*oplists) : 0;
  return MNN::Optimizer::CreateMerge(
      _fbb,
      outputIndexes__,
      inputIndexes__,
      tensorNumber,
      backend,
      oplists__);
}
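
// Illustrative sketch (not part of the generated file): assembling a Merge
// directly from std::vectors. The index values below are placeholders.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   std::vector<int32_t> outputs = {2};
//   std::vector<int32_t> inputs = {0, 1};
//   auto backend = MNN::Optimizer::CreateBackendConfig(fbb);
//   auto merge = MNN::Optimizer::CreateMergeDirect(
//       fbb, &outputs, &inputs, /*tensorNumber=*/3, backend, /*oplists=*/nullptr);
//   fbb.Finish(merge);
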
flatbuffers::Offset<Merge> CreateMerge(flatbuffers::FlatBufferBuilder &_fbb, const MergeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

inline BackendConfigT *BackendConfig::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BackendConfigT();
  UnPackTo(_o, _resolver);
  return _o;
}

inline void BackendConfig::UnPackTo(BackendConfigT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = memroy(); _o->memroy = _e; };
  { auto _e = type(); _o->type = _e; };
  { auto _e = precision(); _o->precision = _e; };
  { auto _e = power(); _o->power = _e; };
  { auto _e = numberThread(); _o->numberThread = _e; };
}

inline flatbuffers::Offset<BackendConfig> BackendConfig::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBackendConfig(_fbb, _o, _rehasher);
}

inline flatbuffers::Offset<BackendConfig> CreateBackendConfig(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BackendConfigT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _memroy = _o->memroy;
  auto _type = _o->type;
  auto _precision = _o->precision;
  auto _power = _o->power;
  auto _numberThread = _o->numberThread;
  return MNN::Optimizer::CreateBackendConfig(
      _fbb,
      _memroy,
      _type,
      _precision,
      _power,
      _numberThread);
}

inline MergeT *Merge::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new MergeT();
  UnPackTo(_o, _resolver);
  return _o;
}

inline void Merge::UnPackTo(MergeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } };
  { auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } };
  { auto _e = tensorNumber(); _o->tensorNumber = _e; };
  { auto _e = backend(); if (_e) _o->backend = std::unique_ptr<BackendConfigT>(_e->UnPack(_resolver)); };
  { auto _e = oplists(); if (_e) { _o->oplists.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->oplists[_i] = std::unique_ptr<MNN::OpT>(_e->Get(_i)->UnPack(_resolver)); } } };
}

inline flatbuffers::Offset<Merge> Merge::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MergeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateMerge(_fbb, _o, _rehasher);
}
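
// Illustrative sketch (not part of the generated file): an object-API round
// trip, assuming `buf` points at an already-verified serialized Merge.
//
//   auto merge = flatbuffers::GetRoot<MNN::Optimizer::Merge>(buf);
//   std::unique_ptr<MNN::Optimizer::MergeT> native(merge->UnPack());
//   native->tensorNumber += 1;  // mutate the unpacked copy
//   flatbuffers::FlatBufferBuilder fbb;
//   fbb.Finish(MNN::Optimizer::Merge::Pack(fbb, native.get()));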

inline flatbuffers::Offset<Merge> CreateMerge(flatbuffers::FlatBufferBuilder &_fbb, const MergeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MergeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0;
  auto _inputIndexes = _o->inputIndexes.size() ? _fbb.CreateVector(_o->inputIndexes) : 0;
  auto _tensorNumber = _o->tensorNumber;
  auto _backend = _o->backend ? CreateBackendConfig(_fbb, _o->backend.get(), _rehasher) : 0;
  auto _oplists = _o->oplists.size() ? _fbb.CreateVector<flatbuffers::Offset<MNN::Op>> (_o->oplists.size(), [](size_t i, _VectorArgs *__va) { return CreateOp(*__va->__fbb, __va->__o->oplists[i].get(), __va->__rehasher); }, &_va ) : 0;
  return MNN::Optimizer::CreateMerge(
      _fbb,
      _outputIndexes,
      _inputIndexes,
      _tensorNumber,
      _backend,
      _oplists);
}

inline const flatbuffers::TypeTable *BackendConfigTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 0, -1 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_INT, 0, -1 },
    { flatbuffers::ET_INT, 0, -1 },
    { flatbuffers::ET_INT, 0, -1 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    MNN::ForwardTypeTypeTable
  };
  static const char * const names[] = {
    "memroy",
    "type",
    "precision",
    "power",
    "numberThread"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names
  };
  return &tt;
}

inline const flatbuffers::TypeTable *MergeTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_INT, 0, -1 },
    { flatbuffers::ET_SEQUENCE, 0, 0 },
    { flatbuffers::ET_SEQUENCE, 1, 1 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    BackendConfigTypeTable,
    MNN::OpTypeTable
  };
  static const char * const names[] = {
    "outputIndexes",
    "inputIndexes",
    "tensorNumber",
    "backend",
    "oplists"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
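
// Illustrative sketch (not part of the generated file): the type tables above
// enable mini-reflection, e.g. pretty-printing a buffer via
// flatbuffers/minireflect.h (assuming `buf` holds a serialized Merge):
//
//   #include "flatbuffers/minireflect.h"
//   std::string text = flatbuffers::FlatBufferToString(
//       buf, MNN::Optimizer::MergeTypeTable());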

}  // namespace Optimizer
}  // namespace MNN

#endif  // FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_