// Source listing metadata (not part of the generated header):
// MNN/schema/current/GpuLibrary_generated.h — 1035 lines, 41 KiB, C++

// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_
#define FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_
#include "Tensor_generated.h"
#include "Type_generated.h"
namespace MNN {
struct GpuBuffer;
struct GpuBufferT;
struct GpuPipeline;
struct GpuPipelineT;
struct GpuStage;
struct GpuStageT;
struct GpuFunction;
struct GpuFunctionT;
struct GpuLibrary;
struct GpuLibraryT;
inline const flatbuffers::TypeTable *GpuBufferTypeTable();
inline const flatbuffers::TypeTable *GpuPipelineTypeTable();
inline const flatbuffers::TypeTable *GpuStageTypeTable();
inline const flatbuffers::TypeTable *GpuFunctionTypeTable();
inline const flatbuffers::TypeTable *GpuLibraryTypeTable();
// Backing-storage kind of a GPU buffer: generic buffer, uniform block, or image.
enum STORAGE_TYPE {
  STORAGE_TYPE_BUFFER = 0,
  STORAGE_TYPE_UNIFORM = 1,
  STORAGE_TYPE_IMAGE = 2,
  STORAGE_TYPE_MIN = STORAGE_TYPE_BUFFER,
  STORAGE_TYPE_MAX = STORAGE_TYPE_IMAGE
};
// All STORAGE_TYPE enumerators, in declaration order.
inline const STORAGE_TYPE (&EnumValuesSTORAGE_TYPE())[3] {
  static const STORAGE_TYPE kValues[] = {
      STORAGE_TYPE_BUFFER, STORAGE_TYPE_UNIFORM, STORAGE_TYPE_IMAGE};
  return kValues;
}
// Printable names, indexed by enumerator value; nullptr-terminated.
inline const char * const *EnumNamesSTORAGE_TYPE() {
  static const char * const kNames[] = {"BUFFER", "UNIFORM", "IMAGE", nullptr};
  return kNames;
}
// Name of `e`, or "" when `e` is outside the declared range.
inline const char *EnumNameSTORAGE_TYPE(STORAGE_TYPE e) {
  const bool in_range = (e >= STORAGE_TYPE_BUFFER) && (e <= STORAGE_TYPE_IMAGE);
  if (!in_range) return "";
  return EnumNamesSTORAGE_TYPE()[static_cast<size_t>(e)];
}
// Shader-side access mode of a GPU buffer.
enum ACCESS_TYPE {
  ACCESS_TYPE_READ_ONLY = 0,
  ACCESS_TYPE_WRITE_ONLY = 1,
  ACCESS_TYPE_READ_WRITE = 2,
  ACCESS_TYPE_MIN = ACCESS_TYPE_READ_ONLY,
  ACCESS_TYPE_MAX = ACCESS_TYPE_READ_WRITE
};
// All ACCESS_TYPE enumerators, in declaration order.
inline const ACCESS_TYPE (&EnumValuesACCESS_TYPE())[3] {
  static const ACCESS_TYPE kValues[] = {
      ACCESS_TYPE_READ_ONLY, ACCESS_TYPE_WRITE_ONLY, ACCESS_TYPE_READ_WRITE};
  return kValues;
}
// Printable names, indexed by enumerator value; nullptr-terminated.
inline const char * const *EnumNamesACCESS_TYPE() {
  static const char * const kNames[] = {
      "READ_ONLY", "WRITE_ONLY", "READ_WRITE", nullptr};
  return kNames;
}
// Name of `e`, or "" when `e` is outside the declared range.
inline const char *EnumNameACCESS_TYPE(ACCESS_TYPE e) {
  const bool in_range = (e >= ACCESS_TYPE_READ_ONLY) && (e <= ACCESS_TYPE_READ_WRITE);
  if (!in_range) return "";
  return EnumNamesACCESS_TYPE()[static_cast<size_t>(e)];
}
// Object-API (mutable, owning) mirror of the GpuBuffer table.
struct GpuBufferT : public flatbuffers::NativeTable {
typedef GpuBuffer TableType;
ACCESS_TYPE access;    // shader access mode; constructed as READ_ONLY
STORAGE_TYPE storage;  // backing storage kind; constructed as BUFFER
std::unique_ptr<BlobT> content;  // optional owned payload; null when absent
GpuBufferT()
: access(ACCESS_TYPE_READ_ONLY),
storage(STORAGE_TYPE_BUFFER) {
}
};
// Flat (wire-format) view of a GpuBuffer table; reads fields lazily
// from the serialized buffer through the vtable.
struct GpuBuffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GpuBufferT NativeTableType;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return GpuBufferTypeTable();
}
// Vtable byte offsets of each field.
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ACCESS = 4,
VT_STORAGE = 6,
VT_CONTENT = 8
};
// Access mode; 0 (READ_ONLY) when the field is absent.
ACCESS_TYPE access() const {
return static_cast<ACCESS_TYPE>(GetField<int8_t>(VT_ACCESS, 0));
}
// Storage kind; 0 (BUFFER) when the field is absent.
STORAGE_TYPE storage() const {
return static_cast<STORAGE_TYPE>(GetField<int8_t>(VT_STORAGE, 0));
}
// Optional payload; nullptr when the field is absent.
const Blob *content() const {
return GetPointer<const Blob *>(VT_CONTENT);
}
// Bounds/structure validation of this table against the buffer.
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int8_t>(verifier, VT_ACCESS) &&
VerifyField<int8_t>(verifier, VT_STORAGE) &&
VerifyOffset(verifier, VT_CONTENT) &&
verifier.VerifyTable(content()) &&
verifier.EndTable();
}
GpuBufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(GpuBufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<GpuBuffer> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for GpuBuffer tables; each add_* writes one field
// into the FlatBufferBuilder between StartTable() and Finish().
struct GpuBufferBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;  // offset returned by StartTable()
void add_access(ACCESS_TYPE access) {
fbb_.AddElement<int8_t>(GpuBuffer::VT_ACCESS, static_cast<int8_t>(access), 0);
}
void add_storage(STORAGE_TYPE storage) {
fbb_.AddElement<int8_t>(GpuBuffer::VT_STORAGE, static_cast<int8_t>(storage), 0);
}
void add_content(flatbuffers::Offset<Blob> content) {
fbb_.AddOffset(GpuBuffer::VT_CONTENT, content);
}
explicit GpuBufferBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
// Declared but never defined: builders are intentionally non-assignable.
GpuBufferBuilder &operator=(const GpuBufferBuilder &);
flatbuffers::Offset<GpuBuffer> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GpuBuffer>(end);
return o;
}
};
// One-call construction of a GpuBuffer table.
// NOTE: the add_* order below is as emitted by the schema compiler
// (offset field first, then int8 scalars); it affects the serialized
// layout, so keep it unchanged.
inline flatbuffers::Offset<GpuBuffer> CreateGpuBuffer(
flatbuffers::FlatBufferBuilder &_fbb,
ACCESS_TYPE access = ACCESS_TYPE_READ_ONLY,
STORAGE_TYPE storage = STORAGE_TYPE_BUFFER,
flatbuffers::Offset<Blob> content = 0) {
GpuBufferBuilder builder_(_fbb);
builder_.add_content(content);
builder_.add_storage(storage);
builder_.add_access(access);
return builder_.Finish();
}
// Object-API packing overload; defined later in this header.
flatbuffers::Offset<GpuBuffer> CreateGpuBuffer(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of the GpuPipeline table: one compute kernel with
// per-backend payloads (Metal/Vulkan binaries, OpenGL/OpenCL sources).
struct GpuPipelineT : public flatbuffers::NativeTable {
typedef GpuPipeline TableType;
std::vector<int32_t> localSize;  // workgroup local size
std::string key;                 // pipeline lookup key
std::vector<int8_t> metal;       // Metal backend payload
std::vector<int8_t> vulkan;      // Vulkan backend payload
std::string openglComputeShader; // OpenGL compute shader source
std::string openclKernel;        // OpenCL kernel source
GpuPipelineT() {
}
};
// Flat (wire-format) view of a GpuPipeline table.
struct GpuPipeline FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GpuPipelineT NativeTableType;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return GpuPipelineTypeTable();
}
// Vtable byte offsets of each field.
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_LOCALSIZE = 4,
VT_KEY = 6,
VT_METAL = 8,
VT_VULKAN = 10,
VT_OPENGLCOMPUTESHADER = 12,
VT_OPENCLKERNEL = 14
};
// All accessors return nullptr when the field is absent.
const flatbuffers::Vector<int32_t> *localSize() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_LOCALSIZE);
}
const flatbuffers::String *key() const {
return GetPointer<const flatbuffers::String *>(VT_KEY);
}
const flatbuffers::Vector<int8_t> *metal() const {
return GetPointer<const flatbuffers::Vector<int8_t> *>(VT_METAL);
}
const flatbuffers::Vector<int8_t> *vulkan() const {
return GetPointer<const flatbuffers::Vector<int8_t> *>(VT_VULKAN);
}
const flatbuffers::String *openglComputeShader() const {
return GetPointer<const flatbuffers::String *>(VT_OPENGLCOMPUTESHADER);
}
const flatbuffers::String *openclKernel() const {
return GetPointer<const flatbuffers::String *>(VT_OPENCLKERNEL);
}
// Bounds/structure validation of this table against the buffer.
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_LOCALSIZE) &&
verifier.VerifyVector(localSize()) &&
VerifyOffset(verifier, VT_KEY) &&
verifier.VerifyString(key()) &&
VerifyOffset(verifier, VT_METAL) &&
verifier.VerifyVector(metal()) &&
VerifyOffset(verifier, VT_VULKAN) &&
verifier.VerifyVector(vulkan()) &&
VerifyOffset(verifier, VT_OPENGLCOMPUTESHADER) &&
verifier.VerifyString(openglComputeShader()) &&
VerifyOffset(verifier, VT_OPENCLKERNEL) &&
verifier.VerifyString(openclKernel()) &&
verifier.EndTable();
}
GpuPipelineT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(GpuPipelineT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<GpuPipeline> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for GpuPipeline tables.
struct GpuPipelineBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;  // offset returned by StartTable()
void add_localSize(flatbuffers::Offset<flatbuffers::Vector<int32_t>> localSize) {
fbb_.AddOffset(GpuPipeline::VT_LOCALSIZE, localSize);
}
void add_key(flatbuffers::Offset<flatbuffers::String> key) {
fbb_.AddOffset(GpuPipeline::VT_KEY, key);
}
void add_metal(flatbuffers::Offset<flatbuffers::Vector<int8_t>> metal) {
fbb_.AddOffset(GpuPipeline::VT_METAL, metal);
}
void add_vulkan(flatbuffers::Offset<flatbuffers::Vector<int8_t>> vulkan) {
fbb_.AddOffset(GpuPipeline::VT_VULKAN, vulkan);
}
void add_openglComputeShader(flatbuffers::Offset<flatbuffers::String> openglComputeShader) {
fbb_.AddOffset(GpuPipeline::VT_OPENGLCOMPUTESHADER, openglComputeShader);
}
void add_openclKernel(flatbuffers::Offset<flatbuffers::String> openclKernel) {
fbb_.AddOffset(GpuPipeline::VT_OPENCLKERNEL, openclKernel);
}
explicit GpuPipelineBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
// Declared but never defined: builders are intentionally non-assignable.
GpuPipelineBuilder &operator=(const GpuPipelineBuilder &);
flatbuffers::Offset<GpuPipeline> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GpuPipeline>(end);
return o;
}
};
// One-call construction of a GpuPipeline table.
// NOTE: add_* order is as generated; do not reorder.
inline flatbuffers::Offset<GpuPipeline> CreateGpuPipeline(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> localSize = 0,
flatbuffers::Offset<flatbuffers::String> key = 0,
flatbuffers::Offset<flatbuffers::Vector<int8_t>> metal = 0,
flatbuffers::Offset<flatbuffers::Vector<int8_t>> vulkan = 0,
flatbuffers::Offset<flatbuffers::String> openglComputeShader = 0,
flatbuffers::Offset<flatbuffers::String> openclKernel = 0) {
GpuPipelineBuilder builder_(_fbb);
builder_.add_openclKernel(openclKernel);
builder_.add_openglComputeShader(openglComputeShader);
builder_.add_vulkan(vulkan);
builder_.add_metal(metal);
builder_.add_key(key);
builder_.add_localSize(localSize);
return builder_.Finish();
}
// Convenience overload taking std::vector/C-string arguments; serializes
// each non-null argument, then delegates to CreateGpuPipeline.
inline flatbuffers::Offset<GpuPipeline> CreateGpuPipelineDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<int32_t> *localSize = nullptr,
const char *key = nullptr,
const std::vector<int8_t> *metal = nullptr,
const std::vector<int8_t> *vulkan = nullptr,
const char *openglComputeShader = nullptr,
const char *openclKernel = nullptr) {
auto localSize__ = localSize ? _fbb.CreateVector<int32_t>(*localSize) : 0;
auto key__ = key ? _fbb.CreateString(key) : 0;
auto metal__ = metal ? _fbb.CreateVector<int8_t>(*metal) : 0;
auto vulkan__ = vulkan ? _fbb.CreateVector<int8_t>(*vulkan) : 0;
auto openglComputeShader__ = openglComputeShader ? _fbb.CreateString(openglComputeShader) : 0;
auto openclKernel__ = openclKernel ? _fbb.CreateString(openclKernel) : 0;
return MNN::CreateGpuPipeline(
_fbb,
localSize__,
key__,
metal__,
vulkan__,
openglComputeShader__,
openclKernel__);
}
// Object-API packing overload; defined later in this header.
flatbuffers::Offset<GpuPipeline> CreateGpuPipeline(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of the GpuStage table: one dispatch of a pipeline
// with its tensor indexes and buffer bindings.
struct GpuStageT : public flatbuffers::NativeTable {
typedef GpuStage TableType;
std::string pipeline;  // key of the GpuPipeline this stage runs
std::vector<int32_t> groupSize;
std::vector<int32_t> inputIndexes;
std::vector<int32_t> outputIndexes;
std::vector<std::unique_ptr<GpuBufferT>> middleBuffer;  // owned intermediates
std::vector<std::unique_ptr<GpuBufferT>> constBuffer;   // owned constants
int32_t globalSizeIndex;
std::vector<int32_t> globalSizeDivide;
bool requireSize;
GpuStageT()
: globalSizeIndex(0),
requireSize(false) {
}
};
// Flat (wire-format) view of a GpuStage table.
struct GpuStage FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GpuStageT NativeTableType;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return GpuStageTypeTable();
}
// Vtable byte offsets of each field.
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_PIPELINE = 4,
VT_GROUPSIZE = 6,
VT_INPUTINDEXES = 8,
VT_OUTPUTINDEXES = 10,
VT_MIDDLEBUFFER = 12,
VT_CONSTBUFFER = 14,
VT_GLOBALSIZEINDEX = 16,
VT_GLOBALSIZEDIVIDE = 18,
VT_REQUIRESIZE = 20
};
// Pointer accessors return nullptr when the field is absent.
const flatbuffers::String *pipeline() const {
return GetPointer<const flatbuffers::String *>(VT_PIPELINE);
}
const flatbuffers::Vector<int32_t> *groupSize() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_GROUPSIZE);
}
const flatbuffers::Vector<int32_t> *inputIndexes() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUTINDEXES);
}
const flatbuffers::Vector<int32_t> *outputIndexes() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUTINDEXES);
}
const flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>> *middleBuffer() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>> *>(VT_MIDDLEBUFFER);
}
const flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>> *constBuffer() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>> *>(VT_CONSTBUFFER);
}
// Scalar fields fall back to their schema defaults (0 / false) when absent.
int32_t globalSizeIndex() const {
return GetField<int32_t>(VT_GLOBALSIZEINDEX, 0);
}
const flatbuffers::Vector<int32_t> *globalSizeDivide() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_GLOBALSIZEDIVIDE);
}
bool requireSize() const {
return GetField<uint8_t>(VT_REQUIRESIZE, 0) != 0;
}
// Bounds/structure validation; nested GpuBuffer vectors are verified per-table.
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_PIPELINE) &&
verifier.VerifyString(pipeline()) &&
VerifyOffset(verifier, VT_GROUPSIZE) &&
verifier.VerifyVector(groupSize()) &&
VerifyOffset(verifier, VT_INPUTINDEXES) &&
verifier.VerifyVector(inputIndexes()) &&
VerifyOffset(verifier, VT_OUTPUTINDEXES) &&
verifier.VerifyVector(outputIndexes()) &&
VerifyOffset(verifier, VT_MIDDLEBUFFER) &&
verifier.VerifyVector(middleBuffer()) &&
verifier.VerifyVectorOfTables(middleBuffer()) &&
VerifyOffset(verifier, VT_CONSTBUFFER) &&
verifier.VerifyVector(constBuffer()) &&
verifier.VerifyVectorOfTables(constBuffer()) &&
VerifyField<int32_t>(verifier, VT_GLOBALSIZEINDEX) &&
VerifyOffset(verifier, VT_GLOBALSIZEDIVIDE) &&
verifier.VerifyVector(globalSizeDivide()) &&
VerifyField<uint8_t>(verifier, VT_REQUIRESIZE) &&
verifier.EndTable();
}
GpuStageT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(GpuStageT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<GpuStage> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for GpuStage tables.
struct GpuStageBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;  // offset returned by StartTable()
void add_pipeline(flatbuffers::Offset<flatbuffers::String> pipeline) {
fbb_.AddOffset(GpuStage::VT_PIPELINE, pipeline);
}
void add_groupSize(flatbuffers::Offset<flatbuffers::Vector<int32_t>> groupSize) {
fbb_.AddOffset(GpuStage::VT_GROUPSIZE, groupSize);
}
void add_inputIndexes(flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputIndexes) {
fbb_.AddOffset(GpuStage::VT_INPUTINDEXES, inputIndexes);
}
void add_outputIndexes(flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputIndexes) {
fbb_.AddOffset(GpuStage::VT_OUTPUTINDEXES, outputIndexes);
}
void add_middleBuffer(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>>> middleBuffer) {
fbb_.AddOffset(GpuStage::VT_MIDDLEBUFFER, middleBuffer);
}
void add_constBuffer(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>>> constBuffer) {
fbb_.AddOffset(GpuStage::VT_CONSTBUFFER, constBuffer);
}
void add_globalSizeIndex(int32_t globalSizeIndex) {
fbb_.AddElement<int32_t>(GpuStage::VT_GLOBALSIZEINDEX, globalSizeIndex, 0);
}
void add_globalSizeDivide(flatbuffers::Offset<flatbuffers::Vector<int32_t>> globalSizeDivide) {
fbb_.AddOffset(GpuStage::VT_GLOBALSIZEDIVIDE, globalSizeDivide);
}
void add_requireSize(bool requireSize) {
fbb_.AddElement<uint8_t>(GpuStage::VT_REQUIRESIZE, static_cast<uint8_t>(requireSize), 0);
}
explicit GpuStageBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
// Declared but never defined: builders are intentionally non-assignable.
GpuStageBuilder &operator=(const GpuStageBuilder &);
flatbuffers::Offset<GpuStage> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GpuStage>(end);
return o;
}
};
// One-call construction of a GpuStage table.
// NOTE: add_* order is as generated (larger fields first, bool last);
// it affects serialized layout, so do not reorder.
inline flatbuffers::Offset<GpuStage> CreateGpuStage(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::String> pipeline = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> groupSize = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> inputIndexes = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> outputIndexes = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>>> middleBuffer = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuBuffer>>> constBuffer = 0,
int32_t globalSizeIndex = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> globalSizeDivide = 0,
bool requireSize = false) {
GpuStageBuilder builder_(_fbb);
builder_.add_globalSizeDivide(globalSizeDivide);
builder_.add_globalSizeIndex(globalSizeIndex);
builder_.add_constBuffer(constBuffer);
builder_.add_middleBuffer(middleBuffer);
builder_.add_outputIndexes(outputIndexes);
builder_.add_inputIndexes(inputIndexes);
builder_.add_groupSize(groupSize);
builder_.add_pipeline(pipeline);
builder_.add_requireSize(requireSize);
return builder_.Finish();
}
// Convenience overload taking std::vector/C-string arguments; serializes
// each non-null argument, then delegates to CreateGpuStage.
inline flatbuffers::Offset<GpuStage> CreateGpuStageDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const char *pipeline = nullptr,
const std::vector<int32_t> *groupSize = nullptr,
const std::vector<int32_t> *inputIndexes = nullptr,
const std::vector<int32_t> *outputIndexes = nullptr,
const std::vector<flatbuffers::Offset<GpuBuffer>> *middleBuffer = nullptr,
const std::vector<flatbuffers::Offset<GpuBuffer>> *constBuffer = nullptr,
int32_t globalSizeIndex = 0,
const std::vector<int32_t> *globalSizeDivide = nullptr,
bool requireSize = false) {
auto pipeline__ = pipeline ? _fbb.CreateString(pipeline) : 0;
auto groupSize__ = groupSize ? _fbb.CreateVector<int32_t>(*groupSize) : 0;
auto inputIndexes__ = inputIndexes ? _fbb.CreateVector<int32_t>(*inputIndexes) : 0;
auto outputIndexes__ = outputIndexes ? _fbb.CreateVector<int32_t>(*outputIndexes) : 0;
auto middleBuffer__ = middleBuffer ? _fbb.CreateVector<flatbuffers::Offset<GpuBuffer>>(*middleBuffer) : 0;
auto constBuffer__ = constBuffer ? _fbb.CreateVector<flatbuffers::Offset<GpuBuffer>>(*constBuffer) : 0;
auto globalSizeDivide__ = globalSizeDivide ? _fbb.CreateVector<int32_t>(*globalSizeDivide) : 0;
return MNN::CreateGpuStage(
_fbb,
pipeline__,
groupSize__,
inputIndexes__,
outputIndexes__,
middleBuffer__,
constBuffer__,
globalSizeIndex,
globalSizeDivide__,
requireSize);
}
// Object-API packing overload; defined later in this header.
flatbuffers::Offset<GpuStage> CreateGpuStage(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of the GpuFunction table: an ordered list of stages.
// NOTE: the field name "stags" (sic — presumably "stages") comes from the
// schema and is part of the generated API; do not rename it here.
struct GpuFunctionT : public flatbuffers::NativeTable {
typedef GpuFunction TableType;
std::vector<std::unique_ptr<GpuStageT>> stags;
std::string name;
GpuFunctionT() {
}
};
// Flat (wire-format) view of a GpuFunction table.
struct GpuFunction FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GpuFunctionT NativeTableType;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return GpuFunctionTypeTable();
}
// Vtable byte offsets of each field.
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_STAGS = 4,
VT_NAME = 6
};
// Stage list ("stags" spelling comes from the schema); nullptr when absent.
const flatbuffers::Vector<flatbuffers::Offset<GpuStage>> *stags() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<GpuStage>> *>(VT_STAGS);
}
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
}
// Bounds/structure validation of this table against the buffer.
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_STAGS) &&
verifier.VerifyVector(stags()) &&
verifier.VerifyVectorOfTables(stags()) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
verifier.EndTable();
}
GpuFunctionT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(GpuFunctionT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<GpuFunction> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for GpuFunction tables.
struct GpuFunctionBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;  // offset returned by StartTable()
void add_stags(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuStage>>> stags) {
fbb_.AddOffset(GpuFunction::VT_STAGS, stags);
}
void add_name(flatbuffers::Offset<flatbuffers::String> name) {
fbb_.AddOffset(GpuFunction::VT_NAME, name);
}
explicit GpuFunctionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
// Declared but never defined: builders are intentionally non-assignable.
GpuFunctionBuilder &operator=(const GpuFunctionBuilder &);
flatbuffers::Offset<GpuFunction> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GpuFunction>(end);
return o;
}
};
// One-call construction of a GpuFunction table (add_* order as generated).
inline flatbuffers::Offset<GpuFunction> CreateGpuFunction(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuStage>>> stags = 0,
flatbuffers::Offset<flatbuffers::String> name = 0) {
GpuFunctionBuilder builder_(_fbb);
builder_.add_name(name);
builder_.add_stags(stags);
return builder_.Finish();
}
// Convenience overload taking std::vector/C-string arguments; serializes
// each non-null argument, then delegates to CreateGpuFunction.
inline flatbuffers::Offset<GpuFunction> CreateGpuFunctionDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<flatbuffers::Offset<GpuStage>> *stags = nullptr,
const char *name = nullptr) {
auto stags__ = stags ? _fbb.CreateVector<flatbuffers::Offset<GpuStage>>(*stags) : 0;
auto name__ = name ? _fbb.CreateString(name) : 0;
return MNN::CreateGpuFunction(
_fbb,
stags__,
name__);
}
// Object-API packing overload; defined later in this header.
flatbuffers::Offset<GpuFunction> CreateGpuFunction(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API mirror of the root GpuLibrary table: a named collection of
// functions and the pipelines they reference.
struct GpuLibraryT : public flatbuffers::NativeTable {
typedef GpuLibrary TableType;
std::vector<std::unique_ptr<GpuFunctionT>> functions;
std::vector<std::unique_ptr<GpuPipelineT>> pipeline;
std::string name;
GpuLibraryT() {
}
};
// Flat (wire-format) view of a GpuLibrary table.
struct GpuLibrary FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GpuLibraryT NativeTableType;
static const flatbuffers::TypeTable *MiniReflectTypeTable() {
return GpuLibraryTypeTable();
}
// Vtable byte offsets of each field.
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_FUNCTIONS = 4,
VT_PIPELINE = 6,
VT_NAME = 8
};
// All accessors return nullptr when the field is absent.
const flatbuffers::Vector<flatbuffers::Offset<GpuFunction>> *functions() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<GpuFunction>> *>(VT_FUNCTIONS);
}
const flatbuffers::Vector<flatbuffers::Offset<GpuPipeline>> *pipeline() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<GpuPipeline>> *>(VT_PIPELINE);
}
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
}
// Bounds/structure validation; nested table vectors are verified per-table.
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_FUNCTIONS) &&
verifier.VerifyVector(functions()) &&
verifier.VerifyVectorOfTables(functions()) &&
VerifyOffset(verifier, VT_PIPELINE) &&
verifier.VerifyVector(pipeline()) &&
verifier.VerifyVectorOfTables(pipeline()) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
verifier.EndTable();
}
GpuLibraryT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
void UnPackTo(GpuLibraryT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
static flatbuffers::Offset<GpuLibrary> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for GpuLibrary tables.
struct GpuLibraryBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;  // offset returned by StartTable()
void add_functions(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuFunction>>> functions) {
fbb_.AddOffset(GpuLibrary::VT_FUNCTIONS, functions);
}
void add_pipeline(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuPipeline>>> pipeline) {
fbb_.AddOffset(GpuLibrary::VT_PIPELINE, pipeline);
}
void add_name(flatbuffers::Offset<flatbuffers::String> name) {
fbb_.AddOffset(GpuLibrary::VT_NAME, name);
}
explicit GpuLibraryBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
// Declared but never defined: builders are intentionally non-assignable.
GpuLibraryBuilder &operator=(const GpuLibraryBuilder &);
flatbuffers::Offset<GpuLibrary> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GpuLibrary>(end);
return o;
}
};
// One-call construction of a GpuLibrary table (add_* order as generated).
inline flatbuffers::Offset<GpuLibrary> CreateGpuLibrary(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuFunction>>> functions = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<GpuPipeline>>> pipeline = 0,
flatbuffers::Offset<flatbuffers::String> name = 0) {
GpuLibraryBuilder builder_(_fbb);
builder_.add_name(name);
builder_.add_pipeline(pipeline);
builder_.add_functions(functions);
return builder_.Finish();
}
// Convenience overload taking std::vector/C-string arguments; serializes
// each non-null argument, then delegates to CreateGpuLibrary.
inline flatbuffers::Offset<GpuLibrary> CreateGpuLibraryDirect(
flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<flatbuffers::Offset<GpuFunction>> *functions = nullptr,
const std::vector<flatbuffers::Offset<GpuPipeline>> *pipeline = nullptr,
const char *name = nullptr) {
auto functions__ = functions ? _fbb.CreateVector<flatbuffers::Offset<GpuFunction>>(*functions) : 0;
auto pipeline__ = pipeline ? _fbb.CreateVector<flatbuffers::Offset<GpuPipeline>>(*pipeline) : 0;
auto name__ = name ? _fbb.CreateString(name) : 0;
return MNN::CreateGpuLibrary(
_fbb,
functions__,
pipeline__,
name__);
}
// Object-API packing overload; defined later in this header.
flatbuffers::Offset<GpuLibrary> CreateGpuLibrary(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Deserializes this flat table into a newly allocated GpuBufferT.
// Caller takes ownership of the returned raw pointer.
// The unique_ptr guard makes the allocation exception-safe: if UnPackTo
// throws (e.g. std::bad_alloc while unpacking `content`), the object is
// destroyed instead of leaked; on success release() hands it to the caller.
inline GpuBufferT *GpuBuffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<GpuBufferT> _o(new GpuBufferT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies each field of this flat table into *_o; the nested Blob is
// deep-copied via its own UnPack.
inline void GpuBuffer::UnPackTo(GpuBufferT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
{ auto _e = access(); _o->access = _e; };
{ auto _e = storage(); _o->storage = _e; };
{ auto _e = content(); if (_e) _o->content = std::unique_ptr<BlobT>(_e->UnPack(_resolver)); };
}
// Serializes an object-API GpuBufferT back into _fbb.
inline flatbuffers::Offset<GpuBuffer> GpuBuffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
return CreateGpuBuffer(_fbb, _o, _rehasher);
}
// Object-API serializer: packs a GpuBufferT (including its nested Blob)
// and forwards to the field-wise CreateGpuBuffer.
inline flatbuffers::Offset<GpuBuffer> CreateGpuBuffer(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
(void)_rehasher;
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuBufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _access = _o->access;
auto _storage = _o->storage;
auto _content = _o->content ? CreateBlob(_fbb, _o->content.get(), _rehasher) : 0;
return MNN::CreateGpuBuffer(
_fbb,
_access,
_storage,
_content);
}
// Deserializes this flat table into a newly allocated GpuPipelineT.
// Caller takes ownership of the returned raw pointer.
// The unique_ptr guard makes the allocation exception-safe: if UnPackTo
// throws (e.g. std::bad_alloc while copying vectors/strings), the object
// is destroyed instead of leaked; on success release() hands it over.
inline GpuPipelineT *GpuPipeline::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<GpuPipelineT> _o(new GpuPipelineT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies each present field of this flat table into *_o; vectors and
// strings are element-wise copied into their std:: counterparts.
inline void GpuPipeline::UnPackTo(GpuPipelineT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
{ auto _e = localSize(); if (_e) { _o->localSize.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->localSize[_i] = _e->Get(_i); } } };
{ auto _e = key(); if (_e) _o->key = _e->str(); };
{ auto _e = metal(); if (_e) { _o->metal.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metal[_i] = _e->Get(_i); } } };
{ auto _e = vulkan(); if (_e) { _o->vulkan.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vulkan[_i] = _e->Get(_i); } } };
{ auto _e = openglComputeShader(); if (_e) _o->openglComputeShader = _e->str(); };
{ auto _e = openclKernel(); if (_e) _o->openclKernel = _e->str(); };
}
// Serializes an object-API GpuPipelineT back into _fbb.
inline flatbuffers::Offset<GpuPipeline> GpuPipeline::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
return CreateGpuPipeline(_fbb, _o, _rehasher);
}
// Object-API serializer: packs a GpuPipelineT and forwards to the
// field-wise CreateGpuPipeline. Empty vectors/strings are serialized as
// absent fields (offset 0).
inline flatbuffers::Offset<GpuPipeline> CreateGpuPipeline(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
(void)_rehasher;
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuPipelineT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _localSize = _o->localSize.size() ? _fbb.CreateVector(_o->localSize) : 0;
auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key);
auto _metal = _o->metal.size() ? _fbb.CreateVector(_o->metal) : 0;
auto _vulkan = _o->vulkan.size() ? _fbb.CreateVector(_o->vulkan) : 0;
auto _openglComputeShader = _o->openglComputeShader.empty() ? 0 : _fbb.CreateString(_o->openglComputeShader);
auto _openclKernel = _o->openclKernel.empty() ? 0 : _fbb.CreateString(_o->openclKernel);
return MNN::CreateGpuPipeline(
_fbb,
_localSize,
_key,
_metal,
_vulkan,
_openglComputeShader,
_openclKernel);
}
// Deserializes this flat table into a newly allocated GpuStageT.
// Caller takes ownership of the returned raw pointer.
// The unique_ptr guard makes the allocation exception-safe: if UnPackTo
// throws (e.g. std::bad_alloc while unpacking nested buffers), the object
// is destroyed instead of leaked; on success release() hands it over.
inline GpuStageT *GpuStage::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<GpuStageT> _o(new GpuStageT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies each present field of this flat table into *_o; the nested
// GpuBuffer tables are deep-copied via their own UnPack.
inline void GpuStage::UnPackTo(GpuStageT *_o, const flatbuffers::resolver_function_t *_resolver) const {
(void)_o;
(void)_resolver;
{ auto _e = pipeline(); if (_e) _o->pipeline = _e->str(); };
{ auto _e = groupSize(); if (_e) { _o->groupSize.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->groupSize[_i] = _e->Get(_i); } } };
{ auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } };
{ auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } };
{ auto _e = middleBuffer(); if (_e) { _o->middleBuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->middleBuffer[_i] = std::unique_ptr<GpuBufferT>(_e->Get(_i)->UnPack(_resolver)); } } };
{ auto _e = constBuffer(); if (_e) { _o->constBuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->constBuffer[_i] = std::unique_ptr<GpuBufferT>(_e->Get(_i)->UnPack(_resolver)); } } };
{ auto _e = globalSizeIndex(); _o->globalSizeIndex = _e; };
{ auto _e = globalSizeDivide(); if (_e) { _o->globalSizeDivide.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->globalSizeDivide[_i] = _e->Get(_i); } } };
{ auto _e = requireSize(); _o->requireSize = _e; };
}
// Serializes an object-API GpuStageT back into _fbb.
inline flatbuffers::Offset<GpuStage> GpuStage::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
return CreateGpuStage(_fbb, _o, _rehasher);
}
// Object-API serializer: packs a GpuStageT and forwards to the field-wise
// CreateGpuStage. Nested GpuBuffer vectors are packed through the stateless
// lambda + _VectorArgs trampoline required by CreateVector's callback form.
inline flatbuffers::Offset<GpuStage> CreateGpuStage(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
(void)_rehasher;
(void)_o;
struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuStageT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
auto _pipeline = _o->pipeline.empty() ? 0 : _fbb.CreateString(_o->pipeline);
auto _groupSize = _o->groupSize.size() ? _fbb.CreateVector(_o->groupSize) : 0;
auto _inputIndexes = _o->inputIndexes.size() ? _fbb.CreateVector(_o->inputIndexes) : 0;
auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0;
auto _middleBuffer = _o->middleBuffer.size() ? _fbb.CreateVector<flatbuffers::Offset<GpuBuffer>> (_o->middleBuffer.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuBuffer(*__va->__fbb, __va->__o->middleBuffer[i].get(), __va->__rehasher); }, &_va ) : 0;
auto _constBuffer = _o->constBuffer.size() ? _fbb.CreateVector<flatbuffers::Offset<GpuBuffer>> (_o->constBuffer.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuBuffer(*__va->__fbb, __va->__o->constBuffer[i].get(), __va->__rehasher); }, &_va ) : 0;
auto _globalSizeIndex = _o->globalSizeIndex;
auto _globalSizeDivide = _o->globalSizeDivide.size() ? _fbb.CreateVector(_o->globalSizeDivide) : 0;
auto _requireSize = _o->requireSize;
return MNN::CreateGpuStage(
_fbb,
_pipeline,
_groupSize,
_inputIndexes,
_outputIndexes,
_middleBuffer,
_constBuffer,
_globalSizeIndex,
_globalSizeDivide,
_requireSize);
}
// Deserializes this flat table into a newly allocated GpuFunctionT.
// Caller takes ownership of the returned raw pointer.
// The unique_ptr guard makes the allocation exception-safe: if UnPackTo
// throws (e.g. std::bad_alloc while unpacking stages), the object is
// destroyed instead of leaked; on success release() hands it over.
inline GpuFunctionT *GpuFunction::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  std::unique_ptr<GpuFunctionT> _o(new GpuFunctionT());
  UnPackTo(_o.get(), _resolver);
  return _o.release();
}
// Copies each present field of this GpuFunction table into the pre-allocated
// GpuFunctionT. Absent fields (null accessors) leave _o's defaults untouched.
inline void GpuFunction::UnPackTo(GpuFunctionT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  // Deep-unpacks each nested GpuStage table into an owned GpuStageT.
  // ("stags" is the schema's field name, presumably a typo for "stages".)
  { auto _e = stags(); if (_e) { _o->stags.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->stags[_i] = std::unique_ptr<GpuStageT>(_e->Get(_i)->UnPack(_resolver)); } } };
  { auto _e = name(); if (_e) _o->name = _e->str(); };
}
// Serializes an object-API GpuFunctionT back into a flatbuffer table.
// Thin dispatch to CreateGpuFunction below.
inline flatbuffers::Offset<GpuFunction> GpuFunction::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateGpuFunction(_fbb, _o, _rehasher);
}
// Builds a GpuFunction table in _fbb from the object-API GpuFunctionT.
// Empty vector/string members serialize as offset 0 (field omitted).
inline flatbuffers::Offset<GpuFunction> CreateGpuFunction(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // Capture-free lambda context for flatbuffers' CreateVector callback.
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuFunctionT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _stags = _o->stags.size() ? _fbb.CreateVector<flatbuffers::Offset<GpuStage>> (_o->stags.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuStage(*__va->__fbb, __va->__o->stags[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
  return MNN::CreateGpuFunction(
      _fbb,
      _stags,
      _name);
}
// Allocates a new object-API GpuLibraryT and copies this table's contents
// into it. Caller owns the returned pointer.
inline GpuLibraryT *GpuLibrary::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new GpuLibraryT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each present field of this GpuLibrary table into the pre-allocated
// GpuLibraryT, deep-unpacking the nested function and pipeline tables.
// Absent fields leave _o's defaults untouched.
inline void GpuLibrary::UnPackTo(GpuLibraryT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = functions(); if (_e) { _o->functions.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->functions[_i] = std::unique_ptr<GpuFunctionT>(_e->Get(_i)->UnPack(_resolver)); } } };
  { auto _e = pipeline(); if (_e) { _o->pipeline.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pipeline[_i] = std::unique_ptr<GpuPipelineT>(_e->Get(_i)->UnPack(_resolver)); } } };
  { auto _e = name(); if (_e) _o->name = _e->str(); };
}
// Serializes an object-API GpuLibraryT back into a flatbuffer table.
// Thin dispatch to CreateGpuLibrary below.
inline flatbuffers::Offset<GpuLibrary> GpuLibrary::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateGpuLibrary(_fbb, _o, _rehasher);
}
// Builds a GpuLibrary table in _fbb from the object-API GpuLibraryT.
// Nested GpuFunction/GpuPipeline tables are packed element-by-element;
// empty vector/string members serialize as offset 0 (field omitted).
inline flatbuffers::Offset<GpuLibrary> CreateGpuLibrary(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  // Capture-free lambda context for flatbuffers' CreateVector callback.
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuLibraryT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _functions = _o->functions.size() ? _fbb.CreateVector<flatbuffers::Offset<GpuFunction>> (_o->functions.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuFunction(*__va->__fbb, __va->__o->functions[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _pipeline = _o->pipeline.size() ? _fbb.CreateVector<flatbuffers::Offset<GpuPipeline>> (_o->pipeline.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuPipeline(*__va->__fbb, __va->__o->pipeline[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
  return MNN::CreateGpuLibrary(
      _fbb,
      _functions,
      _pipeline,
      _name);
}
// Runtime reflection table for the STORAGE_TYPE enum, used by flatbuffers'
// minireflect utilities (e.g. FlatBufferToString). Each TypeCode triple is
// { element type, vector flag, index into type_refs (-1 = none) }; an enum's
// type_refs points back at its own TypeTable.
inline const flatbuffers::TypeTable *STORAGE_TYPETypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    STORAGE_TYPETypeTable
  };
  static const char * const names[] = {
    "BUFFER",
    "UNIFORM",
    "IMAGE"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Runtime reflection table for the ACCESS_TYPE enum (minireflect support).
// Self-referential type_refs, mirroring STORAGE_TYPETypeTable above.
inline const flatbuffers::TypeTable *ACCESS_TYPETypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 0 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    ACCESS_TYPETypeTable
  };
  static const char * const names[] = {
    "READ_ONLY",
    "WRITE_ONLY",
    "READ_WRITE"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Runtime reflection table for the GpuBuffer table: 3 fields
// (access: ACCESS_TYPE enum, storage: STORAGE_TYPE enum, content: Blob).
// Third member of each TypeCode indexes into type_refs.
inline const flatbuffers::TypeTable *GpuBufferTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_CHAR, 0, 0 },
    { flatbuffers::ET_CHAR, 0, 1 },
    { flatbuffers::ET_SEQUENCE, 0, 2 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    ACCESS_TYPETypeTable,
    STORAGE_TYPETypeTable,
    BlobTypeTable
  };
  static const char * const names[] = {
    "access",
    "storage",
    "content"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Runtime reflection table for the GpuPipeline table: 6 fields, all scalars,
// vectors, or strings (second TypeCode member 1 = vector), so no type_refs
// are needed (nullptr).
inline const flatbuffers::TypeTable *GpuPipelineTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_STRING, 0, -1 },
    { flatbuffers::ET_CHAR, 1, -1 },
    { flatbuffers::ET_CHAR, 1, -1 },
    { flatbuffers::ET_STRING, 0, -1 },
    { flatbuffers::ET_STRING, 0, -1 }
  };
  static const char * const names[] = {
    "localSize",
    "key",
    "metal",
    "vulkan",
    "openglComputeShader",
    "openclKernel"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 6, type_codes, nullptr, nullptr, names
  };
  return &tt;
}
// Runtime reflection table for the GpuStage table: 9 fields. The two
// ET_SEQUENCE entries (middleBuffer, constBuffer) are vectors of GpuBuffer,
// both resolving to type_refs[0].
inline const flatbuffers::TypeTable *GpuStageTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_STRING, 0, -1 },
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_SEQUENCE, 1, 0 },
    { flatbuffers::ET_SEQUENCE, 1, 0 },
    { flatbuffers::ET_INT, 0, -1 },
    { flatbuffers::ET_INT, 1, -1 },
    { flatbuffers::ET_BOOL, 0, -1 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    GpuBufferTypeTable
  };
  static const char * const names[] = {
    "pipeline",
    "groupSize",
    "inputIndexes",
    "outputIndexes",
    "middleBuffer",
    "constBuffer",
    "globalSizeIndex",
    "globalSizeDivide",
    "requireSize"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Runtime reflection table for the GpuFunction table: 2 fields
// (stags: vector of GpuStage, name: string).
inline const flatbuffers::TypeTable *GpuFunctionTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 1, 0 },
    { flatbuffers::ET_STRING, 0, -1 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    GpuStageTypeTable
  };
  static const char * const names[] = {
    "stags",
    "name"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
// Runtime reflection table for the root GpuLibrary table: 3 fields
// (functions: vector of GpuFunction, pipeline: vector of GpuPipeline,
// name: string).
inline const flatbuffers::TypeTable *GpuLibraryTypeTable() {
  static const flatbuffers::TypeCode type_codes[] = {
    { flatbuffers::ET_SEQUENCE, 1, 0 },
    { flatbuffers::ET_SEQUENCE, 1, 1 },
    { flatbuffers::ET_STRING, 0, -1 }
  };
  static const flatbuffers::TypeFunction type_refs[] = {
    GpuFunctionTypeTable,
    GpuPipelineTypeTable
  };
  static const char * const names[] = {
    "functions",
    "pipeline",
    "name"
  };
  static const flatbuffers::TypeTable tt = {
    flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names
  };
  return &tt;
}
} // namespace MNN
#endif // FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_