diff --git a/.gitignore b/.gitignore
index 371bb619..7d1a55ed 100644
--- a/.gitignore
+++ b/.gitignore
@@ -63,29 +63,6 @@ obj/
 *.iws
 /out/
 
-# User-specific configurations
-.idea/caches/
-.idea/libraries/
-.idea/shelf/
-.idea/workspace.xml
-.idea/tasks.xml
-.idea/.name
-.idea/compiler.xml
-.idea/copyright/profiles_settings.xml
-.idea/encodings.xml
-.idea/misc.xml
-.idea/modules.xml
-.idea/scopes/scope_settings.xml
-.idea/dictionaries
-.idea/vcs.xml
-.idea/jsLibraryMappings.xml
-.idea/datasources.xml
-.idea/dataSources.ids
-.idea/sqlDataSources.xml
-.idea/dynamic.xml
-.idea/uiDesigner.xml
-.idea/assetWizardSettings.xml
-
 # OS-specific files
 .DS_Store
 .DS_Store?
@@ -113,14 +90,9 @@ hs_err_pid*
 
 ## Plugin-specific files:
 
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
 # JIRA plugin
 atlassian-ide-plugin.xml
 
-# Mongo Explorer plugin
-.idea/mongoSettings.xml
 
 # Crashlytics plugin (for Android Studio and IntelliJ)
 com_crashlytics_export_strings.xml
@@ -310,20 +282,7 @@ build.mac/
 
 ### Projects
 *.podspec.json
-demo/android/.idea
-demo/android/.idea/gradle.xml
-demo/android/.idea/misc.xml
-demo/android/.idea/runConfigurations.xml
-demo/android/.idea/vcs.xml
-demo/android/.idea/caches/build_file_checksums.ser
 demo/android/app/libs/
-project/android/.idea/.name
-project/android/.idea/gradle.xml
-project/android/.idea/misc.xml
-project/android/.idea/modules.xml
-project/android/.idea/runConfigurations.xml
-project/android/.idea/vcs.xml
-project/android/.idea/caches/build_file_checksums.ser
 
 ### Temps
 3rd_party/flatbuffers/tmp
@@ -333,30 +292,15 @@ schema/private
 tools/converter/source/IR
 benchmark/benchmark.txt
 
-### Python MNN
-pymnn/android/build/
-pymnn/android/local.properties
-pymnn/android/.idea
-pymnn/android/.idea/.name
-pymnn/android/.idea/gradle.xml
-pymnn/android/.idea/misc.xml
-pymnn/android/.idea/modules.xml
-pymnn/android/.idea/runConfigurations.xml
-pymnn/android/.idea/vcs.xml
-pymnn/android/.idea/caches/build_file_checksums.ser
-
 buildios
 build*/
-include/MNN/VCS.h
 source/backend/opencl/execution/cl/codegen/opencl_program.cc
 source/backend/opencl/execution/cl/opencl_program.cc
 # FIXME(haijing): MTL issues.....
 # source/backend/metal/MetalOPRegister.mm
 source/backend/opengl/AllShader.cpp
-include/MNN/backend/opengl/shaders/AllShader.h
+source/backend/opengl/AllShader.hpp
 source/backend/vulkan/compiler/AllShader.cpp
-include/MNN/backend/vulkan/shaders/AllShader.h
-.idea
 project/ios/iOS_64
 project/ios/iOS_32
 project/ios/SIM_32
@@ -366,5 +310,9 @@ project/ios/MNN_iOS64
 project/ios/MNN_iOS32
 project/ios/MNN_SIM_32
 project/ios/MNN_SIM_64
+.idea/
+include/MNN/VCS.h
+schema/current/
 pymnn_build/
+macosbuild
 
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c34bcd24..d8f0cfe3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -10,7 +10,7 @@ if(NOT DEFINED MNN_VERSION_PATCH)
   set(MNN_VERSION_PATCH 1)
 endif()
 if(NOT DEFINED MNN_VERSION_BUILD)
-  set(MNN_VERSION_BUILD 7)
+  set(MNN_VERSION_BUILD 8)
 endif()
 if(NOT DEFINED MNN_VERSION_SUFFIX)
   set(MNN_VERSION_SUFFIX git)
 endif()
@@ -82,6 +82,14 @@ IF(APPLE AND MNN_AAPL_FMWK AND MNN_SEP_BUILD)
   message(WARNING "MNN_SEP_BUILD AND MNN_AAPL_FMWK can't coexist. Turning off MNN_SEP_BUILD")
   SET(MNN_SEP_BUILD OFF)
 ENDIF()
+IF(MSVC OR WIN32)
+  message(WARNING "MNN_SEP_BUILD IS TROUBLESOME ON Windows. Forcing OFF...")
Forcing OFF...") + SET(MNN_SEP_BUILD OFF) +ENDIF() + + + + include(${CMAKE_CURRENT_LIST_DIR}/cmake/macros.cmake) @@ -264,7 +272,9 @@ FOREACH(SCHEMA_SRC ${MNN_SCHEMA_SRC}) ENDIF() LIST(APPEND SCHEMA_TARGETS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h") ENDFOREACH() -add_custom_target(MNN_SCHEMA_GEN DEPENDS ${SCHEMA_TARGETS}) + +# GenVCSHDR is not actually required. But this allows sub-targets using VCS.h without extra work in their CMake dependency declaration +add_custom_target(MNN_SCHEMA_GEN DEPENDS ${SCHEMA_TARGETS} GenVCSHDR) set(MNN_OBJECTS_TO_LINK "") set(MNN_TARGETS "") @@ -386,6 +396,8 @@ IF(MNN_BUILD_SHARED_LIBS) target_compile_definitions(${TARGET} PRIVATE "-DBUILDING_MNN_DLL") target_compile_definitions(${TARGET} INTERFACE "-DUSING_MNN_DLL") endforeach() + target_compile_definitions(MNN PRIVATE "-DBUILDING_MNN_DLL") + target_compile_definitions(MNN INTERFACE "-DUSING_MNN_DLL") endif() ELSE() add_library(MNN STATIC ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS}) @@ -395,14 +407,19 @@ set_target_properties(MNN PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURR if(APPLE) IF(MNN_AAPL_FMWK) - set_target_properties(MNN PROPERTIES FRAMEWORK TRUE) - set_target_properties(MNN PROPERTIES + SET_TARGET_PROPERTIES(MNN PROPERTIES FRAMEWORK TRUE) + SET_TARGET_PROPERTIES(MNN PROPERTIES MACOSX_FRAMEWORK_IDENTIFIER com.alibaba.MNN MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${PACKAGE_VERSION} MACOSX_FRAMEWORK_BUNDLE_VERSION ${PACKAGE_VERSION} XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer" ) - set_target_properties(MNN PROPERTIES MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/project/ios/MNN/Info.plist) + SET_TARGET_PROPERTIES(MNN PROPERTIES MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/project/ios/MNN/Info.plist) + IF(DEFINED MNN_METALLIB_PATH) + message(STATUS "Metal Library Path:${MNN_METALLIB_PATH}") + SET_TARGET_PROPERTIES(MNN PROPERTIES RESOURCE "${MNN_METALLIB_PATH}") + SET_SOURCE_FILES_PROPERTIES("${MNN_METALLIB_PATH}" PROPERTIES MACOSX_PACKAGE_LOCATION Resources/) + ENDIF() ENDIF() find_library(FOUNDATION Foundation REQUIRED) target_link_libraries(MNN PUBLIC ${FOUNDATION}) @@ -530,8 +547,8 @@ list(APPEND MNN_TARGETS MNN) target_compile_options(${TARGET} PRIVATE -fvisibility-inlines-hidden -fvisibility=hidden) endif() else() - add_compile_definitions("_CRT_SECURE_NO_WARNINGS") - add_compile_options("/wd4267" "/wd4018" "/wd4251" "/wd4996" "/wd4244" "/wd4146" "/wd4129" "/wd4305") + target_compile_definitions(${TARGET} PRIVATE "_CRT_SECURE_NO_WARNINGS") + target_compile_options(${TARGET} PRIVATE "/wd4267" "/wd4018" "/wd4251" "/wd4996" "/wd4244" "/wd4146" "/wd4129" "/wd4305") endif() ENDFOREACH() list(REMOVE_ITEM MNN_TARGETS MNN) @@ -597,7 +614,4 @@ ELSE() FOREACH(HDR ${MNN_PUB_HDRS}) SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/ ) ENDFOREACH() - IF(MNN_METAL) - SET_SOURCE_FILES_PROPERTIES(${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib PROPERTIES MACOSX_PACKAGE_LOCATION Resources/) - ENDIF() ENDIF() diff --git a/MNN.podspec b/MNN.podspec index 74882084..98ff0fd5 100644 --- a/MNN.podspec +++ b/MNN.podspec @@ -31,44 +31,8 @@ Pod::Spec.new do |s| s.platform = :ios s.ios.deployment_target = '8.0' s.requires_arc = true - - s.prepare_command = <<-CMD - schema/generate.sh - python source/backend/metal/MetalCodeGen.py source/backend/metal/ source/backend/metal/MetalOPRegister.mm - CMD - s.source = {:git => "https://github.com/alibaba/MNN.git",:branch=> 'master'} 
 s.frameworks = 'Metal', 'Accelerate'
 s.library = 'c++'
-  s.subspec 'core' do |a|
-    a.source_files = \
-    'include/MNN/*.{h,hpp}',\
-    'include/MNN/expr/*.{h,hpp}',\
-    'schema/current/*.{h}',\
-    '3rd_party/flatbuffers/include/flatbuffers/*.{h}',\
-    'source/core/**/*.{h,c,m,mm,cc,hpp,cpp}',\
-    'source/cv/**/*.{h,c,m,mm,cc,hpp,cpp}',\
-    'source/math/**/*.{h,c,m,mm,cc,hpp,cpp,metal}',\
-    'source/shape/*.{h,c,m,mm,cc,hpp,cpp}',\
-    'source/backend/cpu/*.{h,c,m,mm,cc,S,hpp,cpp}',\
-    'source/backend/cpu/compute/*.{h,c,m,mm,cc,S,hpp,cpp}',\
-    'source/backend/metal/*.{h,c,m,mm,cc,hpp,cpp,metal}',\
-    'express/**/*.{hpp,cpp}'
-  end
-  s.subspec 'armv7' do |a|
-    a.source_files = 'source/backend/cpu/arm/arm32/*.{h,c,m,mm,cc,S,hpp,cpp}'
-    a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'}
-  end
-  s.subspec 'aarch64' do |a|
-    a.source_files = 'source/backend/cpu/arm/arm64/*.{h,c,m,mm,cc,S,hpp,cpp}'
-    a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'}
-  end
-  s.subspec 'metal' do |a|
-    a.source_files = 'source/backend/metal/**/*.{h,c,m,mm,cc,hpp,cpp,metal}'
-  end
-
-  s.default_subspecs = 'core', 'armv7', 'aarch64', 'metal'
-  s.header_mappings_dir = 'include/'
-
-  s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/include" "$(PODS_TARGET_SRCROOT)/source/" "$(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include" "$(PODS_TARGET_SRCROOT)/source" "$(PODS_TARGET_SRCROOT)/3rd_party/half"', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'}
-  s.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/include"' }
+  s.source = {:http=>"https://github.com/alibaba/MNN/releases/download/#{s.version}/MNN-iOS-#{s.version}.zip"}
+  s.vendored_frameworks = "MNN.framework"
 end
diff --git a/README.md b/README.md
index 6ccfdda3..4bb34941 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 [中文版本](README_CN.md)
 
-[Build Status](BUILDSTATUS.md)
+[![Build Status](https://travis-ci.com/alibaba/MNN.svg?branch=master)](https://travis-ci.com/alibaba/MNN)
 
 ## Intro
 MNN is a lightweight deep neural network inference engine. It loads models and do inference on devices. At present, MNN has been integrated in more than 20 apps of Alibaba-inc, such as Taobao, Tmall, Youku and etc., covering live broadcast, short video capture, search recommendation, product searching by image, interactive marketing, equity distribution, security risk control and other scenarios. In addition, MNN is also used on embedded devices, such as IoT.
diff --git a/ciscripts/Android/32.sh b/ciscripts/Android/32.sh
index 81bd1482..4a8f8825 100755
--- a/ciscripts/Android/32.sh
+++ b/ciscripts/Android/32.sh
@@ -1,6 +1,8 @@
 set -e
 schema/generate.sh
 cd project/android
+rm -rf build_32
 mkdir build_32
 cd build_32
-../build_32.sh -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON
+cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="armeabi-v7a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../
+make -j8
diff --git a/ciscripts/Android/32OMP.sh b/ciscripts/Android/32OMP.sh
index 6aa6d699..4259f1a0 100755
--- a/ciscripts/Android/32OMP.sh
+++ b/ciscripts/Android/32OMP.sh
@@ -1,6 +1,8 @@
 set -e
 schema/generate.sh
 cd project/android
+rm -rf build_32
 mkdir build_32
 cd build_32
-../build_32.sh -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF
+cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="armeabi-v7a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../
+make -j8
diff --git a/ciscripts/Android/64.sh b/ciscripts/Android/64.sh
index 23f4d02f..d0399fed 100755
--- a/ciscripts/Android/64.sh
+++ b/ciscripts/Android/64.sh
@@ -3,4 +3,5 @@ schema/generate.sh
 cd project/android
 mkdir build_64
 cd build_64
-../build_64.sh -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON
+cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="arm64-v8a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../
+make -j8
diff --git a/ciscripts/Android/64OMP.sh b/ciscripts/Android/64OMP.sh
index 3259dd69..228e0cd5 100755
--- a/ciscripts/Android/64OMP.sh
+++ b/ciscripts/Android/64OMP.sh
@@ -1,6 +1,8 @@
 set -e
 schema/generate.sh
 cd project/android
+rm -rf build_64
 mkdir build_64
 cd build_64
-../build_64.sh -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF
+cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="arm64-v8a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. 
-DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../ +make -j8 diff --git a/demo/android/app/gradle/wrapper/gradle-wrapper.jar b/demo/android/app/gradle/wrapper/gradle-wrapper.jar deleted file mode 100644 index f6b961fd..00000000 Binary files a/demo/android/app/gradle/wrapper/gradle-wrapper.jar and /dev/null differ diff --git a/demo/android/app/gradle/wrapper/gradle-wrapper.properties b/demo/android/app/gradle/wrapper/gradle-wrapper.properties deleted file mode 100644 index e75aaa44..00000000 --- a/demo/android/app/gradle/wrapper/gradle-wrapper.properties +++ /dev/null @@ -1,6 +0,0 @@ -#Mon Jan 06 13:18:29 CST 2020 -distributionBase=GRADLE_USER_HOME -distributionPath=wrapper/dists -zipStoreBase=GRADLE_USER_HOME -zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.4.1-all.zip diff --git a/demo/android/app/gradlew b/demo/android/app/gradlew deleted file mode 100644 index cccdd3d5..00000000 --- a/demo/android/app/gradlew +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env sh - -############################################################################## -## -## Gradle start up script for UN*X -## -############################################################################## - -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. -while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - -APP_NAME="Gradle" -APP_BASE_NAME=`basename "$0"` - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" - -# Use the maximum available, or set MAX_FD != -1 to use that value. -MAX_FD="maximum" - -warn () { - echo "$*" -} - -die () { - echo - echo "$*" - echo - exit 1 -} - -# OS specific support (must be 'true' or 'false'). -cygwin=false -msys=false -darwin=false -nonstop=false -case "`uname`" in - CYGWIN* ) - cygwin=true - ;; - Darwin* ) - darwin=true - ;; - MINGW* ) - msys=true - ;; - NONSTOP* ) - nonstop=true - ;; -esac - -CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar - -# Determine the Java command to use to start the JVM. -if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - if [ ! -x "$JAVACMD" ] ; then - die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." - fi -else - JAVACMD="java" - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. - -Please set the JAVA_HOME variable in your environment to match the -location of your Java installation." -fi - -# Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then - MAX_FD_LIMIT=`ulimit -H -n` - if [ $? -eq 0 ] ; then - if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then - MAX_FD="$MAX_FD_LIMIT" - fi - ulimit -n $MAX_FD - if [ $? 
-ne 0 ] ; then - warn "Could not set maximum file descriptor limit: $MAX_FD" - fi - else - warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" - fi -fi - -# For Darwin, add options to specify how the application appears in the dock -if $darwin; then - GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" -fi - -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then - APP_HOME=`cygpath --path --mixed "$APP_HOME"` - CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` - JAVACMD=`cygpath --unix "$JAVACMD"` - - # We build the pattern for arguments to be converted via cygpath - ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` - SEP="" - for dir in $ROOTDIRSRAW ; do - ROOTDIRS="$ROOTDIRS$SEP$dir" - SEP="|" - done - OURCYGPATTERN="(^($ROOTDIRS))" - # Add a user-defined pattern to the cygpath arguments - if [ "$GRADLE_CYGPATTERN" != "" ] ; then - OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" - fi - # Now convert the arguments - kludge to limit ourselves to /bin/sh - i=0 - for arg in "$@" ; do - CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` - CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option - - if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition - eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` - else - eval `echo args$i`="\"$arg\"" - fi - i=$((i+1)) - done - case $i in - (0) set -- ;; - (1) set -- "$args0" ;; - (2) set -- "$args0" "$args1" ;; - (3) set -- "$args0" "$args1" "$args2" ;; - (4) set -- "$args0" "$args1" "$args2" "$args3" ;; - (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; - (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; - (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; - (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; - (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; - esac -fi - -# Escape application args -save () { - for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done - echo " " -} -APP_ARGS=$(save "$@") - -# Collect all arguments for the java command, following the shell quoting and substitution rules -eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" - -# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong -if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then - cd "$(dirname "$0")" -fi - -exec "$JAVACMD" "$@" diff --git a/demo/android/app/gradlew.bat b/demo/android/app/gradlew.bat deleted file mode 100644 index e95643d6..00000000 --- a/demo/android/app/gradlew.bat +++ /dev/null @@ -1,84 +0,0 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
-set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/demo/android/app/src/main/java/com/taobao/android/mnndemo/VideoActivity.java b/demo/android/app/src/main/java/com/taobao/android/mnndemo/VideoActivity.java index 64ef4548..2dfe898e 100644 --- a/demo/android/app/src/main/java/com/taobao/android/mnndemo/VideoActivity.java +++ b/demo/android/app/src/main/java/com/taobao/android/mnndemo/VideoActivity.java @@ -121,7 +121,6 @@ public class VideoActivity extends AppCompatActivity implements AdapterView.OnIt Common.copyAssetResource2File(getBaseContext(), MobileModelFileName, mMobileModelPath); mMobileTaiWords = TxtFileReader.getUniqueUrls(getBaseContext(), MobileWordsFileName, Integer.MAX_VALUE); } catch (Throwable e) { - Log.v(null,mMobileModelPath); throw new RuntimeException(e); } diff --git a/demo/exec/expressDemo.cpp b/demo/exec/expressDemo.cpp index 8ab5153a..3c4e1fae 100644 --- a/demo/exec/expressDemo.cpp +++ b/demo/exec/expressDemo.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -130,26 +130,22 @@ int main(int argc, const char* argv[]) { } auto modelFileName = argv[1]; FUNC_PRINT_ALL(modelFileName, s); - auto device = Optimizer::CPU; + auto exe = Executor::getGlobalExecutor(); + MNN::BackendConfig config; + config.precision = MNN::BackendConfig::Precision_Low; + MNNForwardType forwardType = MNN_FORWARD_CPU; if (argc >= 3) { - device = (Optimizer::Device)atoi(argv[2]); + forwardType = (MNNForwardType)atoi(argv[2]); } + exe->setGlobalExecutorConfig(forwardType, config, 4); auto model = Variable::loadMap(modelFileName); auto inputOutput = Variable::getInputAndOutput(model); - Optimizer::Config config; - config.device = device; - auto optimizer = Optimizer::create(config); auto inputs = inputOutput.first; auto outputs = inputOutput.second; - if (nullptr == optimizer) { - MNN_ERROR("Can't find optimizer for %d\n", device); - return 
0; - } int testTime = 10; if (argc >= 4) { testTime = atoi(argv[3]); } - optimizer->onExecute(Variable::mapToSequence(outputs)); Variable::save(Variable::mapToSequence(outputs), "temp.mnn"); auto input = inputs.begin()->second; auto output = outputs.begin()->second; @@ -172,6 +168,7 @@ int main(int argc, const char* argv[]) { return 0; } auto size = outputInfo->size; + exe->gc(Executor::FULL); //Test Speed if (testTime > 0){ //Let the frequence up diff --git a/demo/exec/segment.cpp b/demo/exec/segment.cpp index 0b1015b5..3a607cc6 100644 --- a/demo/exec/segment.cpp +++ b/demo/exec/segment.cpp @@ -17,7 +17,6 @@ #include #include #include -#include #include #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" @@ -38,11 +37,6 @@ int main(int argc, const char* argv[]) { MNN_ERROR("Invalid Model\n"); return 0; } - Optimizer::Config config; - config.device = Optimizer::CPU; - auto optimizer = Optimizer::create(config); - optimizer->onExecute(Variable::mapToSequence(net.second)); - auto input = net.first.begin()->second; auto info = input->getInfo(); if (nullptr == info) { diff --git a/demo/ios/Podfile b/demo/ios/Podfile index aa6d15d9..35add270 100644 --- a/demo/ios/Podfile +++ b/demo/ios/Podfile @@ -4,5 +4,5 @@ platform :ios target 'playground' do platform :ios, '8.0' - pod 'MNN', :path => "../../" + pod 'MNN' end diff --git a/demo/ios/Podfile.lock b/demo/ios/Podfile.lock index 684568c7..59610e1e 100644 --- a/demo/ios/Podfile.lock +++ b/demo/ios/Podfile.lock @@ -1,13 +1,5 @@ PODS: - - MNN (0.2.1.7): - - MNN/aarch64 (= 0.2.1.7) - - MNN/armv7 (= 0.2.1.7) - - MNN/core (= 0.2.1.7) - - MNN/metal (= 0.2.1.7) - - MNN/aarch64 (0.2.1.7) - - MNN/armv7 (0.2.1.7) - - MNN/core (0.2.1.7) - - MNN/metal (0.2.1.7) + - MNN (1.0.0) DEPENDENCIES: - MNN (from `../../`) @@ -17,8 +9,8 @@ EXTERNAL SOURCES: :path: "../../" SPEC CHECKSUMS: - MNN: 35ce69746fdb1f2b9a810c91d7494bfc9b5d5f87 + MNN: 31075cbcadf73e96c1bf29cccc97e4ef131e0650 PODFILE CHECKSUM: b0491e2fa8f04fdaec2683a1c6c9de3a1d483842 -COCOAPODS: 1.8.4 +COCOAPODS: 1.5.3 diff --git a/demo/ios/playground.xcodeproj/project.pbxproj b/demo/ios/playground.xcodeproj/project.pbxproj index 19aa1483..4d643bcb 100644 --- a/demo/ios/playground.xcodeproj/project.pbxproj +++ b/demo/ios/playground.xcodeproj/project.pbxproj @@ -422,14 +422,14 @@ buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; CODE_SIGN_STYLE = Automatic; - DEVELOPMENT_TEAM = 3P5LJKKF4Q; + DEVELOPMENT_TEAM = ""; INFOPLIST_FILE = playground/Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 9.0; LD_RUNPATH_SEARCH_PATHS = ( "$(inherited)", "@executable_path/Frameworks", ); - PRODUCT_BUNDLE_IDENTIFIER = com.taobao.mnndemo.xxx; + PRODUCT_BUNDLE_IDENTIFIER = com.taobao.mnndemo; PRODUCT_NAME = "$(TARGET_NAME)"; TARGETED_DEVICE_FAMILY = "1,2"; }; @@ -441,7 +441,7 @@ buildSettings = { ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; CODE_SIGN_STYLE = Automatic; - DEVELOPMENT_TEAM = 3P5LJKKF4Q; + DEVELOPMENT_TEAM = ""; INFOPLIST_FILE = playground/Info.plist; IPHONEOS_DEPLOYMENT_TARGET = 9.0; LD_RUNPATH_SEARCH_PATHS = ( @@ -449,7 +449,7 @@ "@executable_path/Frameworks", ); ONLY_ACTIVE_ARCH = YES; - PRODUCT_BUNDLE_IDENTIFIER = com.taobao.mnndemo.xxx; + PRODUCT_BUNDLE_IDENTIFIER = com.taobao.mnndemo; PRODUCT_NAME = "$(TARGET_NAME)"; TARGETED_DEVICE_FAMILY = "1,2"; }; diff --git a/demo/ios/playground.xcodeproj/xcshareddata/xcschemes/playground.xcscheme b/demo/ios/playground.xcodeproj/xcshareddata/xcschemes/playground.xcscheme index d66f7190..975f02a4 100644 --- 
a/demo/ios/playground.xcodeproj/xcshareddata/xcschemes/playground.xcscheme +++ b/demo/ios/playground.xcodeproj/xcshareddata/xcschemes/playground.xcscheme @@ -27,6 +27,8 @@ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB" selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB" shouldUseLaunchSchemeArgsEnv = "YES"> + + - - + + + + -#include "BasicOptimizer_generated.h" +#include "core/TensorUtils.hpp" +#include namespace MNN { namespace Express { +class Executor::Profiler { +public: + void reset(); + void dump() const; + void add(int opType, float timeInMs); +private: + std::map mTimes; +}; +void Executor::Profiler::reset() { + mTimes.clear(); +} +void Executor::Profiler::dump() const { + for (auto iter : mTimes) { + MNN_PRINT("%s: %f ms\n", EnumNameOpType((OpType)iter.first), iter.second); + } +} +void Executor::Profiler::add(int opType, float timeInMs) { + auto iter = mTimes.find(opType); + if (iter == mTimes.end()) { + mTimes[opType] = timeInMs; + return; + } + iter->second += timeInMs; +} + void Executor::setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread) { std::lock_guard _l(mMutex); auto creator = MNNGetExtraBackendCreator(type); @@ -22,334 +48,49 @@ void Executor::setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& MNN_ERROR("Error to find creator of %d\n", type); return; } - mSolutions.clear(); + _resetCache(); Backend::Info info; info.type = type; info.numThread = numberThread; std::shared_ptr bn(creator->onCreate(info)); mBackend = bn; } -void Executor::gc(GCFlag flag) { - std::lock_guard _l(mMutex); - mSolutions.clear(); - mBackend->onClearBuffer(); +void Executor::_resetCache() { } -std::shared_ptr Executor::getGlobalExecutor() { - static std::once_flag of; - static std::shared_ptr gExecutor; - std::call_once(of, [&]() { - auto creator = MNNGetExtraBackendCreator(MNN_FORWARD_CPU); - SizeComputerSuite::init(); +void Executor::gc(GCFlag flag) { + std::lock_guard _l(mMutex); + _resetCache(); + if (FULL == flag) { + mBackend->onClearBuffer(); + mBackupBackend->onClearBuffer(); + } +} +Executor::Executor(std::shared_ptr backend) { + mBackend = backend; + if (mBackend->type() == MNN_FORWARD_CPU) { + mBackupBackend = mBackend; + } else { Backend::Info info; info.type = MNN_FORWARD_CPU; info.numThread = 1; - std::shared_ptr bn(creator->onCreate(info)); - gExecutor.reset(new Executor(bn)); - }); - return gExecutor; + auto creator = MNNGetExtraBackendCreator(MNN_FORWARD_CPU); + mBackupBackend.reset(creator->onCreate(info)); + } + _resetCache(); +#ifdef MNN_EXPR_ENABLE_PROFILER + mProfiler.reset(new Profiler); +#endif +} +Executor::~Executor(){ + mBackend = nullptr; + mBackupBackend = nullptr; +} +void Executor::_addToCache(const std::vector>& caches) { + //FUNC_PRINT(mCaches.size()); } -class Solution { -public: - Solution(){} - virtual ~ Solution(){} - virtual ErrorCode computeInfo(Expr* expr) = 0; - virtual ErrorCode compute(Expr* expr) = 0; -}; -class UnitSolution : public Solution { -public: - UnitSolution(Expr* expr, std::shared_ptr bn) { - mOutputs.resize(expr->outputSize()); - mContent.resize(expr->outputSize()); - for (int i=0; ibuffer().host = nullptr; - } - mInputs.resize(expr->inputs().size()); - mInputContent.resize(expr->inputs().size()); - for (int i=0; ibuffer().host = nullptr; - } - mBackend = bn; - mExpr = expr; - } - ~ UnitSolution() { - for (auto t : mOutputs) { - if (nullptr != t->host()) { - mBackend->onReleaseBuffer(t, Backend::DYNAMIC); - } - } - mExpr->setInfoDirty(); - } - 
virtual ErrorCode computeInfo(Expr* expr) override { - auto op = expr->get(); - for (int i = 0; i < expr->inputs().size(); ++i) { - auto inputExpr = expr->inputs()[i]->expr(); - Utils::copyInfoToTensor(mInputContent[i].get(), inputExpr.first->outputInfo(inputExpr.second)); - } - bool res = SizeComputer::computeOutputSize(op, mInputs, mOutputs); - if (!res) { - // Compute Error - #ifdef MNN_EXPRESS_ERROR_REPORT - FUNC_PRINT(op->type()); - #endif - return COMPUTE_SIZE_ERROR; - } - for (int i = 0; i < mOutputs.size(); ++i) { - auto tensor = mOutputs[i]; - for (int j = 0; j < tensor->dimensions(); ++j) { - if (tensor->length(j) <= 0) { - #ifdef MNN_EXPRESS_ERROR_REPORT - if (nullptr != op->name()) { - auto name = op->name()->str(); - MNN_ERROR("Error to compute shape for %s\n", op->name()->c_str()); - } - #endif - return COMPUTE_SIZE_ERROR; - } - } - auto shape = expr->outputInfo(i); - Utils::copyTensorToInfo(shape, tensor); - } - mNeedResize = true; - return NO_ERROR; - } - ErrorCode prepare(Expr* expr) { - for (int i = 0; i < expr->inputs().size(); ++i) { - auto inputExpr = expr->inputs()[i]->expr(); - mInputContent[i]->buffer().host = (uint8_t*)inputExpr.first->outputInfo(inputExpr.second)->ptr; - } - if (nullptr == mExecution) { - mExecution.reset(mBackend->onCreate(mInputs, mOutputs, expr->get())); - } - for (auto& output : mOutputs) { - if (output->host() != nullptr) { - mBackend->onReleaseBuffer(output, Backend::DYNAMIC); - output->buffer().host = nullptr; - } - TensorUtils::setLinearLayout(output); - auto res = mBackend->onAcquireBuffer(output, Backend::DYNAMIC); - if (!res) { - return OUT_OF_MEMORY; - } - } - for (int i = 0; i < mOutputs.size(); ++i) { - expr->outputInfo(i)->ptr = mOutputs[i]->host(); - } - return mExecution->onResize(mInputs, mOutputs); - } - virtual ErrorCode compute(Expr* expr) override { - if (mNeedResize) { - auto code = prepare(expr); - if (NO_ERROR != code) { - return code; - } - mNeedResize = false; - } - mBackend->onExecuteBegin(); - auto code = mExecution->onExecute(mInputs, mOutputs); - mBackend->onExecuteEnd(); - return code; - } -private: - std::shared_ptr mExecution; - std::vector mInputs; - std::vector mOutputs; - std::vector> mContent; - std::vector> mInputContent; - std::shared_ptr mBackend; - bool mNeedResize = false; - Expr* mExpr; -}; -static Tensor::DimensionType getDimType(const Tensor* origin) { - auto dimformat = TensorUtils::getDescribe(origin)->dimensionFormat; - switch (dimformat) { - case MNN_DATA_FORMAT_NHWC: - return Tensor::TENSORFLOW; - case MNN_DATA_FORMAT_NCHW: - return Tensor::CAFFE; - case MNN_DATA_FORMAT_NC4HW4: - return Tensor::CAFFE_C4; - default: - break; - } - return Tensor::CAFFE; -} -class MergeExpr : public Solution{ -public: - MergeExpr(const Optimizer::Merge* merge, int inputSize, int outputSize) { - MNN_ASSERT(nullptr != merge); - MNN_ASSERT(nullptr != merge->backend()); - MNN_ASSERT(nullptr != merge->oplists()); - MNN_ASSERT(nullptr != merge->outputIndexes()); - - //Create tensors - Schedule::ScheduleInfo schedule; - std::vector pipelineInfos; - schedule.allTensors.resize(merge->tensorNumber()); - for (int i=0; itensorNumber(); ++i) { - schedule.allTensors[i].second.reset(new Tensor); - } - pipelineInfos.resize(merge->oplists()->size()); - for (int i = 0; i < merge->oplists()->size(); ++i) { - auto& pipelineInfo = pipelineInfos[i]; - auto op = merge->oplists()->GetAs(i); - if (nullptr != op->inputIndexes()) { - auto data = op->inputIndexes()->data(); - pipelineInfo.inputs.resize(op->inputIndexes()->size()); - for (int j 
= 0; j < op->inputIndexes()->size(); ++j) { - auto index = data[j]; - schedule.allTensors[index].first += 1; - pipelineInfo.inputs[j] = schedule.allTensors[index].second.get(); - } - } - if (nullptr != op->outputIndexes()) { - auto data = op->outputIndexes()->data(); - pipelineInfo.outputs.resize(op->outputIndexes()->size()); - for (int j = 0; j < op->outputIndexes()->size(); ++j) { - auto index = data[j]; - pipelineInfo.outputs[j] = schedule.allTensors[index].second.get(); - } - } - pipelineInfo.op = op; - } - mOutputs.resize(merge->outputIndexes()->size()); - for (int i=0; ioutputIndexes()->size(); ++i) { - schedule.allTensors[merge->outputIndexes()->data()[i]].first += 1; - mOutputs[i].first = schedule.allTensors[merge->outputIndexes()->data()[i]].second.get(); - } - if (nullptr != merge->inputIndexes()) { - mInputs.resize(merge->inputIndexes()->size()); - for (int i=0; iinputIndexes()->size(); ++i) { - mInputs[i].first = schedule.allTensors[merge->inputIndexes()->data()[i]].second.get(); - mInputs[i].second.reset(new Tensor); - } - } - //Create Backend - auto backendInfo = merge->backend(); - auto creator = MNNGetExtraBackendCreator((MNNForwardType)backendInfo->type()); - if (nullptr == creator) { - mValid = false; - MNN_ERROR("Get Backend Creator Error\n"); - return; - } - Backend::Info info; - info.type = (MNNForwardType)backendInfo->type(); - info.numThread = backendInfo->numberThread(); - info.mode = Backend::Info::INDIRECT; - BackendConfig backendConfig; - backendConfig.memory = (BackendConfig::MemoryMode)backendInfo->memroy(); - backendConfig.power = (BackendConfig::PowerMode)backendInfo->power(); - backendConfig.precision = (BackendConfig::PrecisionMode)backendInfo->precision(); - info.user = &backendConfig; - creator->onValid(info); - mDirect = info.mode == Backend::Info::DIRECT; - schedule.pipelineInfo.emplace_back(std::make_pair(info, pipelineInfos)); - mSession.reset(new Session(schedule)); - } - - ~ MergeExpr () { - //Do nothing - } - virtual ErrorCode computeInfo(Expr* expr) override { - MNN_ASSERT(expr->outputSize() == mOutputs.size()); - MNN_ASSERT(expr->inputs().size() == mInputs.size()); - bool needResize = mSession->getNeedResize(); - auto& inputs = expr->inputs(); - if (!needResize) { - for (int i=0; igetInfo(); - auto check = mInputs[i].first; - if (src->dim.size() != check->dimensions()) { - needResize = true; - break; - } - for (int d=0; ddim.size(); ++d) { - if (src->dim[d] != check->length(d)) { - needResize = true; - break; - } - } - if (needResize) { - break; - } - } - } - if (needResize) { - for (int i=0; igetInfo(); - auto dst = mInputs[i].first; - Utils::copyInfoToTensor(dst, src); - } - mSession->setNeedResize(); - auto code = mSession->resize(); - if (NO_ERROR != code) { - return code; - } - } - for (int i=0; ioutputInfo(i), mOutputs[i].second.get()); - } - mResized = false; - return NO_ERROR; - } - ErrorCode prepare(Expr* expr) { - auto inputs = expr->inputs(); - for (int i=0; igetInfo(); - TensorUtils::copyShape(mInputs[i].first, mInputs[i].second.get(), true); - mInputs[i].second->buffer().host = (uint8_t*)src->ptr; - } - for (int i=0; ioutputSize(); ++i) { - expr->outputInfo(i)->ptr = mOutputs[i].second->host(); - } - return NO_ERROR; - } - virtual ErrorCode compute(Expr* expr) override { - if (!mResized) { - auto code = prepare(expr); - if (NO_ERROR != code) { - return code; - } - mResized = true; - } - for (auto& input : mInputs) { - input.first->copyFromHostTensor(input.second.get()); - } - auto code = mSession->run(); - if (NO_ERROR != code) { - 
return code; - } - for (auto& tensor : mOutputs) { - tensor.first->copyToHostTensor(tensor.second.get()); - } - return NO_ERROR; - } - bool valid() const {return mValid;} -private: - std::shared_ptr mSession; - std::vector>> mInputs; - std::vector>> mOutputs; - bool mValid = true; - bool mDirect = true; - bool mResized = false; -}; - -Executor::Executor(std::shared_ptr bn) { - mBackend = bn; -} -Executor:: ~Executor() { - for (auto iter : mSolutions) { - iter.first->setInfoDirty(); - } -} - -Executor::Requirement Executor::onGetRequirement(Expr* expr) const { +Executor::Requirement Executor::getRequirement(Expr* expr) const { Executor::Requirement req; auto op = expr->get(); auto inputSize = expr->inputs().size(); @@ -382,42 +123,519 @@ Executor::Requirement Executor::onGetRequirement(Expr* expr) const { return req; } -ErrorCode Executor::onComputeInfo(Expr* expr) { +std::shared_ptr Executor::getGlobalExecutor() { + static std::once_flag of; + static std::shared_ptr gExecutor; + std::call_once(of, [&]() { + auto creator = MNNGetExtraBackendCreator(MNN_FORWARD_CPU); + SizeComputerSuite::init(); + Backend::Info info; + info.type = MNN_FORWARD_CPU; + info.numThread = 1; + std::shared_ptr bn(creator->onCreate(info)); + gExecutor.reset(new Executor(bn)); + }); + return gExecutor; +} + +ErrorCode Executor::computeInfo(Expr* expr) { + MNN_ASSERT(nullptr != expr); + MNN_ASSERT(nullptr != expr->get()); if (expr->get()->type() == OpType_Extra) { - auto param = expr->get()->main_as_Extra(); - if (nullptr == param || "MNN" != param->engine()->str()) { - FUNC_PRINT(1); + return NOT_SUPPORT; + } + std::lock_guard _l(mMutex); + mInputs.resize(expr->inputs().size()); + mOutputs.resize(expr->outputSize()); + if (mStack.size() < mInputs.size() + mOutputs.size()) { + int origin = (int)mStack.size(); + int destSize = (int)(mInputs.size() + mOutputs.size()); + for (int i=origin; i(new Tensor)); + } + } + for (int i=0; iget(); + for (int i = 0; i < expr->inputs().size(); ++i) { + auto inputExpr = expr->inputs()[i]->expr(); + Utils::copyInfoToTensor(mInputs[i], inputExpr.first->outputInfo(inputExpr.second)); + } + bool res = SizeComputer::computeOutputSize(op, mInputs, mOutputs); + if (!res) { + // Compute Error +#ifdef MNN_EXPRESS_ERROR_REPORT + FUNC_PRINT(op->type()); +#endif + return COMPUTE_SIZE_ERROR; + } + for (int i = 0; i < mOutputs.size(); ++i) { + auto tensor = mOutputs[i]; + for (int j = 0; j < tensor->dimensions(); ++j) { + if (tensor->length(j) <= 0) { +#ifdef MNN_EXPRESS_ERROR_REPORT + if (nullptr != op->name()) { + auto name = op->name()->str(); + MNN_ERROR("Error to compute shape for %s\n", op->name()->c_str()); + } +#endif + return COMPUTE_SIZE_ERROR; + } + } + auto shape = expr->outputInfo(i); + Utils::copyTensorToInfo(shape, tensor); + } + return NO_ERROR; +} + +Executor::ComputeCache::~ComputeCache() { + mUnits.clear(); + for (auto t : mTensors) { + t.reset(); + } +} + +void Executor::ComputeCache::setShapeDirty() { + mShapeDirty = true; + for (auto iter : mLinks) { + auto cache = iter.lock(); + if (nullptr != cache && false == cache->mShapeDirty) { + cache->setShapeDirty(); + } + } +} +void Executor::ComputeCache::setContentDirty() { + mContentDirty = true; + for (auto iter : mLinks) { + auto cache = iter.lock(); + if (nullptr != cache && false == cache->mContentDirty) { + cache->setContentDirty(); + } + } +} + +void Executor::ComputeCache::TensorContent::reset() { + auto des = TensorUtils::getDescribe(tensor.get()); + des->useCount = refCount; + if (nullptr != des->backend) { + 
des->backend->onReleaseBuffer(tensor.get(), Backend::DYNAMIC); + des->backend = nullptr; + } +} +void Executor::ComputeCache::addLink(std::shared_ptr cache) { + for (int i=0; i(cache); + return; + } + } + mLinks.emplace_back(std::weak_ptr(cache)); +} +Tensor* Executor::ComputeCache::output(EXPRP outputExpr, int index, bool host) const { + auto iter = mOutputTensors.find(outputExpr.get()); + if (iter == mOutputTensors.end()) { + return nullptr; + } + MNN_ASSERT(index >= 0 && index < iter->second.size()); + if (host) { + return iter->second[index].first; + } + return iter->second[index].second; +} +void Executor::ComputeCache::dup(EXPRP src, EXPRP dst) { + if (mOutputTensors.find(src.get()) == mOutputTensors.end()) { + return; + } + mOutputTensors[dst.get()] = mOutputTensors[src.get()]; +} +void Executor::ComputeCache::recycle(Expr* expr) { + mOutputTensors.erase(expr); + if (mOutputTensors.empty()) { + mUnits.clear(); + for (auto& t : mTensors) { + t.reset(); + } + mTensors.clear(); + mInputs.clear(); + } +} + + +ErrorCode Executor::ComputeCache::compute() { + if (mShapeDirty) { + auto code = resize(); + if (NO_ERROR != code) { + return code; + } + } + if (!mContentDirty) { + return NO_ERROR; + } + for (auto c : mInputs) { + auto code = c->compute(); + if (NO_ERROR != code) { + return code; + } + } + mBackend->onExecuteBegin(); + for (int i=0; iget()->type()), s); +#ifdef MNN_EXPR_ENABLE_PROFILER + Timer autoTime; +#endif + auto code = iter.exe->onExecute(iter.inputs, iter.outputs); +#ifdef MNN_EXPR_ENABLE_PROFILER + float costTime = (float)autoTime.durationInUs() / (float)1000; + Executor::getGlobalExecutor()->addOpCostTime((int)mUnits[i].origin->get()->type(), costTime); +#endif + if (NO_ERROR != code) { + mBackend->onExecuteEnd(); + return code; + } + } + mBackend->onExecuteEnd(); + for (auto& iter : mOutputTensors) { + for (auto& output : iter.second) { + TensorUtils::getDescribe(output.second)->useCount = 0; + } + } + for (auto& iter : mOutputTensors) { + for (auto& output : iter.second) { + if (TensorUtils::getDescribe(output.second)->useCount > 0) { + continue; + } + if (mUnits.empty()) { + output.second->copyFromHostTensor(output.first); + } else { + output.second->copyToHostTensor(output.first); + } + TensorUtils::getDescribe(output.second)->useCount = 1; + } + } + mContentDirty = false; + return NO_ERROR; +} + + +ErrorCode Executor::ComputeCache::resize() { + if (!mShapeDirty) { + return NO_ERROR; + } + for (auto c : mInputs) { + auto code = c->resize(); + if (NO_ERROR != code) { + return code; + } + } + for (auto& t : mTensors) { + t.reset(); + } + if (mUnits.empty()) { + // Single Tensor + auto iter = mOutputTensors.begin(); + auto expr = iter->first; + Utils::copyInfoToTensor(iter->second[0].first, expr->outputInfo(0)); + iter->second[0].first->buffer().device = 0; + } + for (auto& iter : mUnits) { + if ((iter.origin->infoDirty()) || (!iter.origin->valid())) { + for (int i=0; ibuffer().dimensions = 0; + } + continue; + } + for (int i=0; ioutputInfo(i)); + auto res = mBackend->onAcquireBuffer(iter.outputs[i], Backend::DYNAMIC); + TensorUtils::getDescribe(iter.outputs[i])->backend = mBackend.get(); + if (!res) { + return OUT_OF_MEMORY; + } + } + if (nullptr == iter.exe) { +#ifdef MNN_EXPR_ENABLE_PROFILER + Timer autoTime; +#endif + iter.exe.reset(mBackend->onCreate(iter.inputs, iter.outputs, iter.origin->get())); +#ifdef MNN_EXPR_ENABLE_PROFILER + float costTime = (float)autoTime.durationInUs() / (float)1000; + 
Executor::getGlobalExecutor()->addOpCostTime((int)iter.origin->get()->type(), costTime); +#endif + } + if (nullptr == iter.exe) { return NOT_SUPPORT; } - } - std::lock_guard _l(mMutex); - auto iter = mSolutions.find(expr); - std::shared_ptr solution; - if (iter == mSolutions.end()) { - if (expr->get()->type() != OpType_Extra) { - solution.reset(new UnitSolution(expr, mBackend)); - } else { - auto param = expr->get()->main_as_Extra(); - auto blob = param->info(); - auto merge = flatbuffers::GetRoot(blob->data()); - solution.reset(new MergeExpr(merge, expr->inputs().size(), expr->outputSize())); +#ifdef MNN_EXPR_ENABLE_PROFILER + Timer autoTime; +#endif + auto code= iter.exe->onResize(iter.inputs, iter.outputs); +#ifdef MNN_EXPR_ENABLE_PROFILER + float costTime = (float)autoTime.durationInUs() / (float)1000; + Executor::getGlobalExecutor()->addOpCostTime((int)iter.origin->get()->type(), costTime); +#endif + if (NO_ERROR != code) { + return code; + } + auto& req = iter.origin->inside()->mReq.contentNeedContent; + for (int i=0; iuseCount--; + if (des->useCount <= 0 && des->backend != nullptr) { + des->backend->onReleaseBuffer(iter.inputs[i], Backend::DYNAMIC); + des->backend = nullptr; + } } - mSolutions[expr] = solution; - } else { - solution = iter->second; } - return solution->computeInfo(expr); + for (auto& iter : mOutputTensors) { + auto expr = iter.first; + for (int i=0; i Device + if (iter.second[i].first != iter.second[i].second) { + TensorUtils::copyShape(iter.second[i].first, iter.second[i].second, true); + iter.second[i].second->buffer().host = nullptr; + auto res = mBackend->onAcquireBuffer(iter.second[i].second, Backend::DYNAMIC); + if (!res) { + return OUT_OF_MEMORY; + } + TensorUtils::getDescribe(iter.second[i].second)->backend = mBackend.get(); + } + } else { + // For Other Cache, Device -> Host + if (iter.second[i].first != iter.second[i].second) { + TensorUtils::copyShape(iter.second[i].second, iter.second[i].first, true); + iter.second[i].first->buffer().device = 0; + auto res = mBackupBackend->onAcquireBuffer(iter.second[i].first, Backend::DYNAMIC); + if (!res) { + return OUT_OF_MEMORY; + } + TensorUtils::getDescribe(iter.second[i].first)->backend = mBackupBackend.get(); + } + } + expr->outputInfo(i)->ptr = iter.second[i].first->host(); + } + } + mShapeDirty = false; + mContentDirty = true; + return NO_ERROR; } -ErrorCode Executor::onComputeContent(Expr* expr) { - std::lock_guard _l(mMutex); - //MNN_PRINT("Compute for %s \n", EnumNameOpType(expr->get()->type())); - auto code = mSolutions[expr]->compute(expr); - return code; + +static void _collectExecuteUnit(std::vector& dest, EXPRP expr, std::map& units) { + auto& inputs = expr->inputs(); + auto& req = expr->inside()->mReq.contentNeedContent; + MNN_ASSERT(inputs.size() == req.size()); + + for (int i=0; iexpr(); + if (units.find(inputExpr.first) == units.end()) { + continue; + } + auto inputCache = inputExpr.first->inside()->mCache; + if (nullptr != inputCache) { + continue; + } + _collectExecuteUnit(dest, inputExpr.first, units); + } + auto iter = units.find(expr); + if (iter == units.end()) { + return; + } + dest.emplace_back(std::move(iter->second)); + units.erase(iter); } -void Executor::recycle(Expr* expr) { + +void Executor::ComputeCache::create(const std::vector& outputs, std::map& units, std::set>&& inputCaches, std::vector&& tensors, std::shared_ptr bn, std::shared_ptr backup) { + std::vector packed; + for (auto expr : outputs) { + // Make Cache For Single Tensor + auto cache = expr->inside()->mCache; + if (nullptr 
!= cache) { + continue; + } + if (nullptr != expr->get()) { + packed.emplace_back(expr); + continue; + } + cache.reset(new ComputeCache); + cache->mBackend = bn; + cache->mTensors.resize(1); + cache->mTensors[0].tensor.reset(new Tensor); + Utils::copyInfoToTensor(cache->mTensors[0].tensor.get(), expr->outputInfo(0)); + expr->inside()->mCache = cache; + if (bn->type() != MNN_FORWARD_CPU) { + cache->mTensors.resize(2); + cache->mTensors[1].tensor.reset(new Tensor); + Utils::copyInfoToTensor(cache->mTensors[1].tensor.get(), expr->outputInfo(0)); + cache->mTensors[1].tensor->buffer().host = nullptr; + cache->mOutputTensors[expr.get()] = {std::make_pair(cache->mTensors[0].tensor.get(), cache->mTensors[1].tensor.get())}; + } else { + cache->mOutputTensors[expr.get()] = {std::make_pair(cache->mTensors[0].tensor.get(), cache->mTensors[0].tensor.get())}; + } + cache->mBackupBackend = backup; + } + if (packed.empty()) { + return; + } + std::shared_ptr packedCache(new ComputeCache); + packedCache->mBackend = bn; + packedCache->mInputs = std::move(inputCaches); + for (auto input : packedCache->mInputs) { + input->addLink(packedCache); + } + for (auto expr : packed) { + MNN_ASSERT(units.find(expr) != units.end()); + auto& originOutputs = units[expr].outputs; + std::vector> destOutputs; + if (bn->type() == MNN_FORWARD_CPU) { + for (auto t : originOutputs) { + destOutputs.emplace_back(std::make_pair(t, t)); + } + } else { + for (auto t : originOutputs) { + ComputeCache::TensorContent content; + content.tensor.reset(new Tensor); + TensorUtils::copyShape(t, content.tensor.get(), true); + destOutputs.emplace_back(std::make_pair(content.tensor.get(), t)); + tensors.emplace_back(std::move(content)); + } + } + packedCache->mOutputTensors[expr.get()] = std::move(destOutputs); + expr->inside()->mCache = packedCache; + } + packedCache->mTensors = std::move(tensors); + packedCache->mBackupBackend = backup; + + // Backup Tensor Refcount + for (auto& t : packedCache->mTensors) { + t.refCount = TensorUtils::getDescribe(t.tensor.get())->useCount; + } + // Create Units + for (auto expr : packed) { + _collectExecuteUnit(packedCache->mUnits, expr, units); + } + // Resize if possible + packedCache->resize(); +} + +void Executor::_visit(EXPRP expr, std::map& units, std::set>& inputCaches, std::vector& tensors) { + auto& inputs = expr->inputs(); + auto& req = expr->inside()->mReq.contentNeedContent; + MNN_ASSERT(inputs.size() == req.size()); + + // Create Input's Unit / Cache + for (int i=0; iexpr(); + if (units.find(inputExpr.first) != units.end()) { + continue; + } + auto inputCache = inputExpr.first->inside()->mCache; + if (nullptr != inputCache) { + inputCaches.insert(inputCache); + continue; + } + _visit(inputExpr.first, units, inputCaches, tensors); + } + + // Create Self Unit / Cache + auto op = expr->get(); + if (nullptr == op) { + // Make Cache For Single Tensor + Executor::ComputeCache::create({expr}, units, {}, {}, mBackend, mBackupBackend); + return; + } + ComputeCache::Unit unit; + unit.origin = expr.get(); + unit.inputs.resize(inputs.size()); + unit.inputFromCache.resize(inputs.size()); + for (int i=0; iexpr(); + if (!req[i]) { + ComputeCache::TensorContent content; + content.tensor.reset(new Tensor); + Utils::copyInfoToTensor(content.tensor.get(), inputExpr.first->outputInfo(inputExpr.second)); + unit.inputs[i] = content.tensor.get(); + tensors.emplace_back(std::move(content)); + continue; + } + auto iter = units.find(inputExpr.first); + if (iter != units.end()) { + unit.inputs[i] = 
iter->second.outputs[inputExpr.second]; + TensorUtils::getDescribe(unit.inputs[i])->useCount++; + unit.inputFromCache[i] = false; + continue; + } + auto inputCache = inputExpr.first->inside()->mCache; + if (nullptr != inputCache) { + unit.inputs[i] = inputCache->output(inputExpr.first, inputExpr.second, false); + unit.inputFromCache[i] = true; + continue; + } + MNN_ASSERT(false); + } + unit.outputs.resize(expr->outputSize()); + for (int i=0; i expr) { std::lock_guard _l(mMutex); - mSolutions.erase(expr); - return; + //FUNC_PRINT(mCaches.size()); + std::map units; + std::set> inputCaches; + std::vector tensors; + for (auto e : expr) { + _visit(e, units, inputCaches, tensors); + } + Executor::ComputeCache::create(expr, units, std::move(inputCaches), std::move(tensors), mBackend, mBackupBackend); +} +void Executor::addOpCostTime(int op, float costTime) { +#ifdef MNN_EXPR_ENABLE_PROFILER + mProfiler->add(op, costTime); +#endif +} + +ErrorCode Executor::runCache(std::shared_ptr cache) { + std::lock_guard _l(mMutex); + return cache->compute(); +} +void Executor::resetProfile() { +#ifdef MNN_EXPR_ENABLE_PROFILER + mProfiler->reset(); +#endif +} +void Executor::dumpProfile() { +#ifdef MNN_EXPR_ENABLE_PROFILER + mProfiler->dump(); +#endif } } // namespace Express diff --git a/express/Expr.cpp b/express/Expr.cpp index 56baacbe..f9182268 100644 --- a/express/Expr.cpp +++ b/express/Expr.cpp @@ -77,16 +77,10 @@ bool VARP::fix(VARP::InputType type) const { default: return false; } - auto temp = VARP(mContent); - Variable::replace(temp, newVar); + Variable::replace(VARP(mContent), newVar); return true; } -struct Expr::Inside { - std::vector mInputInfos; - std::vector mOutputInfos; - Executor::Requirement mReq; -}; Expr::Expr(int outputSize) { mInside.reset(new Inside); mInside->mOutputInfos.resize(outputSize); @@ -94,22 +88,13 @@ Expr::Expr(int outputSize) { } Expr::~Expr() { - Executor::getGlobalExecutor()->recycle(this); + auto cache = mInside->mCache; + if (nullptr != cache) { + cache->recycle(this); + } mInside.reset(); } -void Expr::set(const OpT* op) { - MNN_ASSERT(nullptr != op); - flatbuffers::FlatBufferBuilder builder; - auto offset = Op::Pack(builder, op); - builder.Finish(offset); - mExtraBuffer.reset(new char[builder.GetSize()]); - ::memcpy(mExtraBuffer.get(), builder.GetBufferPointer(), builder.GetSize()); - mOp = flatbuffers::GetMutableRoot(mExtraBuffer.get()); - mOpBufferSize = builder.GetSize(); - mContentDirty = true; - mInfoDirty = true; -} -Variable::Info* Expr::outputInfo(int index) { +Variable::Info* Expr::outputInfo(int index) const { return mInside->mOutputInfos.data() + index; } @@ -148,17 +133,24 @@ EXPRP Expr::create(Variable::Info&& info) { } if (nullptr == originPtr) { expr->mType = VARP::INPUT; - expr->mContentDirty = true; return expr; } expr->mType = VARP::CONST; - expr->mContentDirty = false; ::memcpy(expr->mInside->mOutputInfos[0].ptr, originPtr, dstInfo.size * dstInfo.type.bytes()); return expr; } +EXPRP Expr::create(std::pair, int> extra, std::vector&& inputs, int outputSize) { + EXPRP expr(new Expr(outputSize)); + expr->mExtraBuffer = extra.first; + expr->mOpBufferSize = extra.second; + expr->mOp = flatbuffers::GetMutableRoot(extra.first.get()); + expr->mOpBufferSize = extra.second; + expr->mInputs = std::move(inputs); + _addLinkForInputs(expr); + return expr; +} EXPRP Expr::create(const OpT* op, std::vector inputs, int outputSize) { - EXPRP expr(new Expr(outputSize)); if (OpType_Input == op->type) { Variable::Info info; info.dim = op->main.AsInput()->dims; @@ -198,10 
+190,12 @@ EXPRP Expr::create(const OpT* op, std::vector inputs, int outputSize) { } return expr; } - expr->set(op); - expr->mInputs = std::move(inputs); - _addLinkForInputs(expr); - return expr; + flatbuffers::FlatBufferBuilder builder; + auto offset = Op::Pack(builder, op); + builder.Finish(offset); + std::shared_ptr extraBuffer(new char[builder.GetSize()]); + ::memcpy(extraBuffer.get(), builder.GetBufferPointer(), builder.GetSize()); + return Expr::create(std::make_pair(extraBuffer, builder.GetSize()), std::move(inputs), outputSize); } void Expr::setName(const std::string& name) { mName = name; @@ -219,7 +213,7 @@ bool Expr::requireInfo() { bool ready = true; mInside->mInputInfos.resize(mInputs.size()); if (mInside->mReq.shapeNeedContent.empty()) { - mInside->mReq = Executor::getGlobalExecutor()->onGetRequirement(this); + mInside->mReq = Executor::getGlobalExecutor()->getRequirement(this); } for (int i = 0; i < mInputs.size(); ++i) { if (nullptr == mInputs[i] || nullptr == mInputs[i]->mFrom) { @@ -238,8 +232,8 @@ bool Expr::requireInfo() { for (int i = 0; i < mInputs.size(); ++i) { auto& v = mInputs[i]; if (mInside->mReq.shapeNeedContent[i]) { - auto res = v->expr().first->requireCompute(); - if (!res) { + auto resPtr = v->readInternal(); + if (nullptr == resPtr) { #ifdef MNN_EXPRESS_ERROR_REPORT MNN_ERROR("%s, Error for compute shape %d\n", mName.c_str(), i); #endif @@ -253,7 +247,7 @@ bool Expr::requireInfo() { return false; } //MNN_PRINT("Info %s, %p Start\n", mName.c_str(), this); - auto res = Executor::getGlobalExecutor()->onComputeInfo(this); + auto res = Executor::getGlobalExecutor()->computeInfo(this); //MNN_PRINT("Info Compute %s\n", mName.c_str()); if (NO_ERROR == res) { @@ -264,72 +258,6 @@ bool Expr::requireInfo() { return NO_ERROR == res; } -bool Expr::requireCompute() { - if (nullptr == mOp) { - if (mType == VARP::INPUT) { - return !mContentDirty; - } - return true; - } - if ((!mContentDirty) && mValid) { - return true; - } - if (!mValid) { - return false; - } -#ifdef DEBUG_OVERFLOW - if (mTo.size() > 1) { - if (mName.size() > 0) { - MNN_PRINT("output: %d, type:%s, name: %s\n", mTo.size(), EnumNameOpType(mOp->type()), mName.c_str()); - } else { - MNN_PRINT("output: %d, type:%s\n", mTo.size(), EnumNameOpType(mOp->type())); - } - for (auto t : mTo) { - auto tp = t.lock(); - if (nullptr == tp) { - MNN_PRINT("nullptr\t"); - } else { - MNN_PRINT("%s\n", EnumNameOpType(tp->get()->type())); - } - } - MNN_PRINT("\n"); - //FUNC_PRINT(mTo.size()); - } -#endif - bool res = requireInfo(); - if (!res) { - return false; - } - for (int i = 0; i < mInputs.size(); ++i) { - if (mInside->mReq.contentNeedContent[i]) { - auto& input = mInputs[i]; - auto expr = input->expr().first; - res = expr->requireCompute(); - if (!res) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("%s compute input %d error , \n", mName.c_str(), i); -#endif - if (!mInside->mReq.supportError[i]) { - mValid = false; - return false; - } - } - } - } - auto code = Executor::getGlobalExecutor()->onComputeContent(this); - //MNN_PRINT("Compute %s, %p End\n", mName.c_str(), this); - res = code == NO_ERROR; - if (!res) { -#ifdef MNN_EXPRESS_ERROR_REPORT - MNN_ERROR("Error for compute %s\n", mName.c_str()); -#endif - mValid = false; - return false; - } - mContentDirty = false; - return true; -} - size_t Variable::linkNumber() const { return mFrom->outputs().size(); } @@ -376,22 +304,32 @@ void Expr::replace(EXPRP old, EXPRP from) { input->mFrom->mTo.emplace_back(WeakEXPRP(old)); } } - 
Executor::getGlobalExecutor()->recycle(old.get()); old->mOp = from->mOp; old->mName = from->mName; old->mOutputNames = from->mOutputNames; old->mExtraBuffer = from->mExtraBuffer; old->mOpBufferSize = from->mOpBufferSize; old->mType = from->mType; + auto cache = old->mInside->mCache; + if (nullptr != cache) { + cache->recycle(old.get()); + } old->mInside = from->mInside; - old->mContentDirty = from->mContentDirty; - old->mInfoDirty = true; + cache = old->mInside->mCache; + if (nullptr != cache) { + cache->dup(from, old); + } + old->mInfoDirty = from->mInfoDirty; old->mInputs = from->mInputs; old->visitOutputs([&](EXPRP expr, int index) { if (expr->mInfoDirty) { return false; } - expr->mContentDirty = true; + auto cache = expr->mInside->mCache; + if (nullptr != cache) { + cache->recycle(expr.get()); + expr->mInside->mCache.reset(); + } expr->mInfoDirty = true; return true; }); @@ -446,6 +384,9 @@ bool Variable::input(VARP src) { mFrom->mExtraBuffer.reset(new char[info->size * info->type.bytes()]); } mFrom->mInside->mOutputInfos[0].ptr = mFrom->mExtraBuffer.get(); + if (nullptr != mFrom->mInside->mCache) { + mFrom->mInside->mCache->setShapeDirty(); + } } if (needCopy) { auto dstPtr = writeInternal(false); @@ -469,6 +410,32 @@ void Variable::replace(VARP dst, VARP src) { dst->setExpr(nullptr, 0); return; } + if (src->mFrom.get() == dst->mFrom.get()) { + dst->mFromIndex = src->mFromIndex; + return; + } + if (src->mFrom->outputSize() != dst->mFrom->outputSize()) { + // Can't replace Expr, Just replace VARP + dst->mFrom->visitOutputs([src, dst](EXPRP expr, int index) { + src->mFrom->mTo.emplace_back(expr); + return false; + }); + dst->mFrom->visitOutputs([src, dst](EXPRP expr, int index) { + if (expr->mInfoDirty && nullptr == expr->mInside->mCache) { + return false; + } + auto cache = expr->mInside->mCache; + if (nullptr != cache) { + cache->recycle(expr.get()); + expr->mInside->mCache.reset(); + } + expr->setInfoDirty(); + return true; + }); + dst->mFrom = src->mFrom; + dst->mFromIndex = src->mFromIndex; + return; + } Expr::replace(dst->mFrom, src->mFrom); dst->mFromIndex = src->mFromIndex; } @@ -503,17 +470,16 @@ bool Variable::resize(INTS dims) { } } info.dim = dims; - info.size = 1; - for (int i=0; imExtraBuffer.reset(new char[info.size * info.type.bytes()]); info.ptr = mFrom->mExtraBuffer.get(); - mFrom->mContentDirty = true; mFrom->mValid = true; mFrom->mInside->mInputInfos.clear(); - + auto cache = mFrom->mInside->mCache; + if (nullptr != cache) { + cache->setShapeDirty(); + } mFrom->visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); }); return true; } @@ -530,30 +496,59 @@ void Expr::visit(EXPRP expr, const std::function& before, const std void* Variable::readInternal() { if (nullptr == mFrom->get()) { - if (mFrom->mContentDirty) { - return nullptr; + if (VARP::INPUT == mFrom->mType) { + if (nullptr == mFrom->mInside->mCache) { + return nullptr; + } } return mFrom->outputInfo(mFromIndex)->ptr; } - auto res = mFrom->requireCompute(); - if (!res) { + auto res = mFrom->requireInfo(); + if (false == res) { + return nullptr; + } + auto cache = mFrom->inside()->mCache; + if (nullptr == cache) { + Executor::getGlobalExecutor()->makeCache({mFrom}); + cache = mFrom->inside()->mCache; + } + if (nullptr == cache) { + return nullptr; + } + if (NO_ERROR != Executor::getGlobalExecutor()->runCache(cache)) { return nullptr; } return mFrom->outputInfo(mFromIndex)->ptr; } void Variable::informDirty() { - mFrom->visitOutputs([](EXPRP expr, int index) { - auto needRecurse = 
expr->setContentDirty(index); - return needRecurse; - }); + auto cache = mFrom->mInside->mCache; + if (nullptr != cache) { + cache->setContentDirty(); + } +} +void Variable::prepareCompute(const std::vector& vars) { + std::vector exprs; + for (auto v : vars) { + exprs.emplace_back(v->expr().first); + } + for (auto expr : exprs) { + auto res = expr->requireInfo(); + if (!res) { + return; + } + } + Executor::getGlobalExecutor()->makeCache(std::move(exprs)); } void* Variable::writeInternal(bool inform) { if (inform) { informDirty(); } - mFrom->mContentDirty = false; + auto cache = mFrom->mInside->mCache; + if (nullptr == cache) { + Executor::getGlobalExecutor()->makeCache({mFrom}); + } return mFrom->mInside->mOutputInfos[0].ptr; } @@ -581,22 +576,6 @@ void Expr::visitOutputs(const std::function& visit) { iter++; } } -bool Expr::setContentDirty(int inputIndex) { - if (mContentDirty) { - return false; - } - if (nullptr != mInside) { - if (mInside->mReq.shapeNeedContent[inputIndex]) { - visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); }); - return setInfoDirty(); - } - if (!mInside->mReq.contentNeedContent[inputIndex]) { - return false; - } - } - mContentDirty = true; - return true; -} bool Expr::setInfoDirty() { if (mInfoDirty && mValid) { //MNN_PRINT("End Info Dirty for %s\n", mName.c_str()); @@ -604,8 +583,10 @@ bool Expr::setInfoDirty() { } //MNN_PRINT("Set Info Dirty for %s\n", mName.c_str()); mInfoDirty = true; - mContentDirty = true; mValid = true; + if (nullptr != mInside->mCache) { + mInside->mCache->setShapeDirty(); + } return true; } diff --git a/express/MathOp.cpp b/express/MathOp.cpp index 8b10bd23..e85bcd30 100644 --- a/express/MathOp.cpp +++ b/express/MathOp.cpp @@ -131,6 +131,16 @@ VARP _Ceil(VARP x) return _Unary(x, UnaryOpOperation_CEIL); } +/*Returns element-wise rounded integer not less than x. +Args: +x: A variable. Must be Halide_Type_Float +Returns: +A variable. Halide_Type_Float. +*/ +VARP _Round(VARP x) { + return _Unary(x, UnaryOpOperation_ROUND); +} + /*Computes square of x element-wise. Args: x: A variable. 
Must be one of the following types: Halide_Type_Int or Halide_Type_Float diff --git a/express/MergeOptimizer.cpp b/express/MergeOptimizer.cpp index f676b3b3..5a11fe5a 100644 --- a/express/MergeOptimizer.cpp +++ b/express/MergeOptimizer.cpp @@ -9,9 +9,6 @@ #include "MergeOptimizer.hpp" #include #include "Utils.hpp" -#include "BasicOptimizer_generated.h" -#define FLATBUFFERS_PREFER_PRINTF -#include "flatbuffers/util.h" namespace MNN { namespace Express { @@ -31,109 +28,7 @@ Optimizer::Cost MergeOptimizer::onMeasure(const std::vector& outputs, std: return cost; } bool MergeOptimizer::onExecute(const std::vector& outputs, std::shared_ptr parameters) { - auto sequence = Variable::getExecuteOrder(outputs); - if (1 == sequence.size()) { - return true; - } - std::map varIndexOffset; - std::vector inputs; - std::unique_ptr merge(new MNN::Optimizer::MergeT); - merge->backend.reset(new MNN::Optimizer::BackendConfigT); - merge->backend->numberThread = mNumberThread; - merge->backend->type = (MNN::ForwardType)mType; - merge->backend->power = (int)mConfig.power; - merge->backend->precision = (int)mConfig.precision; - merge->backend->memroy = (int)mConfig.memory; - - int tensorOffset = 0; - for (int i = 0; i < sequence.size(); ++i) { - auto expr = sequence[i]; - if (nullptr != expr->get() && OpType_Extra == expr->get()->type()) { - return true; - } - varIndexOffset[expr] = tensorOffset; - tensorOffset += expr->outputSize(); - if (nullptr == expr->get()) { - if (expr->inputType() == VARP::INPUT) { - inputs.emplace_back(Variable::create(expr)); - merge->inputIndexes.emplace_back(varIndexOffset[expr]); - } else { - std::unique_ptr op; - VARP var = Variable::create(expr); - auto& info = *(var->getInfo()); - auto blob = new BlobT; - blob->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info.order); - blob->dims = info.dim; - if (info.type.code == halide_type_float) { - blob->dataType = DataType_DT_FLOAT; - blob->float32s.resize(info.size); - ::memcpy(blob->float32s.data(), info.ptr, info.size * sizeof(float)); - } else if (info.type.code == halide_type_int) { - blob->dataType = DataType_DT_INT32; - blob->int32s.resize(info.size); - ::memcpy(blob->int32s.data(), info.ptr, info.size * sizeof(int)); - } - else if (info.type.code == halide_type_uint && info.type.bits == 8) { - blob->dataType = DataType_DT_UINT8; - blob->uint8s.resize(info.size); - ::memcpy(blob->uint8s.data(), info.ptr, info.size * sizeof(uint8_t)); - } - op.reset(new OpT); - op->type = OpType_Const; - op->main.type = OpParameter_Blob; - op->main.value = blob; - op->outputIndexes = {varIndexOffset[expr]}; - merge->oplists.emplace_back(std::move(op)); - } - } - } - merge->tensorNumber = tensorOffset; - for (auto expr : sequence) { - if (nullptr == expr->get()) { - continue; - } - std::unique_ptr op(expr->get()->UnPack()); - auto outputIndexStart = varIndexOffset[expr]; - op->name = EnumNameOpType(op->type) + flatbuffers::NumToString(outputIndexStart+1); - op->outputIndexes.resize(expr->outputSize()); - for (int i=0; ioutputSize(); ++i) { - op->outputIndexes[i] = outputIndexStart + i; - } - auto exprinputs = expr->inputs(); - op->inputIndexes.resize(exprinputs.size()); - for (int i = 0; i < exprinputs.size(); ++i) { - auto inputExpr = exprinputs[i]->expr(); - op->inputIndexes[i] = varIndexOffset[inputExpr.first] + inputExpr.second; - } - merge->oplists.emplace_back(std::move(op)); - } - for (auto var : outputs) { - auto expr = var->expr(); - merge->outputIndexes.emplace_back(varIndexOffset[expr.first] + expr.second); - } - - std::unique_ptr 
mergeOp(new OpT); - mergeOp->type = OpType_Extra; - mergeOp->name = outputs[0]->name(); - mergeOp->main.type = OpParameter_Extra; - mergeOp->main.value = new ExtraT; - auto plugin = mergeOp->main.AsExtra(); - plugin->type = "Session"; - plugin->engine = "MNN"; - - flatbuffers::FlatBufferBuilder builder; - auto offset = MNN::Optimizer::Merge::Pack(builder, merge.get()); - builder.Finish(offset); - plugin->info.resize(builder.GetSize()); - ::memcpy(plugin->info.data(), builder.GetBufferPointer(), builder.GetSize()); - - auto mergeExpr = Expr::create(mergeOp.get(), inputs, (int)outputs.size()); - mergeExpr->setName(outputs[0]->name()); - for (int i = 0; i < outputs.size(); ++i) { - auto name = outputs[i]->name(); - outputs[i]->setExpr(mergeExpr, i); - outputs[i]->setName(name); // merge expr does not copy mOutputNames, so copy to prevent var's name to be erased - } + // Deprecated return true; } } // namespace Express diff --git a/express/NeuralNetWorkOp.cpp b/express/NeuralNetWorkOp.cpp index 529e85f8..005a4a5f 100644 --- a/express/NeuralNetWorkOp.cpp +++ b/express/NeuralNetWorkOp.cpp @@ -41,12 +41,19 @@ static PoolPadType _convertPoollingPadMode(PaddingMode mode) { } return PoolPadType_CAFFE; } - -VARP _Input(INTS dims, Dimensionformat format, halide_type_t type) { +/*Create an input variable. +Args: +shape: A vector, the shape of the variable. +data_format: An enum, NCHW/NHWC/NC4HW4 is allowed. +dtype: The type of the elements of the resulting variable. +Returns: +output: A variable. +*/ +VARP _Input(INTS shape, Dimensionformat data_format, halide_type_t dtype) { Variable::Info info; - info.dim = std::move(dims); - info.order = format; - info.type = type; + info.dim = std::move(shape); + info.order = data_format; + info.type = dtype; info.ptr = nullptr; return (Variable::create(Expr::create(std::move(info)))); } @@ -58,26 +65,34 @@ VARP _Scalar(const void* ptr, halide_type_t type) { info.ptr = (void*)ptr; return (Variable::create(Expr::create(std::move(info)))); } -VARP _Const(const void* ptr, INTS dims, Dimensionformat format, halide_type_t type) { +/*Create a constant variable. +Args: +ptr: A pointer. Indicates the values. +shape: A vector, the shape of the variable. +format: An enum, NCHW/NHWC/NC4HW4 is allowed. +type: The type of the elements of the resulting variable. +Returns: +output: A constant variable.
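+For example (illustrative usage only; assumes the MNN Express creator header, e.g. <MNN/expr/ExprCreator.hpp>, is included and the MNN::Express namespace is in scope):
+    std::vector<float> data = {1.0f, 2.0f, 3.0f, 4.0f};
+    // Build a 1x4 float constant; the values are copied into the variable at creation time.
+    auto c = _Const(data.data(), {1, 4}, NHWC, halide_type_of<float>());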
+*/ +VARP _Const(const void* ptr, INTS shape, Dimensionformat format, halide_type_t type) { Variable::Info info; - info.dim = std::move(dims); + info.dim = std::move(shape); info.order = format; info.type = type; info.ptr = (void*)ptr; return (Variable::create(Expr::create(std::move(info)))); } -VARP _Const(float value, INTS dims, Dimensionformat format) { - auto size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); - std::vector values; - values.resize(size); - for (int i = 0; i < size; ++i) { - values[i] = value; - } +VARP _Const(float value, INTS shape, Dimensionformat format) { Variable::Info info; - info.dim = std::move(dims); + info.dim = std::move(shape); info.order = format; info.type = halide_type_of(); + info.syncSize(); + std::vector values(info.size); + for (int i = 0; i < info.size; ++i) { + values[i] = value; + } info.ptr = (void*)values.data(); return (Variable::create(Expr::create(std::move(info)))); } @@ -123,6 +138,9 @@ VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad, INTS stride, INTS di conv2D->common->dilateY = dilate[1]; conv2D->common->kernelX = kernelSize[0]; conv2D->common->kernelY = kernelSize[1]; + if (nullptr == bias) { + return (Variable::create(Expr::create(convOp.get(), {x, weight}))); + } return (Variable::create(Expr::create(convOp.get(), {x, weight, bias}))); } VARP _Conv(std::vector&& weight, std::vector&& bias, VARP x, INTS channel, INTS kernelSize, @@ -250,21 +268,39 @@ VARP _AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) { VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad, INTS pads) { return _Pool(x, kernel, stride, PoolType_MAXPOOL, pad, pads); } -VARP _Reshape(VARP x, INTS dim, Dimensionformat format) { +/*Reshapes a variable. +Args: +x: A variable. +shape: A vector, the shape of the target variable. +original_format: A enum, only NCHW/NHWC is allowed, NC4HW4 is not allowed, +as it provides additional information(x comes from NCHW or NHWC) When x is NC4HW4. +Returns: +output: A variable with the same type as `x`. +*/ +VARP _Reshape(VARP x, INTS shape, Dimensionformat original_format) { std::unique_ptr reshape(new OpT); reshape->type = OpType_Reshape; reshape->main.type = OpParameter_Reshape; reshape->main.value = new ReshapeT; - reshape->main.AsReshape()->dims = dim; - reshape->main.AsReshape()->dimType = (MNN_DATA_FORMAT)Utils::convertFormat(format); + reshape->main.AsReshape()->dims = shape; + reshape->main.AsReshape()->dimType = (MNN_DATA_FORMAT)Utils::convertFormat(original_format); return (Variable::create(Expr::create(reshape.get(), {x}))); } +/*Reshapes a variable. +Args: +x: A variable. +shape: A variable, the shape of the target variable. +Returns: +output: A variable with the same type as `x`. 
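+For example (illustrative usage only; x is assumed to be an existing variable with known info, and -1 is assumed to mean "infer this dimension"):
+    std::vector<int> dims = {1, -1};
+    // The target shape is passed as a variable rather than a compile-time vector.
+    auto shapeVar = _Const(dims.data(), {2}, NHWC, halide_type_of<int>());
+    auto y = _Reshape(x, shapeVar);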
+*/ VARP _Reshape(VARP x, VARP shape) { + MNN_ASSERT(nullptr != x); + MNN_ASSERT(nullptr != x->getInfo()); std::unique_ptr reshape(new OpT); reshape->type = OpType_Reshape; reshape->main.type = OpParameter_Reshape; reshape->main.value = new ReshapeT; - reshape->main.AsReshape()->dimType = MNN_DATA_FORMAT_NCHW; + reshape->main.AsReshape()->dimType = (MNN_DATA_FORMAT)Utils::convertFormat(x->getInfo()->order); return (Variable::create(Expr::create(reshape.get(), {x, shape}))); } VARP _Scale(VARP x, int channels, std::vector&& scales, std::vector&& bias) { @@ -277,6 +313,13 @@ VARP _Scale(VARP x, int channels, std::vector&& scales, std::vectormain.AsScale()->biasData = std::move(bias); return (Variable::create(Expr::create(std::move(scale), {x}))); } +/*Given an input value x, it computes the output as x if x > 0 and slope * x if x <= 0. +Args: +x: A variable. +slope: A float, a positive float value, it leakes the negative part by multiplying with `slope` rather than setting it to 0.0f. +Returns: +output: A variable with the same type as `x`. +*/ VARP _Relu(VARP x, float slope) { std::unique_ptr relu(new OpT); relu->type = OpType_ReLU; @@ -285,11 +328,24 @@ VARP _Relu(VARP x, float slope) { relu->main.AsRelu()->slope = slope; return (Variable::create(Expr::create(relu.get(), {x}))); } +/*Given an input value x, it computes Rectified Linear 6: min(max(x, 0), 6). +Args: +x: A variable. +Returns: +output: A variable with the same type as `x`. +*/ VARP _Relu6(VARP x) { std::unique_ptr relu(new OpT); relu->type = OpType_ReLU6; return (Variable::create(Expr::create(relu.get(), {x}))); } +/*Given an input value x, it computes the output as x if x > 0 and slopes * x if x <= 0. +Args: +x: A variable, must be 4-D with NC4HW4 format. +slopes: A vector, has save size as x. +Returns: +output: A variable with the same type as `x`. +*/ VARP _PRelu(VARP x, std::vector&& slopes) { std::unique_ptr prelu(new OpT); prelu->type = OpType_PReLU; @@ -299,60 +355,99 @@ VARP _PRelu(VARP x, std::vector&& slopes) { prelu->main.AsPRelu()->slopeCount = slopes.size(); return (Variable::create(Expr::create(prelu.get(), {x}))); } - -VARP _Softmax(VARP x, int axis) { +/*Computes softmax activations. +Args: +logits: A non-empty variable. Must be Halide_Type_Float. +axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. +Returns: +output: A variable with the same type as `logits`. +*/ +VARP _Softmax(VARP logits, int axis) { std::unique_ptr softmax(new OpT); softmax->type = OpType_Softmax; softmax->main.type = OpParameter_Axis; softmax->main.value = new AxisT; softmax->main.AsAxis()->axis = axis; - return (Variable::create(Expr::create(softmax.get(), {x}))); + return (Variable::create(Expr::create(softmax.get(), {logits}))); } - -VARP _Softplus(VARP x) { - return _Log(_Add(_Exp(x), _Const(1))); +/*Computes softplus: log(exp(features) + 1). +Args: +features: A variable. Must be Halide_Type_Float. +Returns: +A variable with the same type as `features`. +*/ +VARP _Softplus(VARP features) { + return _Log(_Add(_Exp(features), _Const(1))); } - -VARP _Softsign(VARP x) { - return _Divide(x, _Add(_Abs(x), _Const(1))); +/*Computes softsign: features / (abs(features) + 1). +Args: +features: A variable. Must be Halide_Type_Float. +Returns: +A variable with the same type as `features`. +*/ +VARP _Softsign(VARP features) { + return _Divide(features, _Add(_Abs(features), _Const(1))); } - -VARP _Concat(VARPS xs, int axis) { +/*Concatenates variables along one dimension. 
+Args: +values: A list of variables a single variable. +axis: A int. Dimension along which to concatenate. +Must be in the range [-rank(values), rank(values)). +As in Python, indexing for axis is 0-based. +Positive axis in the rage of [0, rank(values)) refers to axis-th dimension. +And negative axis refers to axis + rank(values)-th dimension. +Returns: +A variable resulting from concatenation of the input variables. +*/ +VARP _Concat(VARPS values, int axis) { std::unique_ptr concat(new OpT); concat->type = OpType_Concat; concat->main.type = OpParameter_Axis; concat->main.value = new AxisT; concat->main.AsAxis()->axis = axis; - return (Variable::create(Expr::create(concat.get(), xs))); + return (Variable::create(Expr::create(concat.get(), values))); } - -VARP _Convert(VARP x, Dimensionformat dest) { - std::unique_ptr convert(new OpT); - if (nullptr != x->getInfo()) { - auto source = x->getInfo()->order; - if (source == dest) { - return x; +/*Convert a variable to another format(possibily added after `input`). +Args: +input: A variable. +format: The target format. +Returns: +A variable. If `input` is already `format`, then return `input` directly, otherwize add a variable after `input` with `format`. +*/ +VARP _Convert(VARP input, Dimensionformat format) { + if (nullptr != input->getInfo()) { + auto source = input->getInfo()->order; + if (source == format) { + return input; } } + std::unique_ptr convert(new OpT); convert->type = OpType_ConvertTensor; convert->main.type = OpParameter_TensorConvertInfo; convert->main.value = new TensorConvertInfoT; - convert->main.AsTensorConvertInfo()->dest = (MNN_DATA_FORMAT)Utils::convertFormat(dest); - return (Variable::create(Expr::create(convert.get(), {x}))); + convert->main.AsTensorConvertInfo()->dest = (MNN_DATA_FORMAT)Utils::convertFormat(format); + return (Variable::create(Expr::create(convert.get(), {input}))); } - -std::vector _Split(VARP x, INTS points, int axis) { - MNN_ASSERT(points.size() >= 1); +/*Splits a variable value into a list of sub variables. +Args: +value: The variable to split. +size_splits: A vector, a 1-D integer containing the sizes of each output variable along axis. +axis: A int, the dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0 +Returns: +A list of variables. +*/ +std::vector _Split(VARP value, INTS size_splits, int axis) { + MNN_ASSERT(size_splits.size() >= 1); std::unique_ptr op(new OpT); op->type = OpType_Slice; op->main.type = OpParameter_Slice; op->main.value = new SliceT; op->main.AsSlice()->axis = axis; op->main.AsSlice()->sourceType = NetSource_TENSORFLOW; - op->main.AsSlice()->slicePoints = points; + op->main.AsSlice()->slicePoints = size_splits; - int slices = points.size() == 1 ? points[0] : (int)points.size(); - EXPRP expr = Expr::create(std::move(op), {x}, slices); + int slices = size_splits.size() == 1 ? size_splits[0] : (int)size_splits.size(); + EXPRP expr = Expr::create(std::move(op), {value}, slices); std::vector res; for (int i = 0; i < slices; ++i) { res.emplace_back(Variable::create(expr, i)); @@ -381,7 +476,13 @@ VARP _StridedSlice(VARP x, VARP begin, VARP end, VARP strided, halide_type_t typ op->main.AsStridedSliceParam()->shrinkAxisMask = shrinkAxisMask; return (Variable::create(Expr::create(op.get(), {x, begin, end, strided}))); } - +/*Transposes x. +Args: +x: A variable. +perm: A vector, indicating the permutation of the dimensions of x. +Returns: +A transposed variable. 
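+For example (illustrative usage only; x is assumed to be a 4-D variable laid out as NHWC):
+    auto y = _Transpose(x, {0, 3, 1, 2}); // the result is ordered as NCHW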
+*/ VARP _Transpose(VARP x, INTS perm) { auto permVar = _Const((const void*)perm.data(), {static_cast(perm.size())}, NHWC, halide_type_of()); return _Transpose(x, permVar); @@ -412,17 +513,25 @@ VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim) { op->main.AsReverseSequenceParam()->seqDim = seqDim; return (Variable::create(Expr::create(op.get(), {x, y}))); } -VARP _ChangeInputFormat(VARP x, Dimensionformat requireInput) { - if (nullptr == x || nullptr == x->getInfo()) { +/*Convert a variable to another format(possibily added before `input`). +Args: +input: A variable. +format: The target format. +Returns: +A variable. If `input` is already `format`, then return `input` directly, otherwize add a variable before `input` with `format`. +*/ + +VARP _ChangeInputFormat(VARP input, Dimensionformat format) { + if (nullptr == input || nullptr == input->getInfo()) { return nullptr; } - if (x->getInfo()->order == requireInput) { - return x; + if (input->getInfo()->order == format) { + return input; } - auto input = _Input(x->getInfo()->dim, requireInput, x->getInfo()->type); - auto convert = _Convert(input, x->getInfo()->order); - Variable::replace(x, convert); - return input; + auto input_before = _Input(input->getInfo()->dim, format, input->getInfo()->type); + auto convert = _Convert(input_before, input->getInfo()->order); + Variable::replace(input, convert); + return input_before; } VARP _Clone(VARP source, bool deepCopy) { @@ -498,26 +607,50 @@ VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, pool->main.AsPool()->type = (PoolType)type; return (Variable::create(Expr::create(std::move(pool), {originInput, originOutput, inputGrad}))); } - -VARP _Crop(VARP x, VARP s, int axis, INTS offset) { +/*Crop images. +Args: +images: 4-D variable of NC4HW4 format. +size: A variable. It takes the shape of `size` as output cropped variable's shape while omits the values/format of `size`. +axis: A int indicating the dimention to crop. Must be >=2. All dimensions up to but excluding `axis` are preserved, while the dimensions including and trailing `axis` are cropped. +offset: A vector of int indicating the offsets. length(`offset`) must be >=1 and <=2. If length(`offset`) is 1, then all dimensions are offset by this amount.Otherwise, the number of offsets must equal the number of cropped axes in each dimension accordingly. +Returns: +The cropped 4-D variable of NC4HW4 format. +*/ +VARP _Crop(VARP images, VARP size, int axis, INTS offset) { std::unique_ptr crop(new OpT); crop->type = OpType_Crop; crop->main.type = OpParameter_Crop; crop->main.value = new CropT; crop->main.AsCrop()->axis = axis; crop->main.AsCrop()->offset = offset; - return (Variable::create(Expr::create(std::move(crop), {x, s}))); + return (Variable::create(Expr::create(std::move(crop), {images, size}))); } -VARP _Resize(VARP x, float xScale, float yScale) { +/*Resize images. +Args: +images: 4-D variable of NC4HW4 format. +xScale: A float. +yScale: A float. +Returns: +The resized 4-D variable of NC4HW4 format. +*/ +VARP _Resize(VARP images, float xScale, float yScale) { std::unique_ptr resize(new OpT); resize->type = OpType_Resize; resize->main.type = OpParameter_Resize; resize->main.value = new ResizeT; resize->main.AsResize()->xScale = xScale; resize->main.AsResize()->yScale = yScale; - return (Variable::create(Expr::create(std::move(resize), {x}))); + return (Variable::create(Expr::create(std::move(resize), {images}))); } -VARP _Pad(VARP x, VARP pads, PadValueMode mode) { +/*Pads a variable. 
+Args: +x: A variable. +paddings: A variable of type Halide_Type_Int. The shape is [n, 2] where n is the rank of variable. +mode: A enum, One of PadValueMode_CONSTANT, PadValueMode_SYMMETRIC, or PadValueMode_REFLECT. +Returns: +A variable. Has the same type as x. +*/ +VARP _Pad(VARP x, VARP paddings, PadValueMode mode) { std::unique_ptr pad(new OpT); pad->type = OpType_Padding; pad->main.type = OpParameter_PadParam; @@ -536,28 +669,41 @@ VARP _Pad(VARP x, VARP pads, PadValueMode mode) { pad->main.AsPadParam()->mode = MNN::PadValueMode_CONSTANT; break; } - return (Variable::create(Expr::create(std::move(pad), {x, pads}))); + return (Variable::create(Expr::create(std::move(pad), {x, paddings}))); } -VARP _ExpandDims(VARP x, int axis) { +/*Returns a variable with an additional dimension inserted at index axis. +Args: +input: A variable. +axis: A int, specifying the dimension index at which to expand the shape of input. +Given an input of D dimensions, axis must be in range [-(D+1), D] (inclusive). +Returns: +A variable with the same data as input, with an additional dimension inserted at the index specified by axis. +*/ +VARP _ExpandDims(VARP input, int axis) { std::unique_ptr expand(new OpT); expand->type = OpType_ExpandDims; expand->main.type = OpParameter_ExpandDims; expand->main.value = new ExpandDimsT; expand->main.AsExpandDims()->axis = axis; - return (Variable::create(Expr::create(std::move(expand), {x}))); + return (Variable::create(Expr::create(std::move(expand), {input}))); } -VARP _ExpandDims(VARP x, VARP axis) { +VARP _ExpandDims(VARP input, VARP axis) { std::unique_ptr expand(new OpT); expand->type = OpType_ExpandDims; expand->main.type = OpParameter_ExpandDims; expand->main.value = new ExpandDimsT; - return (Variable::create(Expr::create(std::move(expand), {x, axis}))); + return (Variable::create(Expr::create(std::move(expand), {input, axis}))); } - -VARP _Shape(VARP x) { +/*Returns the shape of a variable. +Args: +input: A variable. +Returns: +A variable of Halide_Type_Int. +*/ +VARP _Shape(VARP input) { std::unique_ptr shape(new OpT); shape->type = OpType_Shape; - return (Variable::create(Expr::create(std::move(shape), {x}))); + return (Variable::create(Expr::create(std::move(shape), {input}))); } /*Stacks a list of rank-R variables into one rank-(R+1) variable. Packs the list of variables in `values` into a ariable with rank one higher than each variable in values, @@ -575,21 +721,33 @@ output: A stacked variable with the same type as `values`. VARP _Stack(VARPS values, int axis) { std::unique_ptr pack(new OpT); pack->type = OpType_Pack; - MNN_ASSERT(values.size()>0); - auto info_first = values[0]->getInfo(); - MNN_ASSERT(nullptr != info_first); pack->main.type = OpParameter_PackParam; pack->main.value = new PackParamT; - pack->main.AsPackParam()->dataType = (MNN::DataType)Utils::convertDataType(info_first->type); pack->main.AsPackParam()->axis = axis; return (Variable::create(Expr::create(std::move(pack), values))); } -VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method) { +/*Extracts crops from the input image variable and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) +to a common output size specified by crop_size. +Returns a variable with crops from the input image at positions defined at the bounding box locations in boxes. +The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed size = [crop_height, crop_width]. 
+The result is a 4-D tensor [num_boxes, crop_height, crop_width, depth](supposing NHWC format). +Arguments: +image: A 4-D variable of shape [batch, image_height, image_width, depth](supposing NHWC format). Both image_height and image_width need to be positive. +boxes: A 2-D variable of shape [num_boxes, 4]. The i-th row of the variable specifies the coordinates of a box in the box_ind[i] image and is specified in normalized coordinates [y1, x1, y2, x2]. +A normalized coordinate value of y is mapped to the image coordinate at y * (image_height - 1), so as the [0, 1] interval of normalized image height is mapped to [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the [0, 1] range are allowed, in which case we use extrapolation_value to extrapolate the input image values. +box_ind: A 1-D variable of shape [num_boxes] with int values in [0, batch). The value of box_ind[i] specifies the image that the i-th box refers to. +crop_size: A 1-D variable of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive. +method: A enum, either CropAndResizeMethod_NEAREST, or CropAndResizeMethod_BILINEAR, default to CropAndResizeMethod_BILINEAR. +extrapolation_value: Value used for extrapolation, when applicable. +Returns: +Output: A 4-D variable of shape [num_boxes, crop_height, crop_width, depth](supposing NHWC format). +*/ +VARP _CropAndResize(VARP image, VARP boxes, VARP box_ind, VARP crop_size, InterpolationMethod method, float extrapolation_value) { std::unique_ptr car(new OpT); car->type = OpType_CropAndResize; car->main.type = OpParameter_CropAndResize; car->main.value = new CropAndResizeT; - car->main.AsCropAndResize()->extrapolationValue = extrapolation; + car->main.AsCropAndResize()->extrapolationValue = extrapolation_value; switch (method) { case NEAREST: car->main.AsCropAndResize()->method = CropAndResizeMethod_NEAREST; @@ -599,29 +757,60 @@ VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extr car->main.AsCropAndResize()->method = CropAndResizeMethod_BILINEAR; break; } - return (Variable::create(Expr::create(std::move(car), {image, boxes, indexes, sizes}))); + return (Variable::create(Expr::create(std::move(car), {image, boxes, box_ind, crop_size}))); } -VARP _Fill(VARP s, VARP v) { +/*Creates a variable filled with a scalar value. +Args: +dims: A variable. Must be 1-D Halide_Type_Int. Represents the shape of the output variable. +value: A variable. 0-D (scalar). Value to fill the returned variable. +Returns: +A variable. Has the same type as value. +*/ +VARP _Fill(VARP dims, VARP value) { std::unique_ptr fill(new OpT); fill->type = OpType_Fill; fill->main.type = OpParameter_Fill; fill->main.value = new FillT; - return (Variable::create(Expr::create(std::move(fill), {s, v}))); + return (Variable::create(Expr::create(std::move(fill), {dims, value}))); } -VARP _Tile(VARP x, VARP mul) { +/*Constructs a variable by tiling a given variable. +Args: +input: A variable. 1-D or higher. +multiples: A variable. Must be 1-D Halide_Type_Int.Length must be the same as the number of dimensions in input. +Returns: +A variable. Has the same type as input. 
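+For example (illustrative usage only; input is assumed to be a 2-D variable):
+    std::vector<int> reps = {1, 2};
+    auto multiples = _Const(reps.data(), {2}, NHWC, halide_type_of<int>());
+    auto tiled = _Tile(input, multiples); // the second dimension is repeated twice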
+*/ +VARP _Tile(VARP input, VARP multiples) { std::unique_ptr tile(new OpT); tile->type = OpType_Tile; - return (Variable::create(Expr::create(std::move(tile), {x, mul}))); + return (Variable::create(Expr::create(std::move(tile), {input, multiples}))); } -VARP _Gather(VARP embedding, VARP indices) { +/*Gather slices from params according to indices. +Arguments: +params: The variable from which to gather values. +indices: Index variable. Must be Halide_Type_Int in range [0, ndims(params)-1]. +Returns: +Output: Values from params gathered from indices given by indices. +*/ +VARP _Gather(VARP params, VARP indices) { std::unique_ptr gather(new OpT); gather->type = OpType_Gather; gather->main.value = new GatherT; - return (Variable::create(Expr::create(std::move(gather), {embedding, indices}))); + return (Variable::create(Expr::create(std::move(gather), {params, indices}))); } +/*Gather slices from params axis according to indices. +Arguments: +params: The variable from which to gather values. +indices: Index variable. Must be Halide_Type_Int in range [0, ndims(params)-1]. +axis: A int, the axis in params to gather indices from. Supports negative indexes. +If set to 0, it's same as _Gather. Currently only 0 is supported. +Returns: +Output: Values from params gathered from indices given by indices. +*/ VARP _GatherV2(VARP params, VARP indices, VARP axis) { std::unique_ptr gather(new OpT); gather->type = OpType_GatherV2; + gather->main.type = OpParameter_GatherV2; gather->main.value = new GatherV2T; if (axis.get()) { return (Variable::create(Expr::create(std::move(gather), {params, indices, axis}))); @@ -629,25 +818,32 @@ VARP _GatherV2(VARP params, VARP indices, VARP axis) { return (Variable::create(Expr::create(std::move(gather), {params, indices}))); } } - -VARP _Squeeze(VARP x, INTS axes) { +/*Removes dimensions of size 1 from the shape of a variable. +Args: +input: A variable. The input to squeeze. +axis: A vector, Defaults to {}. If specified, only squeezes the dimensions listed. The dimension index starts at 0. +Must be in the range [-rank(input), rank(input)). +Returns: +A variable. Has the same type as input. Contains the same data as input, but has one or more dimensions of size 1 removed. +*/ +VARP _Squeeze(VARP input, INTS axis) { std::unique_ptr squeeze(new OpT); squeeze->type = OpType_Squeeze; auto squeezeParam = new SqueezeParamT; - squeezeParam->squeezeDims = axes; + squeezeParam->squeezeDims = axis; squeeze->main.type = OpParameter_SqueezeParam; squeeze->main.value = squeezeParam; - return Variable::create(Expr::create(std::move(squeeze), {x})); + return Variable::create(Expr::create(std::move(squeeze), {input})); } -VARP _Unsqueeze(VARP x, INTS axes) { - std::unique_ptr squeeze(new OpT); - squeeze->type = OpType_Unsqueeze; +VARP _Unsqueeze(VARP input, INTS axis) { + std::unique_ptr unsqueeze(new OpT); + unsqueeze->type = OpType_Unsqueeze; auto squeezeParam = new SqueezeParamT; - squeezeParam->squeezeDims = axes; - squeeze->main.type = OpParameter_SqueezeParam; - squeeze->main.value = squeezeParam; - return Variable::create(Expr::create(std::move(squeeze), {x})); + squeezeParam->squeezeDims = axis; + unsqueeze->main.type = OpParameter_SqueezeParam; + unsqueeze->main.value = squeezeParam; + return Variable::create(Expr::create(std::move(unsqueeze), {input})); } /*Computes exponential linear: alpha * (exp(features) - 1) if < 0, features otherwise. features: A variable of type Halide_Type_Float @@ -784,7 +980,6 @@ Output: Rank k variable of the same shape as input. 
The extracted banded tensor. VARP _MatrixBandPart(VARP input, VARP num_lower, VARP num_upper) { std::unique_ptr op(new OpT); op->type = OpType_MatrixBandPart; - auto lrnParam = new LRNT; op->main.type = OpParameter_NONE; return (Variable::create(Expr::create(std::move(op), {input, num_lower, num_upper}))); } @@ -988,7 +1183,119 @@ VARP _Range(VARP start, VARP limit, VARP delta) { op->main.value = rangeParam; return Variable::create(Expr::create(std::move(op), {start, limit, delta})); } - +/*Rearranges data from depth into blocks of spatial data. +It is the reverse transformation of SpaceToDepth. More specifically, +it outputs a copy of the input variable where values from the depth dimension are moved in spatial blocks to the height and width dimensions. +Args: +input: A variable. +block_size: An int that is >= 2. The size of the spatial block, same as in Space2Depth. +Returns: +A variable. Has the same type as input. +*/ +VARP _DepthToSpace(VARP input, int block_size) { + std::unique_ptr op(new OpT); + op->type = OpType_DepthToSpace; + auto depthtospaceParam = new DepthSpaceParamT; + depthtospaceParam->blockSize = block_size; + op->main.type = OpParameter_DepthSpaceParam; + op->main.value = depthtospaceParam; + return Variable::create(Expr::create(std::move(op), {input})); +} +/*SSD network's priorbox layer. +Args: +feature: A variable. Contains the feature map. Namely bottom[0] in caffe. +image: A variable. Contains the image. Namely bottom[1] in caffe. +min_size: Minimum box size (in pixels). +max_size: Maximum box size (in pixels). +aspect_ratio: Various of aspect ratios. Duplicate ratios are ignored. If none is provided, use default 1.0. +flip: If true, flips each aspect ratio. For example, if there is aspect ratio "r", generates aspect ratio "1.0/r" as well. Default true. +clip: If true, clips the prior so that it is within [0, 1]. Default false. +variance: Variance for adjusting the prior bboxes. +img_h: image height. If 0, uses information in image. +img_w: image width. If 0, uses information in image. +step_h: step in height. +step_w: step in width. +offset: Offset to the top left corner of each cell. +Returns: +A variable. +*/ +VARP _PriorBox(VARP feature, VARP image, std::vector min_size, std::vector max_size, std::vectoraspect_ratio, + bool flip, bool clip, std::vectorvariance, + unsigned int img_h, unsigned int img_w, float step_h, float step_w, float offset) { + std::unique_ptr op(new OpT); + op->type = OpType_PriorBox; + auto param = new PriorBoxT; + param->minSizes = min_size; + param->maxSizes = max_size; + param->aspectRatios = aspect_ratio; + param->flip = flip; + param->clip = clip; + param->variances = variance; + param->imageHeight = img_h; + param->imageWidth = img_w; + param->stepHeight = step_h; + param->stepWidth = step_w; + param->offset = offset; + op->main.type = OpParameter_PriorBox; + op->main.value = param; + return Variable::create(Expr::create(std::move(op), {feature, image})); +} +/*SSD network's permute layer. +Args: +input: A variable. Contains the feature map. Namely bottom[0] in caffe. +dims: A vector. Contains the order. +Returns: +A variable. +*/ +VARP _Permute(VARP input, INTS dims) { + std::unique_ptr op(new OpT); + op->type = OpType_Permute; + auto param = new PermuteT; + param->dims = dims; + op->main.type = OpParameter_Permute; + op->main.value = param; + return Variable::create(Expr::create(std::move(op), {input})); +} +/*SSD network's detectionoutput layer. +Args: +location: A variable. +confidence: A variable. +priorbox: A variable. 
+num_classes: number of classes. +share_location: indicates wheter share location between different classes, default true. +background_label_id: default = 0. +nms_threshhold: nonmaximumsupression threshhold. +mns_topk: nonmaximumsupression topk. +code_type: indicates the mode to encode bbox, default = CORNER. +variance_encoded_in_target: indicates whether encode variance in target, default false. +keep_top_k: indicates the number of boxes kept, default -1(all boxes are kept). +confidence_threshold: the threshhold for confidence. +visualize_threshold: The threshold used to visualize the detection results. +Returns: +A variable. +*/ +VARP _DetectionOutput(VARP location, VARP confidence, VARP priorbox, + unsigned int num_classes, bool share_location, int background_label_id, + float nms_threshhold, int nms_topk, int code_type, + bool variance_encoded_in_target, + int keep_top_k, float confidence_threshold, float visualize_threshold){ + std::unique_ptr op(new OpT); + op->type = OpType_DetectionOutput; + auto param = new DetectionOutputT; + param->classCount = num_classes; + param->shareLocation = share_location; + param->backgroundLable = background_label_id; + param->nmsThresholdold = nms_threshhold; + param->nmsTopK = nms_topk; + param->codeType = code_type; + param->varianceEncodedTarget = variance_encoded_in_target; + param->keepTopK = keep_top_k; + param->confidenceThreshold = confidence_threshold; + param->objectnessScore = visualize_threshold; + op->main.type = OpParameter_DetectionOutput; + op->main.value = param; + return Variable::create(Expr::create(std::move(op), {location, confidence, priorbox})); +} VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners) { std::unique_ptr interp(new OpT); @@ -1004,6 +1311,42 @@ VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int interp->main.type = OpParameter_Interp; return Variable::create(Expr::create(std::move(interp), xs)); } +VARP _ZeroGrad(VARP x) { + std::unique_ptr op(new OpT); + op->type = OpType_ZeroGrad; + return Variable::create(Expr::create(std::move(op), {x})); +} + +VARP _Conv(std::vector&& weight, std::vector&& bias, std::vector&& scale, VARP x, INTS channel, INTS kernelSize, + PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads) { + std::unique_ptr convOp(new OpT); + convOp->type = OpType_ConvInt8; + if (channel[0] == channel[1] && channel[0] == group) { + convOp->type = OpType_DepthwiseConvInt8; + } + convOp->main.type = OpParameter_Convolution2D; + convOp->main.value = new Convolution2DT; + auto conv2D = convOp->main.AsConvolution2D(); + conv2D->common.reset(new Convolution2DCommonT); + conv2D->common->padMode = _convertPadMode(pad); + conv2D->common->padX = pads[0]; + conv2D->common->padY = pads[1]; + conv2D->common->strideX = stride[0]; + conv2D->common->strideY = stride[1]; + conv2D->common->group = group; + conv2D->common->outputCount = channel[1]; + conv2D->common->inputCount = channel[0]; + conv2D->common->dilateX = dilate[0]; + conv2D->common->dilateY = dilate[1]; + conv2D->common->kernelX = kernelSize[0]; + conv2D->common->kernelY = kernelSize[1]; + MNN_ASSERT(weight.size() == channel[1] * (channel[0] / group) * kernelSize[0] * kernelSize[1]); + conv2D->symmetricQuan.reset(new QuantizedFloatParamT); + conv2D->symmetricQuan->bias = std::move(bias); + conv2D->symmetricQuan->scale = std::move(scale); + conv2D->symmetricQuan->weight = std::move(weight); + return (Variable::create(Expr::create(convOp.get(), 
{x}))); +} } // namespace Express } // namespace MNN diff --git a/express/Utils.hpp b/express/Utils.hpp index 110dc3a0..18de5749 100644 --- a/express/Utils.hpp +++ b/express/Utils.hpp @@ -6,11 +6,20 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#ifndef Utils_hpp +#define Utils_hpp #include #include +#include namespace MNN { namespace Express { +struct Expr::Inside { + std::vector mInputInfos; + std::vector mOutputInfos; + Executor::Requirement mReq; + std::shared_ptr mCache; +}; class Utils { public: static void copyInfoToTensor(Tensor* dest, const Variable::Info* source); @@ -22,3 +31,4 @@ public: }; } // namespace Express } // namespace MNN +#endif diff --git a/include/MNN/expr/Executor.hpp b/include/MNN/expr/Executor.hpp index 426a2e53..971d1a9e 100644 --- a/include/MNN/expr/Executor.hpp +++ b/include/MNN/expr/Executor.hpp @@ -5,41 +5,98 @@ // Created by MNN on 2019/07/25. // Copyright © 2018, Alibaba Group Holding Limited // - +#ifndef Executor_hpp +#define Executor_hpp #include #include #include #include #include +#include #include namespace MNN { class Backend; +class Execution; namespace Express { -class Solution; class MNN_PUBLIC Executor { public: + class ComputeCache { + public: + void setShapeDirty(); + void setContentDirty(); + + ErrorCode compute(); + ErrorCode resize(); + Tensor* output(EXPRP outputExpr, int index, bool host = true) const; + void dup(EXPRP src, EXPRP dst); + void recycle(Expr* expr); + struct TensorContent { + std::shared_ptr tensor; + int refCount = 0; + void reset(); + }; + struct Unit { + std::vector inputs; + std::vector inputFromCache; + std::vector outputs; + const Expr* origin; + std::shared_ptr exe; + }; + static void create(const std::vector& outputs, std::map& units, std::set>&& inputCaches, std::vector&& tensors, std::shared_ptr bn, std::shared_ptr backendBn); + + ~ ComputeCache(); + void addLink(std::shared_ptr cache); + bool valid() const { + return !mOutputTensors.empty(); + } + private: + ComputeCache(){}; + std::set> mInputs; + // First is Host Tensor, Second is Device Tensor + std::map>> mOutputTensors; + std::vector mTensors; + std::vector mUnits; + std::vector> mLinks; + bool mContentDirty = true; + bool mShapeDirty = true; + std::shared_ptr mBackend; + std::shared_ptr mBackupBackend; + }; struct Requirement { std::vector contentNeedContent; std::vector shapeNeedContent; std::vector supportError; }; - virtual ~Executor(); - virtual Requirement onGetRequirement(Expr* expr) const; - virtual ErrorCode onComputeInfo(Expr* expr); - virtual ErrorCode onComputeContent(Expr* expr); - void recycle(Expr* expr); + ~Executor(); + Requirement getRequirement(Expr* expr) const; + ErrorCode computeInfo(Expr* expr); + void makeCache(std::vector expr); + ErrorCode runCache(std::shared_ptr cache); void setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread); enum GCFlag { - ALL, - UNACTIVE + FULL, + PART }; - void gc(GCFlag flag = ALL); + void gc(GCFlag flag = FULL); static std::shared_ptr getGlobalExecutor(); + void resetProfile(); + void dumpProfile(); + void addOpCostTime(int op, float costTime); + class Profiler; private: + void _addToCache(const std::vector>& caches); + void _resetCache(); + void _visit(EXPRP expr, std::map& units, std::set>& inputCaches, std::vector& tensors); + Executor(std::shared_ptr backend); std::shared_ptr mBackend; - std::map> mSolutions; + std::shared_ptr mBackupBackend; std::mutex mMutex; + std::vector> mStack; + std::vector mInputs; + std::vector mOutputs; + std::shared_ptr 
mProfiler; }; } // namespace Express } // namespace MNN +#endif diff --git a/include/MNN/expr/Expr.hpp b/include/MNN/expr/Expr.hpp index aaf66357..ee7cbf5c 100644 --- a/include/MNN/expr/Expr.hpp +++ b/include/MNN/expr/Expr.hpp @@ -143,6 +143,9 @@ public: static std::vector getExecuteOrder(const std::vector& output); static void save(const std::vector& vars, const char* fileName); static void save(const std::vector& vars, NetT* dest); + + // Pack a few Variable to compute in one pipeline + static void prepareCompute(const std::vector& vars); size_t linkNumber() const; const std::vector& toExprs() const; @@ -170,6 +173,7 @@ public: struct Inside; static EXPRP create(Variable::Info&& info); static EXPRP create(const OpT* op, std::vector inputs, int outputSize = 1); + static EXPRP create(std::pair, int> extra, std::vector&& inputs, int outputSize = 1); static EXPRP create(std::unique_ptr&& op, std::vector inputs, int outputSize = 1) { return create(op.get(), inputs, outputSize); } @@ -186,7 +190,6 @@ public: } static void replace(EXPRP oldExpr, EXPRP newExpr); bool requireInfo(); - bool requireCompute(); void visitOutputs(const std::function& visit); static void visit(EXPRP expr, const std::function& before, const std::function& after); @@ -209,15 +212,22 @@ public: } VARP::InputType inputType() const {return mType;} - Variable::Info* outputInfo(int index); + Variable::Info* outputInfo(int index) const; std::pair, int> extra() const { return std::make_pair(mExtraBuffer, mOpBufferSize); } bool setInfoDirty(); + std::shared_ptr inside() const { + return mInside; + } + bool valid() const { + return mValid; + } + bool infoDirty() const { + return mInfoDirty; + } private: - void set(const OpT* op); static void _addLinkForInputs(EXPRP expr); - bool setContentDirty(int inputIndex); Expr(int outputSize); @@ -230,7 +240,6 @@ private: bool mValid = true; bool mInfoDirty = true; - bool mContentDirty = true; std::shared_ptr mExtraBuffer; int mOpBufferSize = 0; std::string mName; diff --git a/include/MNN/expr/MathOp.hpp b/include/MNN/expr/MathOp.hpp index 0722d2ec..bc4975b7 100644 --- a/include/MNN/expr/MathOp.hpp +++ b/include/MNN/expr/MathOp.hpp @@ -31,6 +31,7 @@ MNN_PUBLIC VARP _Sign(VARP a); MNN_PUBLIC VARP _Abs(VARP x); MNN_PUBLIC VARP _Negative(VARP x); MNN_PUBLIC VARP _Floor(VARP x); +MNN_PUBLIC VARP _Round(VARP x); MNN_PUBLIC VARP _Ceil(VARP x); MNN_PUBLIC VARP _Square(VARP x); MNN_PUBLIC VARP _Sqrt(VARP x); diff --git a/include/MNN/expr/NeuralNetWorkOp.hpp b/include/MNN/expr/NeuralNetWorkOp.hpp index 527c51f7..41d096af 100644 --- a/include/MNN/expr/NeuralNetWorkOp.hpp +++ b/include/MNN/expr/NeuralNetWorkOp.hpp @@ -11,8 +11,8 @@ namespace Express { enum PaddingMode {CAFFE, VALID, SAME}; enum PoolingMode {MAXPOOL, AVEPOOL}; enum PadValueMode {CONSTANT, REFLECT, SYMMETRIC}; -MNN_PUBLIC VARP _Input(INTS dims = {}, Dimensionformat format = NC4HW4, halide_type_t type = halide_type_of()); -MNN_PUBLIC VARP _Clone(VARP source, bool deepCopy=false); +MNN_PUBLIC VARP _Input(INTS shape = {}, Dimensionformat data_format = NC4HW4, halide_type_t dtype = halide_type_of()) ; +MNN_PUBLIC VARP _Clone(VARP source, bool deepCopy = false); MNN_PUBLIC VARP _Scalar(const void* ptr, halide_type_t type); @@ -22,8 +22,8 @@ VARP _Scalar(T value) { } -MNN_PUBLIC VARP _Const(float value, INTS dims = {}, Dimensionformat format = NHWC); -MNN_PUBLIC VARP _Const(const void* ptr, INTS dims = {}, Dimensionformat format = NHWC, +MNN_PUBLIC VARP _Const(float value, INTS shape = {}, Dimensionformat format = NHWC); +MNN_PUBLIC VARP 
_Const(const void* ptr, INTS shape = {}, Dimensionformat format = NHWC, halide_type_t type = halide_type_of()); MNN_PUBLIC VARP _TrainableParam(float value, INTS dims, Dimensionformat format); MNN_PUBLIC VARP _TrainableParam(const void* ptr, INTS dims, Dimensionformat format, @@ -37,50 +37,51 @@ MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, VA PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); MNN_PUBLIC VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); -MNN_PUBLIC VARP _MaxPool(VARP x, INTS kernel, INTS stride, PaddingMode pad = VALID, INTS pads= {0, 0}); -MNN_PUBLIC VARP _AvePool(VARP x, INTS kernel, INTS stride, PaddingMode pad = VALID, INTS pads= {0, 0}); -MNN_PUBLIC VARP _Reshape(VARP x, INTS dim, Dimensionformat format = NHWC); +MNN_PUBLIC VARP _MaxPool(VARP x, INTS kernel, INTS stride = {1, 1}, PaddingMode pad = VALID, INTS pads= {0, 0}); +MNN_PUBLIC VARP _AvePool(VARP x, INTS kernel, INTS stride = {1, 1}, PaddingMode pad = VALID, INTS pads= {0, 0}); +MNN_PUBLIC VARP _Reshape(VARP x, INTS shape, Dimensionformat original_format = NHWC); MNN_PUBLIC VARP _Reshape(VARP x, VARP shape); MNN_PUBLIC VARP _Scale(VARP x, int channels, std::vector&& scales, std::vector&& bias); MNN_PUBLIC VARP _Relu(VARP x, float slope = 0.0f); MNN_PUBLIC VARP _Relu6(VARP x); MNN_PUBLIC VARP _PRelu(VARP x, std::vector &&slopes); -MNN_PUBLIC VARP _Softmax(VARP x, int axis); -MNN_PUBLIC VARP _Softplus(VARP x); -MNN_PUBLIC VARP _Softsign(VARP x); -MNN_PUBLIC std::vector _Split(VARP x, INTS points, int axis); +MNN_PUBLIC VARP _Softmax(VARP logits, int axis = -1); +MNN_PUBLIC VARP _Softplus(VARP features); +MNN_PUBLIC VARP _Softsign(VARP features); +MNN_PUBLIC std::vector _Split(VARP value, INTS size_splits, int axis = 0); MNN_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes); MNN_PUBLIC VARP _StridedSlice(VARP x, VARP begin, VARP end, VARP strided, halide_type_t type, int32_t beginMask, int32_t endMask, int32_t ellipsisMask, int32_t newAxisMask, int32_t shrinkAxisMask); -MNN_PUBLIC VARP _Concat(VARPS xs, int axis); -MNN_PUBLIC VARP _Convert(VARP x, Dimensionformat dest); +MNN_PUBLIC VARP _Concat(VARPS values, int axis); +MNN_PUBLIC VARP _Convert(VARP input, Dimensionformat format); MNN_PUBLIC VARP _Transpose(VARP x, INTS perm); MNN_PUBLIC VARP _Transpose(VARP x, VARP perm); MNN_PUBLIC VARP _ChannelShuffle(VARP x, int group); -MNN_PUBLIC VARP _ChangeInputFormat(VARP x, Dimensionformat requireInput); +MNN_PUBLIC VARP _ChangeInputFormat(VARP input, Dimensionformat format); MNN_PUBLIC VARP _Conv2DBackPropFilter(VARP weight, VARP input, VARP inputGrad, PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}); MNN_PUBLIC VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, PaddingMode pad = VALID, INTS pads= {0, 0}); // FIXME: move the api to Array Ops MNN_PUBLIC VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim); // FIXME: move the api to Image Ops -MNN_PUBLIC VARP _Crop(VARP x, VARP s, int axis, INTS offset); -MNN_PUBLIC VARP _Resize(VARP x, float xScale, float yScale); -MNN_PUBLIC VARP _Pad(VARP x, VARP pads, PadValueMode mode = CONSTANT); -MNN_PUBLIC VARP _ExpandDims(VARP x, int axis); -MNN_PUBLIC VARP _ExpandDims(VARP x, VARP axis); +MNN_PUBLIC VARP _Crop(VARP images, VARP size, int axis, INTS offset); +MNN_PUBLIC VARP 
_Resize(VARP images, float xScale, float yScale); +MNN_PUBLIC VARP _Pad(VARP x, VARP paddings, PadValueMode mode = CONSTANT); +MNN_PUBLIC VARP _ExpandDims(VARP input, int axis); +MNN_PUBLIC VARP _ExpandDims(VARP input, VARP axis); -MNN_PUBLIC VARP _Shape(VARP x); +MNN_PUBLIC VARP _Shape(VARP input); MNN_PUBLIC VARP _Stack(VARPS values, int axis=0); enum InterpolationMethod {BILINEAR, NEAREST}; -MNN_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method); -MNN_PUBLIC VARP _Fill(VARP s, VARP v); -MNN_PUBLIC VARP _Tile(VARP x, VARP mul); -MNN_PUBLIC VARP _Gather(VARP embedding, VARP indices); +MNN_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP box_ind, VARP crop_size, + InterpolationMethod method, float extrapolation_value = 0.0); +MNN_PUBLIC VARP _Fill(VARP dims, VARP value); +MNN_PUBLIC VARP _Tile(VARP input, VARP multiples); +MNN_PUBLIC VARP _Gather(VARP params, VARP indices); MNN_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = nullptr); -MNN_PUBLIC VARP _Squeeze(VARP x, INTS axes = {}); -MNN_PUBLIC VARP _Unsqueeze(VARP x, INTS axes = {}); +MNN_PUBLIC VARP _Squeeze(VARP input, INTS axis = {}); +MNN_PUBLIC VARP _Unsqueeze(VARP input, INTS axis = {}); MNN_PUBLIC VARP _BatchToSpaceND(VARP input, VARP block_shape, VARP crops); MNN_PUBLIC VARP _GatherND(VARP params, VARP indices); MNN_PUBLIC VARP _Selu(VARP features, float scale, float alpha); @@ -95,6 +96,22 @@ MNN_PUBLIC VARP _ZerosLike(VARP input); MNN_PUBLIC std::vector _Unstack(VARP value, int axis=0); MNN_PUBLIC VARP _Rank(VARP input); MNN_PUBLIC VARP _Range(VARP start, VARP limit, VARP delta); +MNN_PUBLIC VARP _DepthToSpace(VARP input, int block_size); +MNN_PUBLIC VARP _PriorBox(VARP feature, VARP image, + std::vector min_size, std::vector max_size, std::vectoraspect_ratio, + bool flip, bool clip, std::vectorvariance, + unsigned int img_h, unsigned int img_w, float step_h, float step_w, float offset = 0.5); +MNN_PUBLIC VARP _Permute(VARP input, INTS dims); +MNN_PUBLIC VARP _DetectionOutput(VARP location, VARP confidence, VARP priorbox, + unsigned int num_classes, bool share_location, int background_label_id, + float nms_threshhold, int nms_topk, int code_type, + bool variance_encoded_in_target, + int keep_top_k, float confidence_threshold, float visualize_threshold); MNN_PUBLIC VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners); + +MNN_PUBLIC VARP _ZeroGrad(VARP x); +MNN_PUBLIC VARP _Conv(std::vector&& weight, std::vector&& bias, std::vector&& scale, VARP x, INTS channel, INTS kernelSize, + PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads); + } // namespace Express } // namespace MNN diff --git a/project/android/gradlew.bat b/project/android/gradlew.bat deleted file mode 100644 index 24467a14..00000000 --- a/project/android/gradlew.bat +++ /dev/null @@ -1,100 +0,0 @@ -@rem -@rem Copyright 2015 the original author or authors. -@rem -@rem Licensed under the Apache License, Version 2.0 (the "License"); -@rem you may not use this file except in compliance with the License. -@rem You may obtain a copy of the License at -@rem -@rem https://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-@rem See the License for the specific language governing permissions and -@rem limitations under the License. -@rem - -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem Gradle startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME% - -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar - -@rem Execute Gradle -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! 
-if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega diff --git a/project/ios/MNN.xcodeproj/project.pbxproj b/project/ios/MNN.xcodeproj/project.pbxproj index 07c658aa..5951e406 100644 --- a/project/ios/MNN.xcodeproj/project.pbxproj +++ b/project/ios/MNN.xcodeproj/project.pbxproj @@ -87,19 +87,19 @@ 1F501F892397BA5B004E8721 /* MNNForwardType.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F7C2397BA5A004E8721 /* MNNForwardType.h */; settings = {ATTRIBUTES = (Public, ); }; }; 1F501F8B2397BA5B004E8721 /* MNNSharedContext.h in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F7E2397BA5B004E8721 /* MNNSharedContext.h */; settings = {ATTRIBUTES = (Public, ); }; }; 1F501F9D2397BB00004E8721 /* expr in Headers */ = {isa = PBXBuildFile; fileRef = 1F501F762397BA5A004E8721 /* expr */; settings = {ATTRIBUTES = (Public, ); }; }; - 1FD952CF23A89CA100888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9533C23A89CA100888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9534223A89CA100888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9535B23A89CA200888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9536D23A89CA200888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD953D723A89CD100888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9549323A89D1300888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9553F23A89D4F00888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9566323A89D8A00888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD9566723A89D8A00888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 1FD956A623A89D8A00888FC3 /* (null) in Headers */ = {isa = PBXBuildFile; }; 22EA50A92051677800C3906C /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0F78AC261FCD495800205A7C /* Metal.framework */; settings = {ATTRIBUTES = (Required, ); }; }; 22EA50B02051681600C3906C /* MNN.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0F1465B71FA18D1000F9860A /* MNN.framework */; }; + 4829A2D623CC26AE00623BF5 /* MatMulTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2CB23CC26AD00623BF5 /* MatMulTest.cpp */; }; + 4829A2D723CC26AE00623BF5 /* GatherTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2CC23CC26AD00623BF5 /* GatherTest.cpp */; }; + 4829A2D823CC26AE00623BF5 /* MatrixBandTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2CD23CC26AD00623BF5 /* MatrixBandTest.cpp */; }; + 4829A2D923CC26AE00623BF5 /* ExtraTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2CE23CC26AD00623BF5 /* ExtraTest.cpp */; }; + 4829A2DA23CC26AE00623BF5 /* AllAnyTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2CF23CC26AD00623BF5 /* AllAnyTest.cpp */; }; + 4829A2DB23CC26AE00623BF5 /* MultiThreadLoad.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2D023CC26AD00623BF5 /* MultiThreadLoad.cpp */; }; + 4829A2DC23CC26AE00623BF5 /* ConvInt8Test.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2D123CC26AD00623BF5 /* ConvInt8Test.cpp */; }; + 4829A2DD23CC26AE00623BF5 /* ExprResizeTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2D223CC26AD00623BF5 /* ExprResizeTest.cpp */; }; + 4829A2DE23CC26AE00623BF5 /* ReverseSequenceTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2D323CC26AD00623BF5 /* ReverseSequenceTest.cpp */; }; + 4829A2DF23CC26AE00623BF5 /* ReplaceTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2D423CC26AD00623BF5 /* ReplaceTest.cpp */; }; + 
4829A2E023CC26AE00623BF5 /* PaddingTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 4829A2D523CC26AD00623BF5 /* PaddingTest.cpp */; }; 486FDF47223E4B2800F487FB /* MetalBinary.mm in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF44223E4B2700F487FB /* MetalBinary.mm */; }; 486FDF48223E4B2800F487FB /* MetalBinary.metal in Sources */ = {isa = PBXBuildFile; fileRef = 486FDF45223E4B2800F487FB /* MetalBinary.metal */; }; 4888759B215B639F0079B12E /* MetalSpatialProduct.mm in Sources */ = {isa = PBXBuildFile; fileRef = 488873C9215B639D0079B12E /* MetalSpatialProduct.mm */; }; @@ -239,22 +239,49 @@ 923B7FA721A6C940002AFCE0 /* MetalCropAndResize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 923B7FA621A6C940002AFCE0 /* MetalCropAndResize.metal */; }; 9243106C2239FE0B0016DA25 /* MetalSize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9243106A2239FE0A0016DA25 /* MetalSize.mm */; }; 9243106F2239FE190016DA25 /* MetalSize.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9243106E2239FE190016DA25 /* MetalSize.metal */; }; - 924F131921A81C74006D46A4 /* (null) in Sources */ = {isa = PBXBuildFile; }; - 924F131C21A81C80006D46A4 /* (null) in Sources */ = {isa = PBXBuildFile; }; - 924F132221ABD470006D46A4 /* (null) in Sources */ = {isa = PBXBuildFile; }; - 924F132521ABD47F006D46A4 /* (null) in Sources */ = {isa = PBXBuildFile; }; - 924F132721ABEA28006D46A4 /* (null) in Sources */ = {isa = PBXBuildFile; }; + 924F131921A81C74006D46A4 /* MetalTranspose.mm in Sources */ = {isa = PBXBuildFile; fileRef = 924F131721A81C74006D46A4 /* MetalTranspose.mm */; }; + 924F131C21A81C80006D46A4 /* MetalTranspose.metal in Sources */ = {isa = PBXBuildFile; fileRef = 924F131B21A81C80006D46A4 /* MetalTranspose.metal */; }; + 924F132221ABD470006D46A4 /* MetalQuantizedSoftmax.mm in Sources */ = {isa = PBXBuildFile; fileRef = 924F132021ABD470006D46A4 /* MetalQuantizedSoftmax.mm */; }; + 924F132521ABD47F006D46A4 /* MetalQuantizedSoftmax.metal in Sources */ = {isa = PBXBuildFile; fileRef = 924F132421ABD47F006D46A4 /* MetalQuantizedSoftmax.metal */; }; + 924F132721ABEA28006D46A4 /* MetalFixedPoint.metal in Sources */ = {isa = PBXBuildFile; fileRef = 924F132621ABEA28006D46A4 /* MetalFixedPoint.metal */; }; 925702D021EF0F5300A2A3CA /* TensorUtilsTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925702CE21EF0F5300A2A3CA /* TensorUtilsTest.cpp */; }; 925702D221EF270D00A2A3CA /* BufferAllocatorTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925702D121EF270D00A2A3CA /* BufferAllocatorTest.cpp */; }; 925702F621EF604400A2A3CA /* SizeComputerTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925702F521EF604400A2A3CA /* SizeComputerTest.cpp */; }; + 92575979219EA07F00918499 /* MetalStridedSlice.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92575977219EA07F00918499 /* MetalStridedSlice.mm */; }; + 9257597C219EA08400918499 /* MetalStridedSlice.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9257597B219EA08400918499 /* MetalStridedSlice.metal */; }; + 9258013E2223B77C00555D43 /* MetalConvolutionDepthwise.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9258013C2223B77C00555D43 /* MetalConvolutionDepthwise.mm */; }; + 925801412223B79600555D43 /* MetalConvolutionDepthwise.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925801402223B79600555D43 /* MetalConvolutionDepthwise.metal */; }; + 925801442223B8D100555D43 /* MetalConvolutionCommon.mm in Sources */ = {isa = PBXBuildFile; fileRef = 925801422223B8D100555D43 /* MetalConvolutionCommon.mm */; }; + 925A89122223951200D22428 /* 
MetalConvolutionActivation.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925A89112223951200D22428 /* MetalConvolutionActivation.metal */; }; + 925A8915222395ED00D22428 /* MetalConvolution1x1.mm in Sources */ = {isa = PBXBuildFile; fileRef = 925A8913222395ED00D22428 /* MetalConvolution1x1.mm */; }; + 925A89182223961F00D22428 /* MetalConvolution1x1.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925A89172223961F00D22428 /* MetalConvolution1x1.metal */; }; + 925E87E0220447900000192E /* MetalConvolutionWinograd.metal in Sources */ = {isa = PBXBuildFile; fileRef = 925E87DF220447900000192E /* MetalConvolutionWinograd.metal */; }; 925F018921FF1E0B00E648A1 /* SqueezeNetTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925F018821FF1E0B00E648A1 /* SqueezeNetTest.cpp */; }; 925F018B21FF222E00E648A1 /* model in Resources */ = {isa = PBXBuildFile; fileRef = 925F018A21FF222E00E648A1 /* model */; }; 925F018D21FFF3D300E648A1 /* MobileNetTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 925F018C21FFF3D300E648A1 /* MobileNetTest.cpp */; }; + 9260B27221A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27021A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm */; }; + 9260B27521A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27421A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal */; }; + 9260B27821A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27621A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm */; }; + 9260B27B21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal in Sources */ = {isa = PBXBuildFile; fileRef = 9260B27A21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal */; }; + 92682C4D2181729200B52B9D /* MetalTile.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92682C4B2181729200B52B9D /* MetalTile.mm */; }; + 92682C50218172A300B52B9D /* MetalTile.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92682C4F218172A300B52B9D /* MetalTile.metal */; }; + 92682C5321819BF100B52B9D /* MetalSeLU.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92682C5121819BF100B52B9D /* MetalSeLU.mm */; }; + 92682C5621819BFA00B52B9D /* MetalSeLU.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92682C5521819BFA00B52B9D /* MetalSeLU.metal */; }; + 92682C5F2181A2EF00B52B9D /* MetalFill.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92682C5D2181A2EF00B52B9D /* MetalFill.mm */; }; + 92682C622181A2F900B52B9D /* MetalFill.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92682C612181A2F900B52B9D /* MetalFill.metal */; }; 9273AB4F1FE7BE4D00477B22 /* AppDelegate.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9273AB4E1FE7BE4D00477B22 /* AppDelegate.mm */; }; 9273AB571FE7BE4D00477B22 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 9273AB561FE7BE4D00477B22 /* Assets.xcassets */; }; 9273AB5D1FE7BE4D00477B22 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 9273AB5C1FE7BE4D00477B22 /* main.m */; }; + 92921A86219C24CD00B063D1 /* MetalPack.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92921A84219C24CD00B063D1 /* MetalPack.mm */; }; + 92921A89219C272B00B063D1 /* MetalPack.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92921A88219C272B00B063D1 /* MetalPack.metal */; }; + 92965EDE2175B3C300B86ABE /* MetalConcat.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92965EDD2175B3C300B86ABE /* MetalConcat.metal */; }; 92A4E0FC21F05A4F000B0919 /* MemoryUtilsTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92A4E0FB21F05A4F000B0919 /* MemoryUtilsTest.cpp */; 
}; 92A4E10321F07C76000B0919 /* AutoStorageTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92A4E10221F07C76000B0919 /* AutoStorageTest.cpp */; }; + 92A8D70021A40695009C2201 /* MetalTFQuantizedConv2D.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92A8D6FE21A40695009C2201 /* MetalTFQuantizedConv2D.mm */; }; + 92A8D70321A406A8009C2201 /* MetalTFQuantizedConv2D.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92A8D70221A406A8009C2201 /* MetalTFQuantizedConv2D.metal */; }; + 92A8D70821A54087009C2201 /* MetalDefine.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92A8D70721A54087009C2201 /* MetalDefine.metal */; }; + 92C674F922549A1600011D33 /* MetalReLU6.mm in Sources */ = {isa = PBXBuildFile; fileRef = 92C674F722549A1600011D33 /* MetalReLU6.mm */; }; + 92C674FC22549A2500011D33 /* MetalReLU6.metal in Sources */ = {isa = PBXBuildFile; fileRef = 92C674FB22549A2500011D33 /* MetalReLU6.metal */; }; 92C674FF22549C9900011D33 /* ReLU6Test.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92C674FD22549C9900011D33 /* ReLU6Test.cpp */; }; 92D765BB222819EF00178BE5 /* BackendTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765B8222819EF00178BE5 /* BackendTest.cpp */; }; 92D765BC222819EF00178BE5 /* ScheduleTest.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 92D765B9222819EF00178BE5 /* ScheduleTest.cpp */; }; @@ -853,6 +880,17 @@ 1F501F7B2397BA5A004E8721 /* Tensor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; name = Tensor.hpp; path = MNN/Tensor.hpp; sourceTree = ""; }; 1F501F7C2397BA5A004E8721 /* MNNForwardType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MNNForwardType.h; path = MNN/MNNForwardType.h; sourceTree = ""; }; 1F501F7E2397BA5B004E8721 /* MNNSharedContext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = MNNSharedContext.h; path = MNN/MNNSharedContext.h; sourceTree = ""; }; + 4829A2CB23CC26AD00623BF5 /* MatMulTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MatMulTest.cpp; sourceTree = ""; }; + 4829A2CC23CC26AD00623BF5 /* GatherTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GatherTest.cpp; sourceTree = ""; }; + 4829A2CD23CC26AD00623BF5 /* MatrixBandTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MatrixBandTest.cpp; sourceTree = ""; }; + 4829A2CE23CC26AD00623BF5 /* ExtraTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ExtraTest.cpp; sourceTree = ""; }; + 4829A2CF23CC26AD00623BF5 /* AllAnyTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AllAnyTest.cpp; sourceTree = ""; }; + 4829A2D023CC26AD00623BF5 /* MultiThreadLoad.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MultiThreadLoad.cpp; sourceTree = ""; }; + 4829A2D123CC26AD00623BF5 /* ConvInt8Test.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConvInt8Test.cpp; sourceTree = ""; }; + 4829A2D223CC26AD00623BF5 /* ExprResizeTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ExprResizeTest.cpp; sourceTree = ""; }; + 4829A2D323CC26AD00623BF5 /* ReverseSequenceTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = 
ReverseSequenceTest.cpp; sourceTree = ""; }; + 4829A2D423CC26AD00623BF5 /* ReplaceTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ReplaceTest.cpp; sourceTree = ""; }; + 4829A2D523CC26AD00623BF5 /* PaddingTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PaddingTest.cpp; sourceTree = ""; }; 486FDF44223E4B2700F487FB /* MetalBinary.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalBinary.mm; sourceTree = ""; }; 486FDF45223E4B2800F487FB /* MetalBinary.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalBinary.metal; sourceTree = ""; }; 488873C9215B639D0079B12E /* MetalSpatialProduct.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSpatialProduct.mm; sourceTree = ""; }; @@ -995,12 +1033,36 @@ 9243106A2239FE0A0016DA25 /* MetalSize.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSize.mm; sourceTree = ""; }; 9243106E2239FE190016DA25 /* MetalSize.metal */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.metal; path = MetalSize.metal; sourceTree = ""; }; 924B11AB21E73B9C006B37DB /* XCTest.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = XCTest.framework; path = Platforms/iPhoneOS.platform/Developer/Library/Frameworks/XCTest.framework; sourceTree = DEVELOPER_DIR; }; + 924F131721A81C74006D46A4 /* MetalTranspose.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTranspose.mm; sourceTree = ""; }; + 924F131B21A81C80006D46A4 /* MetalTranspose.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalTranspose.metal; sourceTree = ""; }; + 924F132021ABD470006D46A4 /* MetalQuantizedSoftmax.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedSoftmax.mm; sourceTree = ""; }; + 924F132421ABD47F006D46A4 /* MetalQuantizedSoftmax.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalQuantizedSoftmax.metal; sourceTree = ""; }; + 924F132621ABEA28006D46A4 /* MetalFixedPoint.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalFixedPoint.metal; sourceTree = ""; }; 925702CE21EF0F5300A2A3CA /* TensorUtilsTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = TensorUtilsTest.cpp; sourceTree = ""; }; 925702D121EF270D00A2A3CA /* BufferAllocatorTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = BufferAllocatorTest.cpp; sourceTree = ""; }; 925702F521EF604400A2A3CA /* SizeComputerTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = SizeComputerTest.cpp; sourceTree = ""; }; + 92575977219EA07F00918499 /* MetalStridedSlice.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalStridedSlice.mm; sourceTree = ""; }; + 9257597B219EA08400918499 /* MetalStridedSlice.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalStridedSlice.metal; sourceTree = ""; }; + 9258013C2223B77C00555D43 /* MetalConvolutionDepthwise.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolutionDepthwise.mm; sourceTree = ""; }; + 925801402223B79600555D43 /* MetalConvolutionDepthwise.metal */ = {isa 
= PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionDepthwise.metal; sourceTree = ""; }; + 925801422223B8D100555D43 /* MetalConvolutionCommon.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolutionCommon.mm; sourceTree = ""; }; + 925A89112223951200D22428 /* MetalConvolutionActivation.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionActivation.metal; sourceTree = ""; }; + 925A8913222395ED00D22428 /* MetalConvolution1x1.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalConvolution1x1.mm; sourceTree = ""; }; + 925A89172223961F00D22428 /* MetalConvolution1x1.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolution1x1.metal; sourceTree = ""; }; + 925E87DF220447900000192E /* MetalConvolutionWinograd.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConvolutionWinograd.metal; sourceTree = ""; }; 925F018821FF1E0B00E648A1 /* SqueezeNetTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = SqueezeNetTest.cpp; path = ../../test/Model/SqueezeNetTest.cpp; sourceTree = SOURCE_ROOT; }; 925F018A21FF222E00E648A1 /* model */ = {isa = PBXFileReference; lastKnownFileType = folder; name = model; path = ../../resource/model; sourceTree = ""; }; 925F018C21FFF3D300E648A1 /* MobileNetTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = MobileNetTest.cpp; path = ../../test/Model/MobileNetTest.cpp; sourceTree = SOURCE_ROOT; }; + 9260B27021A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedMaxPool.mm; sourceTree = ""; }; + 9260B27421A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalQuantizedMaxPool.metal; sourceTree = ""; }; + 9260B27621A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalQuantizedAvgPool.mm; sourceTree = ""; }; + 9260B27A21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalQuantizedAvgPool.metal; sourceTree = ""; }; + 92682C4B2181729200B52B9D /* MetalTile.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTile.mm; sourceTree = ""; }; + 92682C4F218172A300B52B9D /* MetalTile.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalTile.metal; sourceTree = ""; }; + 92682C5121819BF100B52B9D /* MetalSeLU.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalSeLU.mm; sourceTree = ""; }; + 92682C5521819BFA00B52B9D /* MetalSeLU.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalSeLU.metal; sourceTree = ""; }; + 92682C5D2181A2EF00B52B9D /* MetalFill.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalFill.mm; sourceTree = ""; }; + 92682C612181A2F900B52B9D /* MetalFill.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalFill.metal; sourceTree = ""; }; 926F5C5F1FFF3D360078EE0A /* libc.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = libc.tbd; path = usr/lib/libc.tbd; sourceTree = SDKROOT; }; 9273AB4B1FE7BE4D00477B22 /* Playground.app */ = {isa = 
PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = Playground.app; sourceTree = BUILT_PRODUCTS_DIR; }; 9273AB4D1FE7BE4D00477B22 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; @@ -1008,8 +1070,16 @@ 9273AB561FE7BE4D00477B22 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; 9273AB5B1FE7BE4D00477B22 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 9273AB5C1FE7BE4D00477B22 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + 92921A84219C24CD00B063D1 /* MetalPack.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalPack.mm; sourceTree = ""; }; + 92921A88219C272B00B063D1 /* MetalPack.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalPack.metal; sourceTree = ""; }; + 92965EDD2175B3C300B86ABE /* MetalConcat.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalConcat.metal; sourceTree = ""; }; 92A4E0FB21F05A4F000B0919 /* MemoryUtilsTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = MemoryUtilsTest.cpp; sourceTree = ""; }; 92A4E10221F07C76000B0919 /* AutoStorageTest.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = AutoStorageTest.cpp; sourceTree = ""; }; + 92A8D6FE21A40695009C2201 /* MetalTFQuantizedConv2D.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalTFQuantizedConv2D.mm; sourceTree = ""; }; + 92A8D70221A406A8009C2201 /* MetalTFQuantizedConv2D.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalTFQuantizedConv2D.metal; sourceTree = ""; }; + 92A8D70721A54087009C2201 /* MetalDefine.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalDefine.metal; sourceTree = ""; }; + 92C674F722549A1600011D33 /* MetalReLU6.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = MetalReLU6.mm; sourceTree = ""; }; + 92C674FB22549A2500011D33 /* MetalReLU6.metal */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.metal; path = MetalReLU6.metal; sourceTree = ""; }; 92C674FD22549C9900011D33 /* ReLU6Test.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = ReLU6Test.cpp; sourceTree = ""; }; 92D765B8222819EF00178BE5 /* BackendTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BackendTest.cpp; sourceTree = ""; }; 92D765B9222819EF00178BE5 /* ScheduleTest.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ScheduleTest.cpp; sourceTree = ""; }; @@ -1595,6 +1665,25 @@ path = ../../../include; sourceTree = ""; }; + 4829A2CA23CC26AD00623BF5 /* expr */ = { + isa = PBXGroup; + children = ( + 4829A2CB23CC26AD00623BF5 /* MatMulTest.cpp */, + 4829A2CC23CC26AD00623BF5 /* GatherTest.cpp */, + 4829A2CD23CC26AD00623BF5 /* MatrixBandTest.cpp */, + 4829A2CE23CC26AD00623BF5 /* ExtraTest.cpp */, + 4829A2CF23CC26AD00623BF5 /* AllAnyTest.cpp */, + 4829A2D023CC26AD00623BF5 /* MultiThreadLoad.cpp */, + 4829A2D123CC26AD00623BF5 /* ConvInt8Test.cpp */, + 4829A2D223CC26AD00623BF5 /* ExprResizeTest.cpp */, + 4829A2D323CC26AD00623BF5 /* ReverseSequenceTest.cpp */, + 4829A2D423CC26AD00623BF5 /* 
ReplaceTest.cpp */, + 4829A2D523CC26AD00623BF5 /* PaddingTest.cpp */, + ); + name = expr; + path = ../../../test/expr; + sourceTree = ""; + }; 48593FB423A89B2F0069452A /* express */ = { isa = PBXGroup; children = ( @@ -1682,17 +1771,25 @@ 92EAC19B21CB3CE20056F4C2 /* MetalCast.metal */, 92EAC19721CB3CD60056F4C2 /* MetalCast.mm */, 1F501F2C2397BA4C004E8721 /* MetalConcat.hpp */, + 92965EDD2175B3C300B86ABE /* MetalConcat.metal */, 488873E6215B639D0079B12E /* MetalConcat.mm */, 1F501F2B2397BA4C004E8721 /* MetalConvolution.hpp */, 488873DC215B639D0079B12E /* MetalConvolution.metal */, 488873E1215B639D0079B12E /* MetalConvolution.mm */, 1F501F192397BA4B004E8721 /* MetalConvolution1x1.hpp */, + 925A89172223961F00D22428 /* MetalConvolution1x1.metal */, + 925A8913222395ED00D22428 /* MetalConvolution1x1.mm */, + 925A89112223951200D22428 /* MetalConvolutionActivation.metal */, 1F501F112397BA4A004E8721 /* MetalConvolutionCommon.hpp */, + 925801422223B8D100555D43 /* MetalConvolutionCommon.mm */, 1F501F092397BA4A004E8721 /* MetalConvolutionDepthwise.hpp */, + 925801402223B79600555D43 /* MetalConvolutionDepthwise.metal */, + 9258013C2223B77C00555D43 /* MetalConvolutionDepthwise.mm */, 1F501F292397BA4C004E8721 /* MetalConvolutionGEMM.hpp */, 92369E63222544FE009D3A05 /* MetalConvolutionGEMM.metal */, 92369E61222544DD009D3A05 /* MetalConvolutionGEMM.mm */, 1F501F1E2397BA4B004E8721 /* MetalConvolutionWinograd.hpp */, + 925E87DF220447900000192E /* MetalConvolutionWinograd.metal */, 48C054862201996200E91945 /* MetalConvolutionWinograd.mm */, 1F501F152397BA4B004E8721 /* MetalCrop.hpp */, 92EEFF29217F0F0F00F89377 /* MetalCrop.metal */, @@ -1704,6 +1801,7 @@ 488873FC215B639D0079B12E /* MetalDeconvolution.metal */, 488873F2215B639D0079B12E /* MetalDeconvolution.mm */, 1F501F0A2397BA4A004E8721 /* MetalDefine.h */, + 92A8D70721A54087009C2201 /* MetalDefine.metal */, 1F501F282397BA4C004E8721 /* MetalDequantize.hpp */, 920004D621EDC30E00BCE892 /* MetalDequantize.metal */, 920004D521EDC30E00BCE892 /* MetalDequantize.mm */, @@ -1711,6 +1809,9 @@ 4888740F215B639D0079B12E /* MetalEltwise.metal */, 488873DE215B639D0079B12E /* MetalEltwise.mm */, 1F501F132397BA4B004E8721 /* MetalFill.hpp */, + 92682C612181A2F900B52B9D /* MetalFill.metal */, + 92682C5D2181A2EF00B52B9D /* MetalFill.mm */, + 924F132621ABEA28006D46A4 /* MetalFixedPoint.metal */, 1F501F062397BA4A004E8721 /* MetalGather.hpp */, 923B7F8B21A653BB002AFCE0 /* MetalGather.metal */, 923B7F8721A653AB002AFCE0 /* MetalGather.mm */, @@ -1733,6 +1834,8 @@ 488873D6215B639D0079B12E /* MetalNormalize.mm */, AE7BE4BC22855665002CEEA6 /* MetalOPRegister.mm */, 1F501F2D2397BA4C004E8721 /* MetalPack.hpp */, + 92921A88219C272B00B063D1 /* MetalPack.metal */, + 92921A84219C24CD00B063D1 /* MetalPack.mm */, 1F501F2E2397BA4C004E8721 /* MetalPermute.hpp */, 488873CD215B639D0079B12E /* MetalPermute.metal */, 4888740B215B639D0079B12E /* MetalPermute.mm */, @@ -1746,10 +1849,16 @@ 92351C8921992AC6002CA341 /* MetalQuantizedAdd.metal */, 92351C8521992AB2002CA341 /* MetalQuantizedAdd.mm */, 1F501F322397BA4C004E8721 /* MetalQuantizedAvgPool.hpp */, + 9260B27A21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal */, + 9260B27621A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm */, 1F501F122397BA4A004E8721 /* MetalQuantizedMaxPool.hpp */, + 9260B27421A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal */, + 9260B27021A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm */, 1F501EFA2397BA49004E8721 /* MetalQuantizedReshape.hpp */, 923B7F9921A69E2E002AFCE0 /* MetalQuantizedReshape.mm */, 1F501F262397BA4C004E8721 /* 
MetalQuantizedSoftmax.hpp */, + 924F132421ABD47F006D46A4 /* MetalQuantizedSoftmax.metal */, + 924F132021ABD470006D46A4 /* MetalQuantizedSoftmax.mm */, 1F501F272397BA4C004E8721 /* MetalRange.hpp */, 92256952219D6E1000F251E2 /* MetalRange.metal */, 9225694E219D6E0200F251E2 /* MetalRange.mm */, @@ -1763,6 +1872,8 @@ 488873D1215B639D0079B12E /* MetalReLU.metal */, 488873F3215B639D0079B12E /* MetalReLU.mm */, 1F501F052397BA49004E8721 /* MetalReLU6.hpp */, + 92C674FB22549A2500011D33 /* MetalReLU6.metal */, + 92C674F722549A1600011D33 /* MetalReLU6.mm */, 1F501F352397BA4D004E8721 /* MetalReshape.hpp */, 488873CA215B639D0079B12E /* MetalReshape.metal */, 488873FA215B639D0079B12E /* MetalReshape.mm */, @@ -1776,6 +1887,8 @@ 488873F6215B639D0079B12E /* MetalScale.metal */, 488873F8215B639D0079B12E /* MetalScale.mm */, 1F501F242397BA4B004E8721 /* MetalSeLU.hpp */, + 92682C5521819BFA00B52B9D /* MetalSeLU.metal */, + 92682C5121819BF100B52B9D /* MetalSeLU.mm */, 1F501F232397BA4B004E8721 /* MetalSigmoid.hpp */, CE96FE6D21707D58004AB400 /* MetalSigmoid.metal */, CE96FE6C21707D58004AB400 /* MetalSigmoid.mm */, @@ -1800,14 +1913,22 @@ 1F501F142397BA4B004E8721 /* MetalSqueeze.hpp */, 9223E10D21D327F40067544A /* MetalSqueeze.mm */, 1F501F332397BA4C004E8721 /* MetalStridedSlice.hpp */, + 9257597B219EA08400918499 /* MetalStridedSlice.metal */, + 92575977219EA07F00918499 /* MetalStridedSlice.mm */, 1F501F1C2397BA4B004E8721 /* MetalTanH.hpp */, 488873FB215B639D0079B12E /* MetalTanH.metal */, 488873CF215B639D0079B12E /* MetalTanH.mm */, 1F501F0D2397BA4A004E8721 /* MetalTensorConverter.hpp */, CE96FE5F21707D57004AB400 /* MetalTensorConverter.mm */, 1F501F1D2397BA4B004E8721 /* MetalTFQuantizedConv2D.hpp */, + 92A8D70221A406A8009C2201 /* MetalTFQuantizedConv2D.metal */, + 92A8D6FE21A40695009C2201 /* MetalTFQuantizedConv2D.mm */, 1F501F172397BA4B004E8721 /* MetalTile.hpp */, + 92682C4F218172A300B52B9D /* MetalTile.metal */, + 92682C4B2181729200B52B9D /* MetalTile.mm */, 1F501F102397BA4A004E8721 /* MetalTranspose.hpp */, + 924F131B21A81C80006D46A4 /* MetalTranspose.metal */, + 924F131721A81C74006D46A4 /* MetalTranspose.mm */, 1F501EFF2397BA49004E8721 /* MetalUnary.hpp */, CE96FE6921707D58004AB400 /* MetalUnary.metal */, CE96FE6621707D57004AB400 /* MetalUnary.mm */, @@ -2073,6 +2194,7 @@ 9200045021EDBCEC00BCE892 /* Tests */ = { isa = PBXGroup; children = ( + 4829A2CA23CC26AD00623BF5 /* expr */, 9200045C21EDBDF600BCE892 /* core */, 9200045E21EDBDF600BCE892 /* cv */, 925F018721FF1DF400E648A1 /* model */, @@ -2475,10 +2597,6 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( - 1FD9536D23A89CA200888FC3 /* (null) in Headers */, - 1FD9566323A89D8A00888FC3 /* (null) in Headers */, - 1FD9534223A89CA100888FC3 /* (null) in Headers */, - 1FD9549323A89D1300888FC3 /* (null) in Headers */, 1F501F812397BA5B004E8721 /* AutoTime.hpp in Headers */, 92FF04A523AA0BFB00AC97F6 /* AutoStorage.h in Headers */, 92FF02AF23AA0B5A00AC97F6 /* CPUConcat.hpp in Headers */, @@ -2500,10 +2618,6 @@ 92FF026E23AA0B5A00AC97F6 /* CPUQuantizationUtils.hpp in Headers */, 92FF03AA23AA0B5A00AC97F6 /* ConvolutionFloatFactory.h in Headers */, 1F501F862397BA5B004E8721 /* Rect.h in Headers */, - 1FD9566723A89D8A00888FC3 /* (null) in Headers */, - 1FD9553F23A89D4F00888FC3 /* (null) in Headers */, - 1FD953D723A89CD100888FC3 /* (null) in Headers */, - 1FD9533C23A89CA100888FC3 /* (null) in Headers */, 1F501F8B2397BA5B004E8721 /* MNNSharedContext.h in Headers */, 92FF029623AA0B5A00AC97F6 /* CPUCast.hpp in Headers */, 92FF038923AA0B5A00AC97F6 /* 
CPUSigmoid.hpp in Headers */, @@ -2514,9 +2628,6 @@ 92FF038423AA0B5A00AC97F6 /* CPUBatchMatMul.hpp in Headers */, 92FF027323AA0B5A00AC97F6 /* CPUPoolInt8.hpp in Headers */, 1F501F802397BA5B004E8721 /* MNNDefine.h in Headers */, - 1FD9535B23A89CA200888FC3 /* (null) in Headers */, - 1FD952CF23A89CA100888FC3 /* (null) in Headers */, - 1FD956A623A89D8A00888FC3 /* (null) in Headers */, 1F501F7F2397BA5B004E8721 /* HalideRuntime.h in Headers */, 92FF025823AA0B5A00AC97F6 /* CPUSqueeze.hpp in Headers */, 92FF029E23AA0B5A00AC97F6 /* CPUDeconvolutionDepthwise.hpp in Headers */, @@ -2872,7 +2983,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 924F131C21A81C80006D46A4 /* (null) in Sources */, + 924F131C21A81C80006D46A4 /* MetalTranspose.metal in Sources */, 92FF04BD23AA0BFB00AC97F6 /* Execution.cpp in Sources */, 92FF030A23AA0B5A00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */, 92FF03B023AA0B5A00AC97F6 /* ConvolutionGroup.cpp in Sources */, @@ -2882,6 +2993,7 @@ 488875A9215B639F0079B12E /* MNNMetalContext.mm in Sources */, 92FF037B23AA0B5A00AC97F6 /* CPUQuantizedConcat.cpp in Sources */, 4888759B215B639F0079B12E /* MetalSpatialProduct.mm in Sources */, + 92682C5321819BF100B52B9D /* MetalSeLU.mm in Sources */, 923B7F9521A680A1002AFCE0 /* MetalGatherV2.metal in Sources */, 92FF02D423AA0B5A00AC97F6 /* MNNScaleBias2FloatC4.S in Sources */, 92FF032C23AA0B5A00AC97F6 /* MNNWinogradMatrixProductRight.S in Sources */, @@ -2894,6 +3006,7 @@ 92FF02C223AA0B5A00AC97F6 /* MNNLoadU8AndSum.S in Sources */, 92FF039823AA0B5A00AC97F6 /* CPUThreshold.cpp in Sources */, 92FF02E323AA0B5A00AC97F6 /* MNNExpC8.S in Sources */, + 9260B27821A7C5EA00D48C97 /* MetalQuantizedAvgPool.mm in Sources */, 92FF044D23AA0B7100AC97F6 /* ShapeConst.cpp in Sources */, 92FF030223AA0B5A00AC97F6 /* MNNQuanToDestUint8.S in Sources */, 92FF037323AA0B5A00AC97F6 /* CPUEltwiseInt8.cpp in Sources */, @@ -2924,10 +3037,12 @@ 92EAC19C21CB3CE20056F4C2 /* MetalCast.metal in Sources */, 92FF02F623AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWise.S in Sources */, 92FF042323AA0B7100AC97F6 /* ShapeScatterNd.cpp in Sources */, + 925A89182223961F00D22428 /* MetalConvolution1x1.metal in Sources */, 488875A8215B639F0079B12E /* MetalNormalize.mm in Sources */, 92FF045A23AA0B7100AC97F6 /* ShapeBinaryOp.cpp in Sources */, 92FF02E523AA0B5A00AC97F6 /* MNNConvDwF23SourceTransUnit.S in Sources */, 92FF02DA23AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWiseInt8.S in Sources */, + 9260B27221A7C5CD00D48C97 /* MetalQuantizedMaxPool.mm in Sources */, 92FF033623AA0B5A00AC97F6 /* MNNConvRunForUnitDepthWise.S in Sources */, 92FF029C23AA0B5A00AC97F6 /* CPUPack.cpp in Sources */, 92FF043523AA0B7100AC97F6 /* ShapeConvolution3D.cpp in Sources */, @@ -2937,6 +3052,7 @@ 92FF02FD23AA0B5A00AC97F6 /* MNNScaleAddInt8.S in Sources */, 92FF04A723AA0BFB00AC97F6 /* BackendRegister.cpp in Sources */, 92FF02DF23AA0B5A00AC97F6 /* MNNBilinearProcC1.S in Sources */, + 925E87E0220447900000192E /* MetalConvolutionWinograd.metal in Sources */, 92FF035123AA0B5A00AC97F6 /* CPUCrop.cpp in Sources */, 92FF031523AA0B5A00AC97F6 /* MNNScaleBias2FloatC4.S in Sources */, 488875D9215B639F0079B12E /* MetalSlice.metal in Sources */, @@ -2948,21 +3064,25 @@ 92FF045223AA0B7100AC97F6 /* ShapeResize.cpp in Sources */, 9243106C2239FE0B0016DA25 /* MetalSize.mm in Sources */, 92256947219D698100F251E2 /* MetalRank.mm in Sources */, + 92921A86219C24CD00B063D1 /* MetalPack.mm in Sources */, 92FF034023AA0B5A00AC97F6 /* CPUShape.cpp in Sources */, 92FF02B023AA0B5A00AC97F6 /* 
CPUDequantize.cpp in Sources */, 92FF04C223AA0BFB00AC97F6 /* Pipeline.cpp in Sources */, 92FF04C423AA0BFB00AC97F6 /* Session.cpp in Sources */, 488875C6215B639F0079B12E /* MetalPooling.mm in Sources */, 48A8A61321D101A700C2B9A7 /* ImageSampler.cpp in Sources */, + 9258013E2223B77C00555D43 /* MetalConvolutionDepthwise.mm in Sources */, 92FF02D123AA0B5A00AC97F6 /* MNNMaxFloat.S in Sources */, 92FF026923AA0B5A00AC97F6 /* CPUSelu.cpp in Sources */, 92FF03B123AA0B5A00AC97F6 /* ConvolutionFloatFactory.cpp in Sources */, 92FF027E23AA0B5A00AC97F6 /* CPUTranspose.cpp in Sources */, 488875C8215B639F0079B12E /* MetalScale.metal in Sources */, 92FF032123AA0B5A00AC97F6 /* MNNPowC8.S in Sources */, + 92A8D70021A40695009C2201 /* MetalTFQuantizedConv2D.mm in Sources */, 92FF033023AA0B5A00AC97F6 /* MNNCubicSampleC4.S in Sources */, 92FF03C323AA0B5A00AC97F6 /* CPUEltwise.cpp in Sources */, 92FF02F223AA0B5A00AC97F6 /* MNNBlitC3ToFloatRGBA.S in Sources */, + 92C674F922549A1600011D33 /* MetalReLU6.mm in Sources */, 488875D3215B639F0079B12E /* MetalSpatialProduct.metal in Sources */, CE96FE8121707D58004AB400 /* MetalMatMul.metal in Sources */, 92FF030323AA0B5A00AC97F6 /* MNNLoadU8AndSum.S in Sources */, @@ -2976,7 +3096,7 @@ 92FF042923AA0B7100AC97F6 /* ShapeLinSpace.cpp in Sources */, 92FF03A723AA0B5A00AC97F6 /* ConvolutionIntFactory.cpp in Sources */, 92FF027523AA0B5A00AC97F6 /* CPUConvolution.cpp in Sources */, - 924F132521ABD47F006D46A4 /* (null) in Sources */, + 924F132521ABD47F006D46A4 /* MetalQuantizedSoftmax.metal in Sources */, 92FF043B23AA0B7100AC97F6 /* ShapeDetectionPostProcess.cpp in Sources */, 92FF037523AA0B5A00AC97F6 /* CPUUnpack.cpp in Sources */, 92FF03A023AA0B5A00AC97F6 /* ConvolutionWinograd.cpp in Sources */, @@ -2997,6 +3117,7 @@ 92FF039C23AA0B5A00AC97F6 /* Convolution3D3x3.cpp in Sources */, 92FF028523AA0B5A00AC97F6 /* CPUBroadcastTo.cpp in Sources */, 923B7F9221A68091002AFCE0 /* MetalGatherV2.mm in Sources */, + 92C674FC22549A2500011D33 /* MetalReLU6.metal in Sources */, 92FF043423AA0B7100AC97F6 /* ShapeStridedSlice.cpp in Sources */, 92FF02EB23AA0B5A00AC97F6 /* MNNGemmFloatOne_4.S in Sources */, 488875BB215B639F0079B12E /* MetalSoftmax.metal in Sources */, @@ -3010,10 +3131,13 @@ 92FF032E23AA0B5A00AC97F6 /* MNNReluWithSlopeChannel.S in Sources */, 92FF034E23AA0B5A00AC97F6 /* CPUDepthToSpace.cpp in Sources */, 92FF044823AA0B7100AC97F6 /* ShapeGather.cpp in Sources */, + 9257597C219EA08400918499 /* MetalStridedSlice.metal in Sources */, 48C054882201996200E91945 /* MetalConvolutionWinograd.mm in Sources */, 488875DA215B639F0079B12E /* MetalResize.metal in Sources */, + 925A8915222395ED00D22428 /* MetalConvolution1x1.mm in Sources */, 92FF032323AA0B5A00AC97F6 /* MNNExpC8.S in Sources */, 488875D7215B639F0079B12E /* MetalBackend.mm in Sources */, + 92A8D70821A54087009C2201 /* MetalDefine.metal in Sources */, 92FF044C23AA0B7100AC97F6 /* ShapePool3D.cpp in Sources */, 92FF028B23AA0B5A00AC97F6 /* CPUBatchToSpaceND.cpp in Sources */, 92FF029823AA0B5A00AC97F6 /* CPUTFQuantizedConv2D.cpp in Sources */, @@ -3031,6 +3155,7 @@ 92FF02C323AA0B5A00AC97F6 /* MNNCubicLineC4.S in Sources */, 92351C8A21992AC6002CA341 /* MetalQuantizedAdd.metal in Sources */, 92FF02B323AA0B5A00AC97F6 /* CPUInstanceNorm.cpp in Sources */, + 92965EDE2175B3C300B86ABE /* MetalConcat.metal in Sources */, 9223E10F21D327F40067544A /* MetalSqueeze.mm in Sources */, 488875AB215B639F0079B12E /* MetalLSTM.metal in Sources */, 92FF042223AA0B7100AC97F6 /* ShapeConcat.cpp in Sources */, @@ -3050,12 +3175,14 @@ 92FF043323AA0B7100AC97F6 /* 
ShapeCrop.cpp in Sources */, 92EEFF302180159600F89377 /* MetalReduction.mm in Sources */, 92FF02C423AA0B5A00AC97F6 /* MNNAddBiasRelu6.S in Sources */, + 92A8D70321A406A8009C2201 /* MetalTFQuantizedConv2D.metal in Sources */, 92FF02B523AA0B5A00AC97F6 /* CPUTopKV2.cpp in Sources */, 92FF038323AA0B5A00AC97F6 /* CPUSoftmax.cpp in Sources */, 92FF038123AA0B5A00AC97F6 /* CPUNormalize.cpp in Sources */, 92FF032923AA0B5A00AC97F6 /* MNNDepthWiseInt8AddBiasScaleUnit.S in Sources */, 92FF02BD23AA0B5A00AC97F6 /* MNNMatrixProd.S in Sources */, 92FF032B23AA0B5A00AC97F6 /* MNNGemmFloatOne_4.S in Sources */, + 925801412223B79600555D43 /* MetalConvolutionDepthwise.metal in Sources */, 92FF02A223AA0B5A00AC97F6 /* CPUSize.cpp in Sources */, 92FF02EE23AA0B5A00AC97F6 /* MNNReluWithSlopeChannel.S in Sources */, 92FF036A23AA0B5A00AC97F6 /* CPURNNSequenceGRU.cpp in Sources */, @@ -3119,7 +3246,7 @@ 488875B0215B639F0079B12E /* MetalEltwise.mm in Sources */, 92FF029A23AA0B5A00AC97F6 /* CPUQuantizedMaxPool.cpp in Sources */, 92FF02D923AA0B5A00AC97F6 /* MNNGemmInt8toFloat32_8x4_Unit.S in Sources */, - 924F132721ABEA28006D46A4 /* (null) in Sources */, + 924F132721ABEA28006D46A4 /* MetalFixedPoint.metal in Sources */, 488875B2215B639F0079B12E /* MetalBackend.metal in Sources */, 92FF042423AA0B7100AC97F6 /* ShapeROIPooling.cpp in Sources */, 92FF033123AA0B5A00AC97F6 /* MNNCoefLine.S in Sources */, @@ -3134,15 +3261,17 @@ 92FF03BE23AA0B5A00AC97F6 /* DeconvolutionWithStride.cpp in Sources */, 92FF044923AA0B7100AC97F6 /* ShapeGatherND.cpp in Sources */, 92FF02E123AA0B5A00AC97F6 /* MNNPowC8.S in Sources */, + 92682C4D2181729200B52B9D /* MetalTile.mm in Sources */, 92FF02B123AA0B5A00AC97F6 /* CPUBackend.cpp in Sources */, 92FF02C823AA0B5A00AC97F6 /* MNNNV21ToBGRUnit.S in Sources */, 92FF03CB23AA0B5A00AC97F6 /* CPUGatherV2.cpp in Sources */, 92FF045C23AA0B7100AC97F6 /* ShapeBroadcastTo.cpp in Sources */, 92FF02AE23AA0B5A00AC97F6 /* CPUProposal.cpp in Sources */, 920004D921EDC30E00BCE892 /* MetalDequantize.metal in Sources */, - 924F132221ABD470006D46A4 /* (null) in Sources */, + 924F132221ABD470006D46A4 /* MetalQuantizedSoftmax.mm in Sources */, 92FF042723AA0B7100AC97F6 /* ShapeMatMul.cpp in Sources */, 92FF042823AA0B7100AC97F6 /* ShapeInterp.cpp in Sources */, + 9260B27B21A7C5FC00D48C97 /* MetalQuantizedAvgPool.metal in Sources */, 92FF02D623AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseInt8.S in Sources */, CE96FE7821707D58004AB400 /* MetalUnary.mm in Sources */, 92FF04BA23AA0BFB00AC97F6 /* WrapExecution.cpp in Sources */, @@ -3180,12 +3309,15 @@ 92FF030F23AA0B5A00AC97F6 /* MNNPackC4.S in Sources */, 92EAC19921CB3CD60056F4C2 /* MetalCast.mm in Sources */, 92FF031D23AA0B5A00AC97F6 /* MNNConvRunForLineDepthWiseUint8.S in Sources */, + 92575979219EA07F00918499 /* MetalStridedSlice.mm in Sources */, 92FF030123AA0B5A00AC97F6 /* MNNAddC4WithStride.S in Sources */, 92FF02E223AA0B5A00AC97F6 /* MNNMatrixAdd.S in Sources */, + 92921A89219C272B00B063D1 /* MetalPack.metal in Sources */, 92FF038223AA0B5A00AC97F6 /* CPUSetDiff1D.cpp in Sources */, 92FF030423AA0B5A00AC97F6 /* MNNCubicLineC4.S in Sources */, 92FF029523AA0B5A00AC97F6 /* CPUBatchMatMul.cpp in Sources */, 92FF031B23AA0B5A00AC97F6 /* MNNScaleAndAddBias.S in Sources */, + 925A89122223951200D22428 /* MetalConvolutionActivation.metal in Sources */, 92FF02AD23AA0B5A00AC97F6 /* CPUConvInt8.cpp in Sources */, 92FF042123AA0B7100AC97F6 /* ShapeDeconvolution.cpp in Sources */, 92369E64222544FE009D3A05 /* MetalConvolutionGEMM.metal in Sources */, @@ -3217,6 +3349,7 @@ 
92FF033523AA0B5A00AC97F6 /* MNNInt8ScaleToFloat.S in Sources */, 923B7F8921A653AB002AFCE0 /* MetalGather.mm in Sources */, 488875DD215B639F0079B12E /* MetalPermute.mm in Sources */, + 9260B27521A7C5DC00D48C97 /* MetalQuantizedMaxPool.metal in Sources */, 92FF02CC23AA0B5A00AC97F6 /* MNNGemmFloatCommon_4.S in Sources */, 92369E62222544DE009D3A05 /* MetalConvolutionGEMM.mm in Sources */, 92FF026F23AA0B5A00AC97F6 /* CPUInt8ToFloat.cpp in Sources */, @@ -3246,17 +3379,21 @@ 92FF02A323AA0B5A00AC97F6 /* CPUQuantizedLogistic.cpp in Sources */, 9225694A219D698900F251E2 /* MetalRank.metal in Sources */, 92FF032F23AA0B5A00AC97F6 /* MNNAddBias.S in Sources */, + 92682C622181A2F900B52B9D /* MetalFill.metal in Sources */, 48887728215B639F0079B12E /* WingoradGenerater.cpp in Sources */, 92FF045423AA0B7100AC97F6 /* ShapeRNNSequenceGRU.cpp in Sources */, + 92682C5621819BFA00B52B9D /* MetalSeLU.metal in Sources */, 92FF02AA23AA0B5A00AC97F6 /* CPUSpaceToDepth.cpp in Sources */, 92FF02FF23AA0B5A00AC97F6 /* MNNFloat2Int8.S in Sources */, AE7BE4BD22855665002CEEA6 /* MetalOPRegister.mm in Sources */, 92FF033423AA0B5A00AC97F6 /* MNNUInt8ToInt16WithOffsetC4Common.S in Sources */, + 92682C5F2181A2EF00B52B9D /* MetalFill.mm in Sources */, 92FF036B23AA0B5A00AC97F6 /* CPUResize.cpp in Sources */, 92FF02C723AA0B5A00AC97F6 /* MNNCopyC4WithStride.S in Sources */, CE96FE7F21707D58004AB400 /* MetalSigmoid.metal in Sources */, 488875B8215B639F0079B12E /* MetalConcat.mm in Sources */, 92FF030923AA0B5A00AC97F6 /* MNNNV21ToBGRUnit.S in Sources */, + 92682C50218172A300B52B9D /* MetalTile.metal in Sources */, 92FF032623AA0B5A00AC97F6 /* MNNWinogradMatrixProductLeft.S in Sources */, 92FF04C023AA0BFB00AC97F6 /* Tensor.cpp in Sources */, 92FF045D23AA0B7100AC97F6 /* ShapeCast.cpp in Sources */, @@ -3285,6 +3422,7 @@ 92FF032023AA0B5A00AC97F6 /* MNNMatrixSub.S in Sources */, 92FF036323AA0B5A00AC97F6 /* CPUScale.cpp in Sources */, 92FF02FE23AA0B5A00AC97F6 /* MNNMatrixProd.S in Sources */, + 925801442223B8D100555D43 /* MetalConvolutionCommon.mm in Sources */, 92FF026723AA0B5A00AC97F6 /* CPUReduceJoin.cpp in Sources */, 92FF039B23AA0B5A00AC97F6 /* CommonOptFunction.cpp in Sources */, 92FF02BC23AA0B5A00AC97F6 /* MNNScaleAddInt8.S in Sources */, @@ -3292,7 +3430,7 @@ 92FF026323AA0B5A00AC97F6 /* CPUFloatToInt8.cpp in Sources */, 4888759D215B639F0079B12E /* MetalLRN.metal in Sources */, 488875A1215B639F0079B12E /* MetalTanH.mm in Sources */, - 924F131921A81C74006D46A4 /* (null) in Sources */, + 924F131921A81C74006D46A4 /* MetalTranspose.mm in Sources */, 92FF035423AA0B5A00AC97F6 /* CPUSelect.cpp in Sources */, 92FF02C923AA0B5A00AC97F6 /* MNNLineDepthWiseInt8AddBiasScaleUnit.S in Sources */, 92FF032823AA0B5A00AC97F6 /* MNNSamplerC1BilinearOpt.S in Sources */, @@ -3323,10 +3461,13 @@ 92A4E0FC21F05A4F000B0919 /* MemoryUtilsTest.cpp in Sources */, 920004B521EDBDF600BCE892 /* BinaryOPTest.cpp in Sources */, 92D765BD222819EF00178BE5 /* DirectedAcyclicGraphTest.cpp in Sources */, + 4829A2D623CC26AE00623BF5 /* MatMulTest.cpp in Sources */, 920004D221EDBE1100BCE892 /* MNNTestSuite.cpp in Sources */, 920004BE21EDBDF600BCE892 /* CastTest.cpp in Sources */, 920004AB21EDBDF600BCE892 /* InterpTest.cpp in Sources */, 920004C421EDBDF600BCE892 /* ConcatTest.cpp in Sources */, + 4829A2DB23CC26AE00623BF5 /* MultiThreadLoad.cpp in Sources */, + 4829A2DA23CC26AE00623BF5 /* AllAnyTest.cpp in Sources */, 920004AA21EDBDF600BCE892 /* GatherV2Test.cpp in Sources */, 920004B621EDBDF600BCE892 /* BatchToSpaceNDTest.cpp in Sources */, 920004BB21EDBDF600BCE892 /* 
ScaleTest.cpp in Sources */, @@ -3338,7 +3479,9 @@ 920004B921EDBDF600BCE892 /* NormalizeTest.cpp in Sources */, 920004A421EDBDF600BCE892 /* MatMulTest.cpp in Sources */, 920004C721EDBDF600BCE892 /* CropTest.cpp in Sources */, + 4829A2D823CC26AE00623BF5 /* MatrixBandTest.cpp in Sources */, 920004BD21EDBDF600BCE892 /* PermuteTest.cpp in Sources */, + 4829A2DD23CC26AE00623BF5 /* ExprResizeTest.cpp in Sources */, 92D765BC222819EF00178BE5 /* ScheduleTest.cpp in Sources */, 9273AB5D1FE7BE4D00477B22 /* main.m in Sources */, 920004A121EDBDF600BCE892 /* LSTMTest.cpp in Sources */, @@ -3348,6 +3491,8 @@ 925702D021EF0F5300A2A3CA /* TensorUtilsTest.cpp in Sources */, 920004A621EDBDF600BCE892 /* LRNTest.cpp in Sources */, 920004B021EDBDF600BCE892 /* TensorConverterTest.cpp in Sources */, + 4829A2E023CC26AE00623BF5 /* PaddingTest.cpp in Sources */, + 4829A2DE23CC26AE00623BF5 /* ReverseSequenceTest.cpp in Sources */, 920004AF21EDBDF600BCE892 /* DequantizeTest.cpp in Sources */, 920004CC21EDBDF600BCE892 /* RangeTest.cpp in Sources */, 920004BF21EDBDF600BCE892 /* ResizeTest.cpp in Sources */, @@ -3370,17 +3515,21 @@ 920004B321EDBDF600BCE892 /* ReLUTest.cpp in Sources */, 9200049D21EDBDF600BCE892 /* TFQuantizedConv2DTest.cpp in Sources */, 920004D321EDBE1100BCE892 /* TestUtils.cpp in Sources */, + 4829A2DF23CC26AE00623BF5 /* ReplaceTest.cpp in Sources */, 920004A721EDBDF600BCE892 /* RankTest.cpp in Sources */, 920004CB21EDBDF600BCE892 /* SpaceToBatchNDTest.cpp in Sources */, 920004B421EDBDF600BCE892 /* DeconvolutionTest.cpp in Sources */, 920004C821EDBDF600BCE892 /* SliceTFTest.cpp in Sources */, + 4829A2D923CC26AE00623BF5 /* ExtraTest.cpp in Sources */, 920004D421EDBE1100BCE892 /* TestUtils.mm in Sources */, 9200049B21EDBDF600BCE892 /* MatrixTest.cpp in Sources */, 920004B121EDBDF600BCE892 /* SpatialProductTest.cpp in Sources */, 92D765BB222819EF00178BE5 /* BackendTest.cpp in Sources */, 9200049921EDBDF600BCE892 /* TensorTest.cpp in Sources */, + 4829A2DC23CC26AE00623BF5 /* ConvInt8Test.cpp in Sources */, 920004BA21EDBDF600BCE892 /* StridedSliceTest.cpp in Sources */, 920004D121EDBDF600BCE892 /* CropAndResizeTest.cpp in Sources */, + 4829A2D723CC26AE00623BF5 /* GatherTest.cpp in Sources */, EBAFCE672231133F000D4EF4 /* QuantizedAddTest.cpp in Sources */, 9273AB4F1FE7BE4D00477B22 /* AppDelegate.mm in Sources */, 92C674FF22549C9900011D33 /* ReLU6Test.cpp in Sources */, @@ -3642,7 +3791,7 @@ IPHONEOS_DEPLOYMENT_TARGET = 9.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)"; - PRODUCT_BUNDLE_IDENTIFIER = com.cat.MNN.playground6w; + PRODUCT_BUNDLE_IDENTIFIER = com.cat.MNN.playgroundv45; PRODUCT_NAME = "$(TARGET_NAME)"; TARGETED_DEVICE_FAMILY = "1,2"; }; @@ -3665,7 +3814,7 @@ IPHONEOS_DEPLOYMENT_TARGET = 9.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks"; OTHER_CPLUSPLUSFLAGS = "$(OTHER_CFLAGS)"; - PRODUCT_BUNDLE_IDENTIFIER = com.cat.MNN.playground6w; + PRODUCT_BUNDLE_IDENTIFIER = com.cat.MNN.playgroundv45; PRODUCT_NAME = "$(TARGET_NAME)"; TARGETED_DEVICE_FAMILY = "1,2"; }; diff --git a/pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py b/pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py index f7fb3730..41463677 100644 --- a/pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py +++ b/pymnn/pip_package/MNNTools/MNN_FB/BinaryOpOperation.py @@ -21,4 +21,8 @@ class BinaryOpOperation(object): EQUAL = 15 LESS_EQUAL = 16 FLOORMOD = 17 + MOD = 19 + ATAN2 = 20 + LOGICALOR = 21 + NOTEQUAL = 22 diff --git 
a/pymnn/pip_package/MNNTools/MNN_FB/DetectionPostProcessParam.py b/pymnn/pip_package/MNNTools/MNN_FB/DetectionPostProcessParam.py new file mode 100644 index 00000000..a9cb3900 --- /dev/null +++ b/pymnn/pip_package/MNNTools/MNN_FB/DetectionPostProcessParam.py @@ -0,0 +1,102 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: MNN + +import flatbuffers + +class DetectionPostProcessParam(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsDetectionPostProcessParam(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DetectionPostProcessParam() + x.Init(buf, n + offset) + return x + + # DetectionPostProcessParam + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DetectionPostProcessParam + def MaxDetections(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DetectionPostProcessParam + def MaxClassesPerDetection(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DetectionPostProcessParam + def DetectionsPerClass(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DetectionPostProcessParam + def NmsScoreThreshold(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # DetectionPostProcessParam + def IouThreshold(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # DetectionPostProcessParam + def NumClasses(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DetectionPostProcessParam + def UseRegularNMS(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # DetectionPostProcessParam + def CenterSizeEncoding(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # DetectionPostProcessParam + def CenterSizeEncodingAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # DetectionPostProcessParam + def CenterSizeEncodingLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + +def DetectionPostProcessParamStart(builder): builder.StartObject(8) +def DetectionPostProcessParamAddMaxDetections(builder, maxDetections): builder.PrependInt32Slot(0, maxDetections, 0) +def DetectionPostProcessParamAddMaxClassesPerDetection(builder, maxClassesPerDetection): builder.PrependInt32Slot(1, 
maxClassesPerDetection, 0) +def DetectionPostProcessParamAddDetectionsPerClass(builder, detectionsPerClass): builder.PrependInt32Slot(2, detectionsPerClass, 0) +def DetectionPostProcessParamAddNmsScoreThreshold(builder, nmsScoreThreshold): builder.PrependFloat32Slot(3, nmsScoreThreshold, 0.0) +def DetectionPostProcessParamAddIouThreshold(builder, iouThreshold): builder.PrependFloat32Slot(4, iouThreshold, 0.0) +def DetectionPostProcessParamAddNumClasses(builder, numClasses): builder.PrependInt32Slot(5, numClasses, 0) +def DetectionPostProcessParamAddUseRegularNMS(builder, useRegularNMS): builder.PrependBoolSlot(6, useRegularNMS, 0) +def DetectionPostProcessParamAddCenterSizeEncoding(builder, centerSizeEncoding): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(centerSizeEncoding), 0) +def DetectionPostProcessParamStartCenterSizeEncodingVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def DetectionPostProcessParamEnd(builder): return builder.EndObject() diff --git a/pymnn/pip_package/MNNTools/MNN_FB/Net.py b/pymnn/pip_package/MNNTools/MNN_FB/Net.py index 06294bc8..bf4fd63d 100644 --- a/pymnn/pip_package/MNNTools/MNN_FB/Net.py +++ b/pymnn/pip_package/MNNTools/MNN_FB/Net.py @@ -127,7 +127,14 @@ class Net(object): return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) return 0 -def NetStart(builder): builder.StartObject(9) + # Net + def Usage(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def NetStart(builder): builder.StartObject(10) def NetAddBizCode(builder, bizCode): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(bizCode), 0) def NetAddExtraTensorDescribe(builder, extraTensorDescribe): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(extraTensorDescribe), 0) def NetStartExtraTensorDescribeVector(builder, numElems): return builder.StartVector(4, numElems, 4) @@ -141,4 +148,5 @@ def NetAddSourceType(builder, sourceType): builder.PrependInt8Slot(6, sourceType def NetAddTensorName(builder, tensorName): builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(tensorName), 0) def NetStartTensorNameVector(builder, numElems): return builder.StartVector(4, numElems, 4) def NetAddTensorNumber(builder, tensorNumber): builder.PrependInt32Slot(8, tensorNumber, 0) +def NetAddUsage(builder, usage): builder.PrependInt8Slot(9, usage, 0) def NetEnd(builder): return builder.EndObject() diff --git a/pymnn/pip_package/MNNTools/MNN_FB/OneHotParam.py b/pymnn/pip_package/MNNTools/MNN_FB/OneHotParam.py new file mode 100644 index 00000000..0e8b374b --- /dev/null +++ b/pymnn/pip_package/MNNTools/MNN_FB/OneHotParam.py @@ -0,0 +1,38 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: MNN + +import flatbuffers + +class OneHotParam(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsOneHotParam(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = OneHotParam() + x.Init(buf, n + offset) + return x + + # OneHotParam + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # OneHotParam + def DType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # 
OneHotParam + def Axis(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return -1 + +def OneHotParamStart(builder): builder.StartObject(2) +def OneHotParamAddDType(builder, dType): builder.PrependInt32Slot(0, dType, 1) +def OneHotParamAddAxis(builder, axis): builder.PrependInt32Slot(1, axis, -1) +def OneHotParamEnd(builder): return builder.EndObject() diff --git a/pymnn/pip_package/MNNTools/MNN_FB/OpParameter.py b/pymnn/pip_package/MNNTools/MNN_FB/OpParameter.py index 3462111e..f4c3c4c5 100644 --- a/pymnn/pip_package/MNNTools/MNN_FB/OpParameter.py +++ b/pymnn/pip_package/MNNTools/MNN_FB/OpParameter.py @@ -85,4 +85,7 @@ class OpParameter(object): Pool3D = 79 Convolution3D = 80 ELU = 81 + DetectionPostProcessParam = 82 + OneHotParam = 83 + PadParam = 84 diff --git a/pymnn/pip_package/MNNTools/MNN_FB/OpType.py b/pymnn/pip_package/MNNTools/MNN_FB/OpType.py index 262dfa1e..c76cfbae 100644 --- a/pymnn/pip_package/MNNTools/MNN_FB/OpType.py +++ b/pymnn/pip_package/MNNTools/MNN_FB/OpType.py @@ -7,7 +7,7 @@ class OpType(object): QuantizedAdd = 1 ArgMax = 2 AsString = 3 - BatchNorm = 4 + InstanceNorm = 4 BatchToSpaceND = 5 Bias = 6 BinaryOp = 7 @@ -119,8 +119,16 @@ class OpType(object): Convolution3D = 113 MatrixBandPart = 114 GatherND = 115 + DetectionPostProcess = 116 + UnravelIndex = 117 + ScatterNd = 118 + OneHot = 119 + BroadcastTo = 120 + Dilation2D = 121 MaxLayerCount = 128 ConvertTensor = 129 + ArgMin = 130 + LinSpace = 131 PLUGIN = 256 Select = 257 ZerosLike = 258 @@ -131,6 +139,8 @@ class OpType(object): PoolGrad = 263 SoftmaxGrad = 264 Conv2DBackPropFilter = 265 + TrainableParam = 266 + BatchNorm = 267 Extra = 512 ConvInt8 = 513 Int8ToFloat = 514 diff --git a/pymnn/pip_package/MNNTools/MNN_FB/PadParam.py b/pymnn/pip_package/MNNTools/MNN_FB/PadParam.py new file mode 100644 index 00000000..8f6f1819 --- /dev/null +++ b/pymnn/pip_package/MNNTools/MNN_FB/PadParam.py @@ -0,0 +1,30 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: MNN + +import flatbuffers + +class PadParam(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsPadParam(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PadParam() + x.Init(buf, n + offset) + return x + + # PadParam + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PadParam + def Mode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def PadParamStart(builder): builder.StartObject(1) +def PadParamAddMode(builder, mode): builder.PrependInt8Slot(0, mode, 0) +def PadParamEnd(builder): return builder.EndObject() diff --git a/pymnn/pip_package/MNNTools/MNN_FB/PadValueMode.py b/pymnn/pip_package/MNNTools/MNN_FB/PadValueMode.py new file mode 100644 index 00000000..d7b24f13 --- /dev/null +++ b/pymnn/pip_package/MNNTools/MNN_FB/PadValueMode.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: MNN + +class PadValueMode(object): + CONSTANT = 0 + REFLECT = 1 + SYMMETRIC = 2 + diff --git a/pymnn/pip_package/MNNTools/MNN_FB/UnaryOpOperation.py b/pymnn/pip_package/MNNTools/MNN_FB/UnaryOpOperation.py index 437a3f6a..74747175 100644 --- a/pymnn/pip_package/MNNTools/MNN_FB/UnaryOpOperation.py +++ b/pymnn/pip_package/MNNTools/MNN_FB/UnaryOpOperation.py 
@@ -20,4 +20,16 @@ class UnaryOpOperation(object): ATAN = 14 RECIPROCAL = 15 LOG1P = 16 + BNLL = 17 + ACOSH = 18 + SINH = 19 + ASINH = 20 + ATANH = 21 + SIGN = 22 + ROUND = 23 + COSH = 24 + ERF = 25 + ERFC = 26 + ERFINV = 27 + EXPM1 = 28 diff --git a/pymnn/pip_package/MNNTools/MNN_FB/Usage.py b/pymnn/pip_package/MNNTools/MNN_FB/Usage.py new file mode 100644 index 00000000..9355ccbb --- /dev/null +++ b/pymnn/pip_package/MNNTools/MNN_FB/Usage.py @@ -0,0 +1,8 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: MNN + +class Usage(object): + INFERENCE = 0 + TRAIN = 1 + diff --git a/pymnn/pip_package/build_deps.py b/pymnn/pip_package/build_deps.py index 4d353b44..95fc93eb 100644 --- a/pymnn/pip_package/build_deps.py +++ b/pymnn/pip_package/build_deps.py @@ -19,9 +19,12 @@ def build_deps(): os.chdir(cmake_build_dir) if IS_WINDOWS: os.system('cmake -G "Ninja" -DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=on\ - -DMNN_BUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release .. && ninja') + -DMNN_BUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release\ + -DMNN_AAPL_FMWK=OFF -DMNN_SEP_BUILD=OFF -DMNN_SCHEMA_SUFFIX=default .. && ninja') else: - os.system('cmake -DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=on -DMNN_BUILD_SHARED_LIBS=OFF .. && make -j4') + os.system('cmake -DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=on\ + -DMNN_BUILD_SHARED_LIBS=OFF -DMNN_AAPL_FMWK=OFF -DMNN_SEP_BUILD=OFF\ + -DMNN_SCHEMA_SUFFIX=default .. && make -j4') ################################################################################ # Building dependent libraries ################################################################################ diff --git a/pymnn/pip_package/setup.py b/pymnn/pip_package/setup.py index 153c55bb..6c1c2019 100644 --- a/pymnn/pip_package/setup.py +++ b/pymnn/pip_package/setup.py @@ -95,20 +95,7 @@ def configure_extension_build(): tools_compile_args = [] tools_libraries = [] tools_library_dirs = [os.path.join(root_dir, BUILD_DIR)] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "express")] tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter")] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter",\ - "source", "tflite")] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter",\ - "source", "onnx")] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter",\ - "source", "optimizer")] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter",\ - "source", "MNN")] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter",\ - "source", "caffe")] - tools_library_dirs += [os.path.join(root_dir, BUILD_DIR, "tools", "converter",\ - "source", "tensorflow")] tools_link_args = [] tools_sources = [os.path.join(root_dir, "pymnn", "src", "MNNTools.cc")] tools_sources += [os.path.join(root_dir, "tools", "quantization",\ @@ -135,8 +122,7 @@ def configure_extension_build(): tools_include_dirs += [os.path.join(root_dir, "3rd_party", "imageHelper")] tools_include_dirs += [os.path.join(root_dir, "source", "core")] tools_include_dirs += [os.path.join(root_dir, "schema", "current")] - #tools_depend = ['-lCOMMON_LIB', '-ltflite', '-lonnx', '-loptimizer',\ - # '-lMNN', '-lMNN_Express', '-lmnn_bizcode', '-lcaffe', '-ltensorflow'] + tools_include_dirs += [os.path.join(root_dir, "source")] tools_depend = ['-lMNN', '-lMNNConvertDeps'] engine_extra_link_args = [] tools_extra_link_args = [] diff --git a/pymnn/src/MNN.cc b/pymnn/src/MNN.cc index 
850c4447..e74d7f93 100644 --- a/pymnn/src/MNN.cc +++ b/pymnn/src/MNN.cc @@ -1010,6 +1010,7 @@ static void PyMNNInterpreter_dealloc(PyMNNInterpreter *self) { delete self->interpreter; self->interpreter = NULL; } + delete self->modelPath; Py_TYPE(self)->tp_free((PyObject*)self); } diff --git a/pymnn/src/MNNTools.cc b/pymnn/src/MNNTools.cc index 63ccfa13..0056f790 100644 --- a/pymnn/src/MNNTools.cc +++ b/pymnn/src/MNNTools.cc @@ -36,6 +36,7 @@ static PyObject* PyTool_Converter(PyObject *self, PyObject *args) { modelPath.bizCode = std::string(""); modelPath.benchmarkModel = false; modelPath.saveHalfFloat = static_cast(PyLong_AsLong(fp16)); + modelPath.forTraining = false; if(prototxtFile){ modelPath.prototxtFile = std::string(prototxtFile); } @@ -57,7 +58,7 @@ if (modelPath.model != modelConfig::MNN) { std::cout << "Start to Optimize the MNN Net..." << std::endl; - std::unique_ptr newNet = optimizeNet(netT); + std::unique_ptr newNet = optimizeNet(netT, modelPath.forTraining); writeFb(newNet, modelPath.MNNModel, modelPath.benchmarkModel,modelPath.saveHalfFloat); } else { writeFb(netT, modelPath.MNNModel, modelPath.benchmarkModel,modelPath.saveHalfFloat); diff --git a/pymnn/src/util.h b/pymnn/src/util.h index 733d1507..01f9b420 100644 --- a/pymnn/src/util.h +++ b/pymnn/src/util.h @@ -17,6 +17,8 @@ inline std::string object2String(PyObject* obj) { Py_XDECREF(bytes); return s; } + // Just to pass compilation; control should never reach here. + return std::string(""); } inline PyObject* char2Object(const char* str) { diff --git a/schema/current/BasicOptimizer_generated.h b/schema/current/BasicOptimizer_generated.h deleted file mode 100644 index 6bb12f7e..00000000 --- a/schema/current/BasicOptimizer_generated.h +++ /dev/null @@ -1,388 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_ -#define FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_ - - -#include "CaffeOp_generated.h" -#include "GpuLibrary_generated.h" -#include "MNN_generated.h" -#include "TFQuantizeOp_generated.h" -#include "Tensor_generated.h" -#include "TensorflowOp_generated.h" -#include "Type_generated.h" -#include "UserDefine_generated.h" - -namespace MNN { -namespace Optimizer { - -struct BackendConfig; -struct BackendConfigT; - -struct Merge; -struct MergeT; - -inline const flatbuffers::TypeTable *BackendConfigTypeTable(); - -inline const flatbuffers::TypeTable *MergeTypeTable(); - -struct BackendConfigT : public flatbuffers::NativeTable { - typedef BackendConfig TableType; - int32_t memroy; - MNN::ForwardType type; - int32_t precision; - int32_t power; - int32_t numberThread; - BackendConfigT() - : memroy(0), - type(MNN::ForwardType_CPU), - precision(0), - power(0), - numberThread(1) { - } -}; - -struct BackendConfig FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BackendConfigT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BackendConfigTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MEMROY = 4, - VT_TYPE = 6, - VT_PRECISION = 8, - VT_POWER = 10, - VT_NUMBERTHREAD = 12 - }; - int32_t memroy() const { - return GetField(VT_MEMROY, 0); - } - MNN::ForwardType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - int32_t precision() const { - return GetField(VT_PRECISION, 0); - } - int32_t power() const { - return GetField(VT_POWER, 0); - } - int32_t numberThread()
const { - return GetField(VT_NUMBERTHREAD, 1); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MEMROY) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_PRECISION) && - VerifyField(verifier, VT_POWER) && - VerifyField(verifier, VT_NUMBERTHREAD) && - verifier.EndTable(); - } - BackendConfigT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BackendConfigT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BackendConfigBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_memroy(int32_t memroy) { - fbb_.AddElement(BackendConfig::VT_MEMROY, memroy, 0); - } - void add_type(MNN::ForwardType type) { - fbb_.AddElement(BackendConfig::VT_TYPE, static_cast(type), 0); - } - void add_precision(int32_t precision) { - fbb_.AddElement(BackendConfig::VT_PRECISION, precision, 0); - } - void add_power(int32_t power) { - fbb_.AddElement(BackendConfig::VT_POWER, power, 0); - } - void add_numberThread(int32_t numberThread) { - fbb_.AddElement(BackendConfig::VT_NUMBERTHREAD, numberThread, 1); - } - explicit BackendConfigBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BackendConfigBuilder &operator=(const BackendConfigBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBackendConfig( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t memroy = 0, - MNN::ForwardType type = MNN::ForwardType_CPU, - int32_t precision = 0, - int32_t power = 0, - int32_t numberThread = 1) { - BackendConfigBuilder builder_(_fbb); - builder_.add_numberThread(numberThread); - builder_.add_power(power); - builder_.add_precision(precision); - builder_.add_memroy(memroy); - builder_.add_type(type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBackendConfig(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MergeT : public flatbuffers::NativeTable { - typedef Merge TableType; - std::vector outputIndexes; - std::vector inputIndexes; - int32_t tensorNumber; - std::unique_ptr backend; - std::vector> oplists; - MergeT() - : tensorNumber(0) { - } -}; - -struct Merge FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MergeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return MergeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUTINDEXES = 4, - VT_INPUTINDEXES = 6, - VT_TENSORNUMBER = 8, - VT_BACKEND = 10, - VT_OPLISTS = 12 - }; - const flatbuffers::Vector *outputIndexes() const { - return GetPointer *>(VT_OUTPUTINDEXES); - } - const flatbuffers::Vector *inputIndexes() const { - return GetPointer *>(VT_INPUTINDEXES); - } - int32_t tensorNumber() const { - return GetField(VT_TENSORNUMBER, 0); - } - const BackendConfig *backend() const { - return GetPointer(VT_BACKEND); - } - const flatbuffers::Vector> *oplists() const { - return GetPointer> *>(VT_OPLISTS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_OUTPUTINDEXES) && - 
verifier.VerifyVector(outputIndexes()) && - VerifyOffset(verifier, VT_INPUTINDEXES) && - verifier.VerifyVector(inputIndexes()) && - VerifyField(verifier, VT_TENSORNUMBER) && - VerifyOffset(verifier, VT_BACKEND) && - verifier.VerifyTable(backend()) && - VerifyOffset(verifier, VT_OPLISTS) && - verifier.VerifyVector(oplists()) && - verifier.VerifyVectorOfTables(oplists()) && - verifier.EndTable(); - } - MergeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MergeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MergeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MergeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_outputIndexes(flatbuffers::Offset> outputIndexes) { - fbb_.AddOffset(Merge::VT_OUTPUTINDEXES, outputIndexes); - } - void add_inputIndexes(flatbuffers::Offset> inputIndexes) { - fbb_.AddOffset(Merge::VT_INPUTINDEXES, inputIndexes); - } - void add_tensorNumber(int32_t tensorNumber) { - fbb_.AddElement(Merge::VT_TENSORNUMBER, tensorNumber, 0); - } - void add_backend(flatbuffers::Offset backend) { - fbb_.AddOffset(Merge::VT_BACKEND, backend); - } - void add_oplists(flatbuffers::Offset>> oplists) { - fbb_.AddOffset(Merge::VT_OPLISTS, oplists); - } - explicit MergeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MergeBuilder &operator=(const MergeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMerge( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> outputIndexes = 0, - flatbuffers::Offset> inputIndexes = 0, - int32_t tensorNumber = 0, - flatbuffers::Offset backend = 0, - flatbuffers::Offset>> oplists = 0) { - MergeBuilder builder_(_fbb); - builder_.add_oplists(oplists); - builder_.add_backend(backend); - builder_.add_tensorNumber(tensorNumber); - builder_.add_inputIndexes(inputIndexes); - builder_.add_outputIndexes(outputIndexes); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateMergeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *outputIndexes = nullptr, - const std::vector *inputIndexes = nullptr, - int32_t tensorNumber = 0, - flatbuffers::Offset backend = 0, - const std::vector> *oplists = nullptr) { - auto outputIndexes__ = outputIndexes ? _fbb.CreateVector(*outputIndexes) : 0; - auto inputIndexes__ = inputIndexes ? _fbb.CreateVector(*inputIndexes) : 0; - auto oplists__ = oplists ? 
_fbb.CreateVector>(*oplists) : 0; - return MNN::Optimizer::CreateMerge( - _fbb, - outputIndexes__, - inputIndexes__, - tensorNumber, - backend, - oplists__); -} - -flatbuffers::Offset CreateMerge(flatbuffers::FlatBufferBuilder &_fbb, const MergeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline BackendConfigT *BackendConfig::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BackendConfigT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BackendConfig::UnPackTo(BackendConfigT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = memroy(); _o->memroy = _e; }; - { auto _e = type(); _o->type = _e; }; - { auto _e = precision(); _o->precision = _e; }; - { auto _e = power(); _o->power = _e; }; - { auto _e = numberThread(); _o->numberThread = _e; }; -} - -inline flatbuffers::Offset BackendConfig::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBackendConfig(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBackendConfig(flatbuffers::FlatBufferBuilder &_fbb, const BackendConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BackendConfigT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _memroy = _o->memroy; - auto _type = _o->type; - auto _precision = _o->precision; - auto _power = _o->power; - auto _numberThread = _o->numberThread; - return MNN::Optimizer::CreateBackendConfig( - _fbb, - _memroy, - _type, - _precision, - _power, - _numberThread); -} - -inline MergeT *Merge::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MergeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Merge::UnPackTo(MergeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } }; - { auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } }; - { auto _e = tensorNumber(); _o->tensorNumber = _e; }; - { auto _e = backend(); if (_e) _o->backend = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = oplists(); if (_e) { _o->oplists.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->oplists[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; -} - -inline flatbuffers::Offset Merge::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MergeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMerge(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMerge(flatbuffers::FlatBufferBuilder &_fbb, const MergeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MergeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0; - auto _inputIndexes = _o->inputIndexes.size() ? 
_fbb.CreateVector(_o->inputIndexes) : 0; - auto _tensorNumber = _o->tensorNumber; - auto _backend = _o->backend ? CreateBackendConfig(_fbb, _o->backend.get(), _rehasher) : 0; - auto _oplists = _o->oplists.size() ? _fbb.CreateVector> (_o->oplists.size(), [](size_t i, _VectorArgs *__va) { return CreateOp(*__va->__fbb, __va->__o->oplists[i].get(), __va->__rehasher); }, &_va ) : 0; - return MNN::Optimizer::CreateMerge( - _fbb, - _outputIndexes, - _inputIndexes, - _tensorNumber, - _backend, - _oplists); -} - -inline const flatbuffers::TypeTable *BackendConfigTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - MNN::ForwardTypeTypeTable - }; - static const char * const names[] = { - "memroy", - "type", - "precision", - "power", - "numberThread" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *MergeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 1, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BackendConfigTypeTable, - MNN::OpTypeTable - }; - static const char * const names[] = { - "outputIndexes", - "inputIndexes", - "tensorNumber", - "backend", - "oplists" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace Optimizer -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_BASICOPTIMIZER_MNN_OPTIMIZER_H_ diff --git a/schema/current/CaffeOp_generated.h b/schema/current/CaffeOp_generated.h deleted file mode 100644 index 218aa7d5..00000000 --- a/schema/current/CaffeOp_generated.h +++ /dev/null @@ -1,6178 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_CAFFEOP_MNN_H_ -#define FLATBUFFERS_GENERATED_CAFFEOP_MNN_H_ - - -#include "Tensor_generated.h" -#include "Type_generated.h" - -namespace MNN { - -struct Convolution2DCommon; -struct Convolution2DCommonT; - -struct Convolution3DCommon; -struct Convolution3DCommonT; - -struct IDSTQuan; -struct IDSTQuanT; - -struct QuantizedFloatParam; -struct QuantizedFloatParamT; - -struct Convolution2D; -struct Convolution2DT; - -struct Convolution3D; -struct Convolution3DT; - -struct InnerProduct; -struct InnerProductT; - -struct Pool; -struct PoolT; - -struct Pool3D; -struct Pool3DT; - -struct Relu; -struct ReluT; - -struct Relu6; -struct Relu6T; - -struct PRelu; -struct PReluT; - -struct ELU; -struct ELUT; - -struct LRN; -struct LRNT; - -struct ArgMax; -struct ArgMaxT; - -struct Axis; -struct AxisT; - -struct Input; -struct InputT; - -struct LSTM; -struct LSTMT; - -struct Slice; -struct SliceT; - -struct BatchNorm; -struct BatchNormT; - -struct Scale; -struct ScaleT; - -struct Eltwise; -struct EltwiseT; - -struct Flatten; -struct FlattenT; - -struct Permute; -struct PermuteT; - -struct Reshape; -struct ReshapeT; - -struct DetectionOutput; -struct DetectionOutputT; - -struct RoiPooling; -struct RoiPoolingT; - -struct Proposal; -struct ProposalT; - -struct Interp; -struct InterpT; - -struct Resize; -struct ResizeT; - 
-struct PriorBox; -struct PriorBoxT; - -struct Normalize; -struct NormalizeT; - -struct EltwiseInt8; -struct EltwiseInt8T; - -inline const flatbuffers::TypeTable *Convolution2DCommonTypeTable(); - -inline const flatbuffers::TypeTable *Convolution3DCommonTypeTable(); - -inline const flatbuffers::TypeTable *IDSTQuanTypeTable(); - -inline const flatbuffers::TypeTable *QuantizedFloatParamTypeTable(); - -inline const flatbuffers::TypeTable *Convolution2DTypeTable(); - -inline const flatbuffers::TypeTable *Convolution3DTypeTable(); - -inline const flatbuffers::TypeTable *InnerProductTypeTable(); - -inline const flatbuffers::TypeTable *PoolTypeTable(); - -inline const flatbuffers::TypeTable *Pool3DTypeTable(); - -inline const flatbuffers::TypeTable *ReluTypeTable(); - -inline const flatbuffers::TypeTable *Relu6TypeTable(); - -inline const flatbuffers::TypeTable *PReluTypeTable(); - -inline const flatbuffers::TypeTable *ELUTypeTable(); - -inline const flatbuffers::TypeTable *LRNTypeTable(); - -inline const flatbuffers::TypeTable *ArgMaxTypeTable(); - -inline const flatbuffers::TypeTable *AxisTypeTable(); - -inline const flatbuffers::TypeTable *InputTypeTable(); - -inline const flatbuffers::TypeTable *LSTMTypeTable(); - -inline const flatbuffers::TypeTable *SliceTypeTable(); - -inline const flatbuffers::TypeTable *BatchNormTypeTable(); - -inline const flatbuffers::TypeTable *ScaleTypeTable(); - -inline const flatbuffers::TypeTable *EltwiseTypeTable(); - -inline const flatbuffers::TypeTable *FlattenTypeTable(); - -inline const flatbuffers::TypeTable *PermuteTypeTable(); - -inline const flatbuffers::TypeTable *ReshapeTypeTable(); - -inline const flatbuffers::TypeTable *DetectionOutputTypeTable(); - -inline const flatbuffers::TypeTable *RoiPoolingTypeTable(); - -inline const flatbuffers::TypeTable *ProposalTypeTable(); - -inline const flatbuffers::TypeTable *InterpTypeTable(); - -inline const flatbuffers::TypeTable *ResizeTypeTable(); - -inline const flatbuffers::TypeTable *PriorBoxTypeTable(); - -inline const flatbuffers::TypeTable *NormalizeTypeTable(); - -inline const flatbuffers::TypeTable *EltwiseInt8TypeTable(); - -enum PadMode { - PadMode_CAFFE = 0, - PadMode_VALID = 1, - PadMode_SAME = 2, - PadMode_MIN = PadMode_CAFFE, - PadMode_MAX = PadMode_SAME -}; - -inline const PadMode (&EnumValuesPadMode())[3] { - static const PadMode values[] = { - PadMode_CAFFE, - PadMode_VALID, - PadMode_SAME - }; - return values; -} - -inline const char * const *EnumNamesPadMode() { - static const char * const names[] = { - "CAFFE", - "VALID", - "SAME", - nullptr - }; - return names; -} - -inline const char *EnumNamePadMode(PadMode e) { - if (e < PadMode_CAFFE || e > PadMode_SAME) return ""; - const size_t index = static_cast(e); - return EnumNamesPadMode()[index]; -} - -enum PoolType { - PoolType_MAXPOOL = 0, - PoolType_AVEPOOL = 1, - PoolType_MIN = PoolType_MAXPOOL, - PoolType_MAX = PoolType_AVEPOOL -}; - -inline const PoolType (&EnumValuesPoolType())[2] { - static const PoolType values[] = { - PoolType_MAXPOOL, - PoolType_AVEPOOL - }; - return values; -} - -inline const char * const *EnumNamesPoolType() { - static const char * const names[] = { - "MAXPOOL", - "AVEPOOL", - nullptr - }; - return names; -} - -inline const char *EnumNamePoolType(PoolType e) { - if (e < PoolType_MAXPOOL || e > PoolType_AVEPOOL) return ""; - const size_t index = static_cast(e); - return EnumNamesPoolType()[index]; -} - -enum PoolPadType { - PoolPadType_CAFFE = 0, - PoolPadType_VALID = 1, - PoolPadType_SAME = 2, - PoolPadType_MIN = 
PoolPadType_CAFFE, - PoolPadType_MAX = PoolPadType_SAME -}; - -inline const PoolPadType (&EnumValuesPoolPadType())[3] { - static const PoolPadType values[] = { - PoolPadType_CAFFE, - PoolPadType_VALID, - PoolPadType_SAME - }; - return values; -} - -inline const char * const *EnumNamesPoolPadType() { - static const char * const names[] = { - "CAFFE", - "VALID", - "SAME", - nullptr - }; - return names; -} - -inline const char *EnumNamePoolPadType(PoolPadType e) { - if (e < PoolPadType_CAFFE || e > PoolPadType_SAME) return ""; - const size_t index = static_cast(e); - return EnumNamesPoolPadType()[index]; -} - -enum EltwiseType { - EltwiseType_PROD = 0, - EltwiseType_SUM = 1, - EltwiseType_MAXIMUM = 2, - EltwiseType_SUB = 3, - EltwiseType_MIN = EltwiseType_PROD, - EltwiseType_MAX = EltwiseType_SUB -}; - -inline const EltwiseType (&EnumValuesEltwiseType())[4] { - static const EltwiseType values[] = { - EltwiseType_PROD, - EltwiseType_SUM, - EltwiseType_MAXIMUM, - EltwiseType_SUB - }; - return values; -} - -inline const char * const *EnumNamesEltwiseType() { - static const char * const names[] = { - "PROD", - "SUM", - "MAXIMUM", - "SUB", - nullptr - }; - return names; -} - -inline const char *EnumNameEltwiseType(EltwiseType e) { - if (e < EltwiseType_PROD || e > EltwiseType_SUB) return ""; - const size_t index = static_cast(e); - return EnumNamesEltwiseType()[index]; -} - -struct Convolution2DCommonT : public flatbuffers::NativeTable { - typedef Convolution2DCommon TableType; - int32_t padX; - int32_t padY; - int32_t kernelX; - int32_t kernelY; - int32_t strideX; - int32_t strideY; - int32_t dilateX; - int32_t dilateY; - PadMode padMode; - int32_t group; - int32_t outputCount; - int32_t inputCount; - bool relu; - bool relu6; - Convolution2DCommonT() - : padX(0), - padY(0), - kernelX(1), - kernelY(1), - strideX(1), - strideY(1), - dilateX(1), - dilateY(1), - padMode(PadMode_CAFFE), - group(1), - outputCount(0), - inputCount(0), - relu(false), - relu6(false) { - } -}; - -struct Convolution2DCommon FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Convolution2DCommonT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Convolution2DCommonTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADX = 4, - VT_PADY = 6, - VT_KERNELX = 8, - VT_KERNELY = 10, - VT_STRIDEX = 12, - VT_STRIDEY = 14, - VT_DILATEX = 16, - VT_DILATEY = 18, - VT_PADMODE = 20, - VT_GROUP = 22, - VT_OUTPUTCOUNT = 24, - VT_INPUTCOUNT = 26, - VT_RELU = 28, - VT_RELU6 = 30 - }; - int32_t padX() const { - return GetField(VT_PADX, 0); - } - int32_t padY() const { - return GetField(VT_PADY, 0); - } - int32_t kernelX() const { - return GetField(VT_KERNELX, 1); - } - int32_t kernelY() const { - return GetField(VT_KERNELY, 1); - } - int32_t strideX() const { - return GetField(VT_STRIDEX, 1); - } - int32_t strideY() const { - return GetField(VT_STRIDEY, 1); - } - int32_t dilateX() const { - return GetField(VT_DILATEX, 1); - } - int32_t dilateY() const { - return GetField(VT_DILATEY, 1); - } - PadMode padMode() const { - return static_cast(GetField(VT_PADMODE, 0)); - } - int32_t group() const { - return GetField(VT_GROUP, 1); - } - int32_t outputCount() const { - return GetField(VT_OUTPUTCOUNT, 0); - } - int32_t inputCount() const { - return GetField(VT_INPUTCOUNT, 0); - } - bool relu() const { - return GetField(VT_RELU, 0) != 0; - } - bool relu6() const { - return GetField(VT_RELU6, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - 
return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADX) && - VerifyField(verifier, VT_PADY) && - VerifyField(verifier, VT_KERNELX) && - VerifyField(verifier, VT_KERNELY) && - VerifyField(verifier, VT_STRIDEX) && - VerifyField(verifier, VT_STRIDEY) && - VerifyField(verifier, VT_DILATEX) && - VerifyField(verifier, VT_DILATEY) && - VerifyField(verifier, VT_PADMODE) && - VerifyField(verifier, VT_GROUP) && - VerifyField(verifier, VT_OUTPUTCOUNT) && - VerifyField(verifier, VT_INPUTCOUNT) && - VerifyField(verifier, VT_RELU) && - VerifyField(verifier, VT_RELU6) && - verifier.EndTable(); - } - Convolution2DCommonT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Convolution2DCommonT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Convolution2DCommonBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padX(int32_t padX) { - fbb_.AddElement(Convolution2DCommon::VT_PADX, padX, 0); - } - void add_padY(int32_t padY) { - fbb_.AddElement(Convolution2DCommon::VT_PADY, padY, 0); - } - void add_kernelX(int32_t kernelX) { - fbb_.AddElement(Convolution2DCommon::VT_KERNELX, kernelX, 1); - } - void add_kernelY(int32_t kernelY) { - fbb_.AddElement(Convolution2DCommon::VT_KERNELY, kernelY, 1); - } - void add_strideX(int32_t strideX) { - fbb_.AddElement(Convolution2DCommon::VT_STRIDEX, strideX, 1); - } - void add_strideY(int32_t strideY) { - fbb_.AddElement(Convolution2DCommon::VT_STRIDEY, strideY, 1); - } - void add_dilateX(int32_t dilateX) { - fbb_.AddElement(Convolution2DCommon::VT_DILATEX, dilateX, 1); - } - void add_dilateY(int32_t dilateY) { - fbb_.AddElement(Convolution2DCommon::VT_DILATEY, dilateY, 1); - } - void add_padMode(PadMode padMode) { - fbb_.AddElement(Convolution2DCommon::VT_PADMODE, static_cast(padMode), 0); - } - void add_group(int32_t group) { - fbb_.AddElement(Convolution2DCommon::VT_GROUP, group, 1); - } - void add_outputCount(int32_t outputCount) { - fbb_.AddElement(Convolution2DCommon::VT_OUTPUTCOUNT, outputCount, 0); - } - void add_inputCount(int32_t inputCount) { - fbb_.AddElement(Convolution2DCommon::VT_INPUTCOUNT, inputCount, 0); - } - void add_relu(bool relu) { - fbb_.AddElement(Convolution2DCommon::VT_RELU, static_cast(relu), 0); - } - void add_relu6(bool relu6) { - fbb_.AddElement(Convolution2DCommon::VT_RELU6, static_cast(relu6), 0); - } - explicit Convolution2DCommonBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Convolution2DCommonBuilder &operator=(const Convolution2DCommonBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateConvolution2DCommon( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t padX = 0, - int32_t padY = 0, - int32_t kernelX = 1, - int32_t kernelY = 1, - int32_t strideX = 1, - int32_t strideY = 1, - int32_t dilateX = 1, - int32_t dilateY = 1, - PadMode padMode = PadMode_CAFFE, - int32_t group = 1, - int32_t outputCount = 0, - int32_t inputCount = 0, - bool relu = false, - bool relu6 = false) { - Convolution2DCommonBuilder builder_(_fbb); - builder_.add_inputCount(inputCount); - builder_.add_outputCount(outputCount); - builder_.add_group(group); - builder_.add_dilateY(dilateY); - 
builder_.add_dilateX(dilateX); - builder_.add_strideY(strideY); - builder_.add_strideX(strideX); - builder_.add_kernelY(kernelY); - builder_.add_kernelX(kernelX); - builder_.add_padY(padY); - builder_.add_padX(padX); - builder_.add_relu6(relu6); - builder_.add_relu(relu); - builder_.add_padMode(padMode); - return builder_.Finish(); -} - -flatbuffers::Offset CreateConvolution2DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Convolution3DCommonT : public flatbuffers::NativeTable { - typedef Convolution3DCommon TableType; - std::vector dilates; - std::vector strides; - std::vector kernels; - std::vector pads; - PadMode padMode; - int32_t inputCount; - int32_t outputCount; - bool relu; - bool relu6; - Convolution3DCommonT() - : padMode(PadMode_CAFFE), - inputCount(0), - outputCount(0), - relu(false), - relu6(false) { - } -}; - -struct Convolution3DCommon FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Convolution3DCommonT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Convolution3DCommonTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DILATES = 4, - VT_STRIDES = 6, - VT_KERNELS = 8, - VT_PADS = 10, - VT_PADMODE = 12, - VT_INPUTCOUNT = 14, - VT_OUTPUTCOUNT = 16, - VT_RELU = 18, - VT_RELU6 = 20 - }; - const flatbuffers::Vector *dilates() const { - return GetPointer *>(VT_DILATES); - } - const flatbuffers::Vector *strides() const { - return GetPointer *>(VT_STRIDES); - } - const flatbuffers::Vector *kernels() const { - return GetPointer *>(VT_KERNELS); - } - const flatbuffers::Vector *pads() const { - return GetPointer *>(VT_PADS); - } - PadMode padMode() const { - return static_cast(GetField(VT_PADMODE, 0)); - } - int32_t inputCount() const { - return GetField(VT_INPUTCOUNT, 0); - } - int32_t outputCount() const { - return GetField(VT_OUTPUTCOUNT, 0); - } - bool relu() const { - return GetField(VT_RELU, 0) != 0; - } - bool relu6() const { - return GetField(VT_RELU6, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DILATES) && - verifier.VerifyVector(dilates()) && - VerifyOffset(verifier, VT_STRIDES) && - verifier.VerifyVector(strides()) && - VerifyOffset(verifier, VT_KERNELS) && - verifier.VerifyVector(kernels()) && - VerifyOffset(verifier, VT_PADS) && - verifier.VerifyVector(pads()) && - VerifyField(verifier, VT_PADMODE) && - VerifyField(verifier, VT_INPUTCOUNT) && - VerifyField(verifier, VT_OUTPUTCOUNT) && - VerifyField(verifier, VT_RELU) && - VerifyField(verifier, VT_RELU6) && - verifier.EndTable(); - } - Convolution3DCommonT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Convolution3DCommonT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Convolution3DCommonBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dilates(flatbuffers::Offset> dilates) { - fbb_.AddOffset(Convolution3DCommon::VT_DILATES, dilates); - } - void add_strides(flatbuffers::Offset> strides) { - fbb_.AddOffset(Convolution3DCommon::VT_STRIDES, strides); - } - void add_kernels(flatbuffers::Offset> kernels) { - fbb_.AddOffset(Convolution3DCommon::VT_KERNELS, 
kernels); - } - void add_pads(flatbuffers::Offset> pads) { - fbb_.AddOffset(Convolution3DCommon::VT_PADS, pads); - } - void add_padMode(PadMode padMode) { - fbb_.AddElement(Convolution3DCommon::VT_PADMODE, static_cast(padMode), 0); - } - void add_inputCount(int32_t inputCount) { - fbb_.AddElement(Convolution3DCommon::VT_INPUTCOUNT, inputCount, 0); - } - void add_outputCount(int32_t outputCount) { - fbb_.AddElement(Convolution3DCommon::VT_OUTPUTCOUNT, outputCount, 0); - } - void add_relu(bool relu) { - fbb_.AddElement(Convolution3DCommon::VT_RELU, static_cast(relu), 0); - } - void add_relu6(bool relu6) { - fbb_.AddElement(Convolution3DCommon::VT_RELU6, static_cast(relu6), 0); - } - explicit Convolution3DCommonBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Convolution3DCommonBuilder &operator=(const Convolution3DCommonBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateConvolution3DCommon( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> dilates = 0, - flatbuffers::Offset> strides = 0, - flatbuffers::Offset> kernels = 0, - flatbuffers::Offset> pads = 0, - PadMode padMode = PadMode_CAFFE, - int32_t inputCount = 0, - int32_t outputCount = 0, - bool relu = false, - bool relu6 = false) { - Convolution3DCommonBuilder builder_(_fbb); - builder_.add_outputCount(outputCount); - builder_.add_inputCount(inputCount); - builder_.add_pads(pads); - builder_.add_kernels(kernels); - builder_.add_strides(strides); - builder_.add_dilates(dilates); - builder_.add_relu6(relu6); - builder_.add_relu(relu); - builder_.add_padMode(padMode); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateConvolution3DCommonDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *dilates = nullptr, - const std::vector *strides = nullptr, - const std::vector *kernels = nullptr, - const std::vector *pads = nullptr, - PadMode padMode = PadMode_CAFFE, - int32_t inputCount = 0, - int32_t outputCount = 0, - bool relu = false, - bool relu6 = false) { - auto dilates__ = dilates ? _fbb.CreateVector(*dilates) : 0; - auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; - auto kernels__ = kernels ? _fbb.CreateVector(*kernels) : 0; - auto pads__ = pads ? 
_fbb.CreateVector(*pads) : 0; - return MNN::CreateConvolution3DCommon( - _fbb, - dilates__, - strides__, - kernels__, - pads__, - padMode, - inputCount, - outputCount, - relu, - relu6); -} - -flatbuffers::Offset CreateConvolution3DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct IDSTQuanT : public flatbuffers::NativeTable { - typedef IDSTQuan TableType; - std::vector buffer; - std::vector alpha; - int32_t type; - bool useInt32; - float quantScale; - float scaleIn; - float scaleOut; - int32_t aMax; - int32_t aMin; - int32_t readType; - bool has_scaleInt; - IDSTQuanT() - : type(0), - useInt32(false), - quantScale(0.0f), - scaleIn(0.0f), - scaleOut(0.0f), - aMax(0), - aMin(0), - readType(0), - has_scaleInt(false) { - } -}; - -struct IDSTQuan FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef IDSTQuanT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return IDSTQuanTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BUFFER = 4, - VT_ALPHA = 6, - VT_TYPE = 8, - VT_USEINT32 = 10, - VT_QUANTSCALE = 12, - VT_SCALEIN = 14, - VT_SCALEOUT = 16, - VT_AMAX = 18, - VT_AMIN = 20, - VT_READTYPE = 22, - VT_HAS_SCALEINT = 24 - }; - const flatbuffers::Vector *buffer() const { - return GetPointer *>(VT_BUFFER); - } - const flatbuffers::Vector *alpha() const { - return GetPointer *>(VT_ALPHA); - } - int32_t type() const { - return GetField(VT_TYPE, 0); - } - bool useInt32() const { - return GetField(VT_USEINT32, 0) != 0; - } - float quantScale() const { - return GetField(VT_QUANTSCALE, 0.0f); - } - float scaleIn() const { - return GetField(VT_SCALEIN, 0.0f); - } - float scaleOut() const { - return GetField(VT_SCALEOUT, 0.0f); - } - int32_t aMax() const { - return GetField(VT_AMAX, 0); - } - int32_t aMin() const { - return GetField(VT_AMIN, 0); - } - int32_t readType() const { - return GetField(VT_READTYPE, 0); - } - bool has_scaleInt() const { - return GetField(VT_HAS_SCALEINT, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_BUFFER) && - verifier.VerifyVector(buffer()) && - VerifyOffset(verifier, VT_ALPHA) && - verifier.VerifyVector(alpha()) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_USEINT32) && - VerifyField(verifier, VT_QUANTSCALE) && - VerifyField(verifier, VT_SCALEIN) && - VerifyField(verifier, VT_SCALEOUT) && - VerifyField(verifier, VT_AMAX) && - VerifyField(verifier, VT_AMIN) && - VerifyField(verifier, VT_READTYPE) && - VerifyField(verifier, VT_HAS_SCALEINT) && - verifier.EndTable(); - } - IDSTQuanT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(IDSTQuanT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct IDSTQuanBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_buffer(flatbuffers::Offset> buffer) { - fbb_.AddOffset(IDSTQuan::VT_BUFFER, buffer); - } - void add_alpha(flatbuffers::Offset> alpha) { - fbb_.AddOffset(IDSTQuan::VT_ALPHA, alpha); - } - void add_type(int32_t type) { - fbb_.AddElement(IDSTQuan::VT_TYPE, type, 0); - } - void add_useInt32(bool useInt32) { - fbb_.AddElement(IDSTQuan::VT_USEINT32, static_cast(useInt32), 0); - } - void 
add_quantScale(float quantScale) { - fbb_.AddElement(IDSTQuan::VT_QUANTSCALE, quantScale, 0.0f); - } - void add_scaleIn(float scaleIn) { - fbb_.AddElement(IDSTQuan::VT_SCALEIN, scaleIn, 0.0f); - } - void add_scaleOut(float scaleOut) { - fbb_.AddElement(IDSTQuan::VT_SCALEOUT, scaleOut, 0.0f); - } - void add_aMax(int32_t aMax) { - fbb_.AddElement(IDSTQuan::VT_AMAX, aMax, 0); - } - void add_aMin(int32_t aMin) { - fbb_.AddElement(IDSTQuan::VT_AMIN, aMin, 0); - } - void add_readType(int32_t readType) { - fbb_.AddElement(IDSTQuan::VT_READTYPE, readType, 0); - } - void add_has_scaleInt(bool has_scaleInt) { - fbb_.AddElement(IDSTQuan::VT_HAS_SCALEINT, static_cast(has_scaleInt), 0); - } - explicit IDSTQuanBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - IDSTQuanBuilder &operator=(const IDSTQuanBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateIDSTQuan( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> buffer = 0, - flatbuffers::Offset> alpha = 0, - int32_t type = 0, - bool useInt32 = false, - float quantScale = 0.0f, - float scaleIn = 0.0f, - float scaleOut = 0.0f, - int32_t aMax = 0, - int32_t aMin = 0, - int32_t readType = 0, - bool has_scaleInt = false) { - IDSTQuanBuilder builder_(_fbb); - builder_.add_readType(readType); - builder_.add_aMin(aMin); - builder_.add_aMax(aMax); - builder_.add_scaleOut(scaleOut); - builder_.add_scaleIn(scaleIn); - builder_.add_quantScale(quantScale); - builder_.add_type(type); - builder_.add_alpha(alpha); - builder_.add_buffer(buffer); - builder_.add_has_scaleInt(has_scaleInt); - builder_.add_useInt32(useInt32); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateIDSTQuanDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *buffer = nullptr, - const std::vector *alpha = nullptr, - int32_t type = 0, - bool useInt32 = false, - float quantScale = 0.0f, - float scaleIn = 0.0f, - float scaleOut = 0.0f, - int32_t aMax = 0, - int32_t aMin = 0, - int32_t readType = 0, - bool has_scaleInt = false) { - auto buffer__ = buffer ? _fbb.CreateVector(*buffer) : 0; - auto alpha__ = alpha ? 
_fbb.CreateVector(*alpha) : 0; - return MNN::CreateIDSTQuan( - _fbb, - buffer__, - alpha__, - type, - useInt32, - quantScale, - scaleIn, - scaleOut, - aMax, - aMin, - readType, - has_scaleInt); -} - -flatbuffers::Offset CreateIDSTQuan(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedFloatParamT : public flatbuffers::NativeTable { - typedef QuantizedFloatParam TableType; - std::vector weight; - std::vector bias; - std::vector scale; - std::vector tensorScale; - QuantizedFloatParamT() { - } -}; - -struct QuantizedFloatParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedFloatParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedFloatParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_WEIGHT = 4, - VT_BIAS = 6, - VT_SCALE = 8, - VT_TENSORSCALE = 10 - }; - const flatbuffers::Vector *weight() const { - return GetPointer *>(VT_WEIGHT); - } - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - const flatbuffers::Vector *scale() const { - return GetPointer *>(VT_SCALE); - } - const flatbuffers::Vector *tensorScale() const { - return GetPointer *>(VT_TENSORSCALE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_WEIGHT) && - verifier.VerifyVector(weight()) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - VerifyOffset(verifier, VT_SCALE) && - verifier.VerifyVector(scale()) && - VerifyOffset(verifier, VT_TENSORSCALE) && - verifier.VerifyVector(tensorScale()) && - verifier.EndTable(); - } - QuantizedFloatParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedFloatParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedFloatParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_weight(flatbuffers::Offset> weight) { - fbb_.AddOffset(QuantizedFloatParam::VT_WEIGHT, weight); - } - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(QuantizedFloatParam::VT_BIAS, bias); - } - void add_scale(flatbuffers::Offset> scale) { - fbb_.AddOffset(QuantizedFloatParam::VT_SCALE, scale); - } - void add_tensorScale(flatbuffers::Offset> tensorScale) { - fbb_.AddOffset(QuantizedFloatParam::VT_TENSORSCALE, tensorScale); - } - explicit QuantizedFloatParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedFloatParamBuilder &operator=(const QuantizedFloatParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedFloatParam( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> weight = 0, - flatbuffers::Offset> bias = 0, - flatbuffers::Offset> scale = 0, - flatbuffers::Offset> tensorScale = 0) { - QuantizedFloatParamBuilder builder_(_fbb); - builder_.add_tensorScale(tensorScale); - builder_.add_scale(scale); - builder_.add_bias(bias); - builder_.add_weight(weight); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateQuantizedFloatParamDirect( - 
flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *weight = nullptr, - const std::vector *bias = nullptr, - const std::vector *scale = nullptr, - const std::vector *tensorScale = nullptr) { - auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; - auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; - auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; - auto tensorScale__ = tensorScale ? _fbb.CreateVector(*tensorScale) : 0; - return MNN::CreateQuantizedFloatParam( - _fbb, - weight__, - bias__, - scale__, - tensorScale__); -} - -flatbuffers::Offset CreateQuantizedFloatParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Convolution2DT : public flatbuffers::NativeTable { - typedef Convolution2D TableType; - std::unique_ptr common; - std::vector weight; - std::vector bias; - std::unique_ptr quanParameter; - std::unique_ptr symmetricQuan; - Convolution2DT() { - } -}; - -struct Convolution2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Convolution2DT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Convolution2DTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COMMON = 4, - VT_WEIGHT = 6, - VT_BIAS = 8, - VT_QUANPARAMETER = 10, - VT_SYMMETRICQUAN = 12 - }; - const Convolution2DCommon *common() const { - return GetPointer(VT_COMMON); - } - const flatbuffers::Vector *weight() const { - return GetPointer *>(VT_WEIGHT); - } - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - const IDSTQuan *quanParameter() const { - return GetPointer(VT_QUANPARAMETER); - } - const QuantizedFloatParam *symmetricQuan() const { - return GetPointer(VT_SYMMETRICQUAN); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_COMMON) && - verifier.VerifyTable(common()) && - VerifyOffset(verifier, VT_WEIGHT) && - verifier.VerifyVector(weight()) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - VerifyOffset(verifier, VT_QUANPARAMETER) && - verifier.VerifyTable(quanParameter()) && - VerifyOffset(verifier, VT_SYMMETRICQUAN) && - verifier.VerifyTable(symmetricQuan()) && - verifier.EndTable(); - } - Convolution2DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Convolution2DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Convolution2DBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_common(flatbuffers::Offset common) { - fbb_.AddOffset(Convolution2D::VT_COMMON, common); - } - void add_weight(flatbuffers::Offset> weight) { - fbb_.AddOffset(Convolution2D::VT_WEIGHT, weight); - } - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(Convolution2D::VT_BIAS, bias); - } - void add_quanParameter(flatbuffers::Offset quanParameter) { - fbb_.AddOffset(Convolution2D::VT_QUANPARAMETER, quanParameter); - } - void add_symmetricQuan(flatbuffers::Offset symmetricQuan) { - fbb_.AddOffset(Convolution2D::VT_SYMMETRICQUAN, symmetricQuan); - } - explicit Convolution2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Convolution2DBuilder &operator=(const Convolution2DBuilder &); - 
flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateConvolution2D( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset common = 0, - flatbuffers::Offset> weight = 0, - flatbuffers::Offset> bias = 0, - flatbuffers::Offset quanParameter = 0, - flatbuffers::Offset symmetricQuan = 0) { - Convolution2DBuilder builder_(_fbb); - builder_.add_symmetricQuan(symmetricQuan); - builder_.add_quanParameter(quanParameter); - builder_.add_bias(bias); - builder_.add_weight(weight); - builder_.add_common(common); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateConvolution2DDirect( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset common = 0, - const std::vector *weight = nullptr, - const std::vector *bias = nullptr, - flatbuffers::Offset quanParameter = 0, - flatbuffers::Offset symmetricQuan = 0) { - auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; - auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; - return MNN::CreateConvolution2D( - _fbb, - common, - weight__, - bias__, - quanParameter, - symmetricQuan); -} - -flatbuffers::Offset CreateConvolution2D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Convolution3DT : public flatbuffers::NativeTable { - typedef Convolution3D TableType; - std::unique_ptr common; - std::vector weight; - std::vector bias; - Convolution3DT() { - } -}; - -struct Convolution3D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Convolution3DT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Convolution3DTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COMMON = 4, - VT_WEIGHT = 6, - VT_BIAS = 8 - }; - const Convolution3DCommon *common() const { - return GetPointer(VT_COMMON); - } - const flatbuffers::Vector *weight() const { - return GetPointer *>(VT_WEIGHT); - } - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_COMMON) && - verifier.VerifyTable(common()) && - VerifyOffset(verifier, VT_WEIGHT) && - verifier.VerifyVector(weight()) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - verifier.EndTable(); - } - Convolution3DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Convolution3DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Convolution3DBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_common(flatbuffers::Offset common) { - fbb_.AddOffset(Convolution3D::VT_COMMON, common); - } - void add_weight(flatbuffers::Offset> weight) { - fbb_.AddOffset(Convolution3D::VT_WEIGHT, weight); - } - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(Convolution3D::VT_BIAS, bias); - } - explicit Convolution3DBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Convolution3DBuilder &operator=(const Convolution3DBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; 
- } -}; - -inline flatbuffers::Offset CreateConvolution3D( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset common = 0, - flatbuffers::Offset> weight = 0, - flatbuffers::Offset> bias = 0) { - Convolution3DBuilder builder_(_fbb); - builder_.add_bias(bias); - builder_.add_weight(weight); - builder_.add_common(common); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateConvolution3DDirect( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset common = 0, - const std::vector *weight = nullptr, - const std::vector *bias = nullptr) { - auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; - auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; - return MNN::CreateConvolution3D( - _fbb, - common, - weight__, - bias__); -} - -flatbuffers::Offset CreateConvolution3D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct InnerProductT : public flatbuffers::NativeTable { - typedef InnerProduct TableType; - int32_t outputCount; - int32_t biasTerm; - int32_t weightSize; - std::vector weight; - std::vector bias; - int32_t axis; - bool transpose; - std::unique_ptr quanParameter; - InnerProductT() - : outputCount(0), - biasTerm(0), - weightSize(0), - axis(0), - transpose(false) { - } -}; - -struct InnerProduct FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef InnerProductT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return InnerProductTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUTCOUNT = 4, - VT_BIASTERM = 6, - VT_WEIGHTSIZE = 8, - VT_WEIGHT = 10, - VT_BIAS = 12, - VT_AXIS = 14, - VT_TRANSPOSE = 16, - VT_QUANPARAMETER = 18 - }; - int32_t outputCount() const { - return GetField(VT_OUTPUTCOUNT, 0); - } - int32_t biasTerm() const { - return GetField(VT_BIASTERM, 0); - } - int32_t weightSize() const { - return GetField(VT_WEIGHTSIZE, 0); - } - const flatbuffers::Vector *weight() const { - return GetPointer *>(VT_WEIGHT); - } - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool transpose() const { - return GetField(VT_TRANSPOSE, 0) != 0; - } - const IDSTQuan *quanParameter() const { - return GetPointer(VT_QUANPARAMETER); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUTCOUNT) && - VerifyField(verifier, VT_BIASTERM) && - VerifyField(verifier, VT_WEIGHTSIZE) && - VerifyOffset(verifier, VT_WEIGHT) && - verifier.VerifyVector(weight()) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_TRANSPOSE) && - VerifyOffset(verifier, VT_QUANPARAMETER) && - verifier.VerifyTable(quanParameter()) && - verifier.EndTable(); - } - InnerProductT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(InnerProductT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct InnerProductBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_outputCount(int32_t outputCount) { - fbb_.AddElement(InnerProduct::VT_OUTPUTCOUNT, outputCount, 0); - } - void add_biasTerm(int32_t biasTerm) { - 
fbb_.AddElement(InnerProduct::VT_BIASTERM, biasTerm, 0); - } - void add_weightSize(int32_t weightSize) { - fbb_.AddElement(InnerProduct::VT_WEIGHTSIZE, weightSize, 0); - } - void add_weight(flatbuffers::Offset> weight) { - fbb_.AddOffset(InnerProduct::VT_WEIGHT, weight); - } - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(InnerProduct::VT_BIAS, bias); - } - void add_axis(int32_t axis) { - fbb_.AddElement(InnerProduct::VT_AXIS, axis, 0); - } - void add_transpose(bool transpose) { - fbb_.AddElement(InnerProduct::VT_TRANSPOSE, static_cast(transpose), 0); - } - void add_quanParameter(flatbuffers::Offset quanParameter) { - fbb_.AddOffset(InnerProduct::VT_QUANPARAMETER, quanParameter); - } - explicit InnerProductBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - InnerProductBuilder &operator=(const InnerProductBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateInnerProduct( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t outputCount = 0, - int32_t biasTerm = 0, - int32_t weightSize = 0, - flatbuffers::Offset> weight = 0, - flatbuffers::Offset> bias = 0, - int32_t axis = 0, - bool transpose = false, - flatbuffers::Offset quanParameter = 0) { - InnerProductBuilder builder_(_fbb); - builder_.add_quanParameter(quanParameter); - builder_.add_axis(axis); - builder_.add_bias(bias); - builder_.add_weight(weight); - builder_.add_weightSize(weightSize); - builder_.add_biasTerm(biasTerm); - builder_.add_outputCount(outputCount); - builder_.add_transpose(transpose); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateInnerProductDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t outputCount = 0, - int32_t biasTerm = 0, - int32_t weightSize = 0, - const std::vector *weight = nullptr, - const std::vector *bias = nullptr, - int32_t axis = 0, - bool transpose = false, - flatbuffers::Offset quanParameter = 0) { - auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; - auto bias__ = bias ? 
_fbb.CreateVector(*bias) : 0; - return MNN::CreateInnerProduct( - _fbb, - outputCount, - biasTerm, - weightSize, - weight__, - bias__, - axis, - transpose, - quanParameter); -} - -flatbuffers::Offset CreateInnerProduct(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PoolT : public flatbuffers::NativeTable { - typedef Pool TableType; - int32_t padX; - int32_t padY; - bool isGlobal; - int32_t kernelX; - int32_t kernelY; - int32_t strideX; - int32_t strideY; - PoolType type; - PoolPadType padType; - DataType dataType; - bool ceilModel; - PoolT() - : padX(0), - padY(0), - isGlobal(false), - kernelX(0), - kernelY(0), - strideX(0), - strideY(0), - type(PoolType_MAXPOOL), - padType(PoolPadType_CAFFE), - dataType(DataType_DT_FLOAT), - ceilModel(true) { - } -}; - -struct Pool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PoolT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PoolTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADX = 4, - VT_PADY = 6, - VT_ISGLOBAL = 8, - VT_KERNELX = 10, - VT_KERNELY = 12, - VT_STRIDEX = 14, - VT_STRIDEY = 16, - VT_TYPE = 18, - VT_PADTYPE = 20, - VT_DATATYPE = 22, - VT_CEILMODEL = 24 - }; - int32_t padX() const { - return GetField(VT_PADX, 0); - } - int32_t padY() const { - return GetField(VT_PADY, 0); - } - bool isGlobal() const { - return GetField(VT_ISGLOBAL, 0) != 0; - } - int32_t kernelX() const { - return GetField(VT_KERNELX, 0); - } - int32_t kernelY() const { - return GetField(VT_KERNELY, 0); - } - int32_t strideX() const { - return GetField(VT_STRIDEX, 0); - } - int32_t strideY() const { - return GetField(VT_STRIDEY, 0); - } - PoolType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - PoolPadType padType() const { - return static_cast(GetField(VT_PADTYPE, 0)); - } - DataType dataType() const { - return static_cast(GetField(VT_DATATYPE, 1)); - } - bool ceilModel() const { - return GetField(VT_CEILMODEL, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADX) && - VerifyField(verifier, VT_PADY) && - VerifyField(verifier, VT_ISGLOBAL) && - VerifyField(verifier, VT_KERNELX) && - VerifyField(verifier, VT_KERNELY) && - VerifyField(verifier, VT_STRIDEX) && - VerifyField(verifier, VT_STRIDEY) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_PADTYPE) && - VerifyField(verifier, VT_DATATYPE) && - VerifyField(verifier, VT_CEILMODEL) && - verifier.EndTable(); - } - PoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PoolBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padX(int32_t padX) { - fbb_.AddElement(Pool::VT_PADX, padX, 0); - } - void add_padY(int32_t padY) { - fbb_.AddElement(Pool::VT_PADY, padY, 0); - } - void add_isGlobal(bool isGlobal) { - fbb_.AddElement(Pool::VT_ISGLOBAL, static_cast(isGlobal), 0); - } - void add_kernelX(int32_t kernelX) { - fbb_.AddElement(Pool::VT_KERNELX, kernelX, 0); - } - void add_kernelY(int32_t kernelY) { - fbb_.AddElement(Pool::VT_KERNELY, kernelY, 0); - } - void add_strideX(int32_t strideX) { - 
fbb_.AddElement(Pool::VT_STRIDEX, strideX, 0); - } - void add_strideY(int32_t strideY) { - fbb_.AddElement(Pool::VT_STRIDEY, strideY, 0); - } - void add_type(PoolType type) { - fbb_.AddElement(Pool::VT_TYPE, static_cast(type), 0); - } - void add_padType(PoolPadType padType) { - fbb_.AddElement(Pool::VT_PADTYPE, static_cast(padType), 0); - } - void add_dataType(DataType dataType) { - fbb_.AddElement(Pool::VT_DATATYPE, static_cast(dataType), 1); - } - void add_ceilModel(bool ceilModel) { - fbb_.AddElement(Pool::VT_CEILMODEL, static_cast(ceilModel), 1); - } - explicit PoolBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PoolBuilder &operator=(const PoolBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePool( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t padX = 0, - int32_t padY = 0, - bool isGlobal = false, - int32_t kernelX = 0, - int32_t kernelY = 0, - int32_t strideX = 0, - int32_t strideY = 0, - PoolType type = PoolType_MAXPOOL, - PoolPadType padType = PoolPadType_CAFFE, - DataType dataType = DataType_DT_FLOAT, - bool ceilModel = true) { - PoolBuilder builder_(_fbb); - builder_.add_dataType(dataType); - builder_.add_strideY(strideY); - builder_.add_strideX(strideX); - builder_.add_kernelY(kernelY); - builder_.add_kernelX(kernelX); - builder_.add_padY(padY); - builder_.add_padX(padX); - builder_.add_ceilModel(ceilModel); - builder_.add_padType(padType); - builder_.add_type(type); - builder_.add_isGlobal(isGlobal); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePool(flatbuffers::FlatBufferBuilder &_fbb, const PoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Pool3DT : public flatbuffers::NativeTable { - typedef Pool3D TableType; - std::vector strides; - std::vector kernels; - std::vector pads; - PoolType type; - PoolPadType padType; - Pool3DT() - : type(PoolType_MAXPOOL), - padType(PoolPadType_CAFFE) { - } -}; - -struct Pool3D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Pool3DT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Pool3DTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_STRIDES = 4, - VT_KERNELS = 6, - VT_PADS = 8, - VT_TYPE = 10, - VT_PADTYPE = 12 - }; - const flatbuffers::Vector *strides() const { - return GetPointer *>(VT_STRIDES); - } - const flatbuffers::Vector *kernels() const { - return GetPointer *>(VT_KERNELS); - } - const flatbuffers::Vector *pads() const { - return GetPointer *>(VT_PADS); - } - PoolType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - PoolPadType padType() const { - return static_cast(GetField(VT_PADTYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_STRIDES) && - verifier.VerifyVector(strides()) && - VerifyOffset(verifier, VT_KERNELS) && - verifier.VerifyVector(kernels()) && - VerifyOffset(verifier, VT_PADS) && - verifier.VerifyVector(pads()) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_PADTYPE) && - verifier.EndTable(); - } - Pool3DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Pool3DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT* _o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Pool3DBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_strides(flatbuffers::Offset> strides) { - fbb_.AddOffset(Pool3D::VT_STRIDES, strides); - } - void add_kernels(flatbuffers::Offset> kernels) { - fbb_.AddOffset(Pool3D::VT_KERNELS, kernels); - } - void add_pads(flatbuffers::Offset> pads) { - fbb_.AddOffset(Pool3D::VT_PADS, pads); - } - void add_type(PoolType type) { - fbb_.AddElement(Pool3D::VT_TYPE, static_cast(type), 0); - } - void add_padType(PoolPadType padType) { - fbb_.AddElement(Pool3D::VT_PADTYPE, static_cast(padType), 0); - } - explicit Pool3DBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Pool3DBuilder &operator=(const Pool3DBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePool3D( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> strides = 0, - flatbuffers::Offset> kernels = 0, - flatbuffers::Offset> pads = 0, - PoolType type = PoolType_MAXPOOL, - PoolPadType padType = PoolPadType_CAFFE) { - Pool3DBuilder builder_(_fbb); - builder_.add_pads(pads); - builder_.add_kernels(kernels); - builder_.add_strides(strides); - builder_.add_padType(padType); - builder_.add_type(type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreatePool3DDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *strides = nullptr, - const std::vector *kernels = nullptr, - const std::vector *pads = nullptr, - PoolType type = PoolType_MAXPOOL, - PoolPadType padType = PoolPadType_CAFFE) { - auto strides__ = strides ? _fbb.CreateVector(*strides) : 0; - auto kernels__ = kernels ? _fbb.CreateVector(*kernels) : 0; - auto pads__ = pads ? 
_fbb.CreateVector(*pads) : 0; - return MNN::CreatePool3D( - _fbb, - strides__, - kernels__, - pads__, - type, - padType); -} - -flatbuffers::Offset CreatePool3D(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReluT : public flatbuffers::NativeTable { - typedef Relu TableType; - float slope; - ReluT() - : slope(0.0f) { - } -}; - -struct Relu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReluT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ReluTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SLOPE = 4 - }; - float slope() const { - return GetField(VT_SLOPE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SLOPE) && - verifier.EndTable(); - } - ReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReluBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_slope(float slope) { - fbb_.AddElement(Relu::VT_SLOPE, slope, 0.0f); - } - explicit ReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReluBuilder &operator=(const ReluBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRelu( - flatbuffers::FlatBufferBuilder &_fbb, - float slope = 0.0f) { - ReluBuilder builder_(_fbb); - builder_.add_slope(slope); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRelu(flatbuffers::FlatBufferBuilder &_fbb, const ReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Relu6T : public flatbuffers::NativeTable { - typedef Relu6 TableType; - float slope; - Relu6T() - : slope(0.0f) { - } -}; - -struct Relu6 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Relu6T NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return Relu6TypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SLOPE = 4 - }; - float slope() const { - return GetField(VT_SLOPE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SLOPE) && - verifier.EndTable(); - } - Relu6T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Relu6T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Relu6Builder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_slope(float slope) { - fbb_.AddElement(Relu6::VT_SLOPE, slope, 0.0f); - } - explicit Relu6Builder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Relu6Builder &operator=(const Relu6Builder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset 
CreateRelu6( - flatbuffers::FlatBufferBuilder &_fbb, - float slope = 0.0f) { - Relu6Builder builder_(_fbb); - builder_.add_slope(slope); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRelu6(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PReluT : public flatbuffers::NativeTable { - typedef PRelu TableType; - int32_t slopeCount; - std::vector slope; - PReluT() - : slopeCount(0) { - } -}; - -struct PRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PReluT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PReluTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SLOPECOUNT = 4, - VT_SLOPE = 6 - }; - int32_t slopeCount() const { - return GetField(VT_SLOPECOUNT, 0); - } - const flatbuffers::Vector *slope() const { - return GetPointer *>(VT_SLOPE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SLOPECOUNT) && - VerifyOffset(verifier, VT_SLOPE) && - verifier.VerifyVector(slope()) && - verifier.EndTable(); - } - PReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PReluBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_slopeCount(int32_t slopeCount) { - fbb_.AddElement(PRelu::VT_SLOPECOUNT, slopeCount, 0); - } - void add_slope(flatbuffers::Offset> slope) { - fbb_.AddOffset(PRelu::VT_SLOPE, slope); - } - explicit PReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PReluBuilder &operator=(const PReluBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePRelu( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t slopeCount = 0, - flatbuffers::Offset> slope = 0) { - PReluBuilder builder_(_fbb); - builder_.add_slope(slope); - builder_.add_slopeCount(slopeCount); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreatePReluDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t slopeCount = 0, - const std::vector *slope = nullptr) { - auto slope__ = slope ? 
_fbb.CreateVector(*slope) : 0; - return MNN::CreatePRelu( - _fbb, - slopeCount, - slope__); -} - -flatbuffers::Offset CreatePRelu(flatbuffers::FlatBufferBuilder &_fbb, const PReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ELUT : public flatbuffers::NativeTable { - typedef ELU TableType; - float alpha; - ELUT() - : alpha(0.0f) { - } -}; - -struct ELU FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ELUT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ELUTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALPHA = 4 - }; - float alpha() const { - return GetField(VT_ALPHA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALPHA) && - verifier.EndTable(); - } - ELUT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ELUT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ELUT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ELUBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_alpha(float alpha) { - fbb_.AddElement(ELU::VT_ALPHA, alpha, 0.0f); - } - explicit ELUBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ELUBuilder &operator=(const ELUBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateELU( - flatbuffers::FlatBufferBuilder &_fbb, - float alpha = 0.0f) { - ELUBuilder builder_(_fbb); - builder_.add_alpha(alpha); - return builder_.Finish(); -} - -flatbuffers::Offset CreateELU(flatbuffers::FlatBufferBuilder &_fbb, const ELUT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LRNT : public flatbuffers::NativeTable { - typedef LRN TableType; - int32_t regionType; - int32_t localSize; - float alpha; - float beta; - LRNT() - : regionType(0), - localSize(0), - alpha(0.0f), - beta(0.0f) { - } -}; - -struct LRN FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LRNT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LRNTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_REGIONTYPE = 4, - VT_LOCALSIZE = 6, - VT_ALPHA = 8, - VT_BETA = 10 - }; - int32_t regionType() const { - return GetField(VT_REGIONTYPE, 0); - } - int32_t localSize() const { - return GetField(VT_LOCALSIZE, 0); - } - float alpha() const { - return GetField(VT_ALPHA, 0.0f); - } - float beta() const { - return GetField(VT_BETA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_REGIONTYPE) && - VerifyField(verifier, VT_LOCALSIZE) && - VerifyField(verifier, VT_ALPHA) && - VerifyField(verifier, VT_BETA) && - verifier.EndTable(); - } - LRNT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LRNT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LRNT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LRNBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t 
start_; - void add_regionType(int32_t regionType) { - fbb_.AddElement(LRN::VT_REGIONTYPE, regionType, 0); - } - void add_localSize(int32_t localSize) { - fbb_.AddElement(LRN::VT_LOCALSIZE, localSize, 0); - } - void add_alpha(float alpha) { - fbb_.AddElement(LRN::VT_ALPHA, alpha, 0.0f); - } - void add_beta(float beta) { - fbb_.AddElement(LRN::VT_BETA, beta, 0.0f); - } - explicit LRNBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LRNBuilder &operator=(const LRNBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLRN( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t regionType = 0, - int32_t localSize = 0, - float alpha = 0.0f, - float beta = 0.0f) { - LRNBuilder builder_(_fbb); - builder_.add_beta(beta); - builder_.add_alpha(alpha); - builder_.add_localSize(localSize); - builder_.add_regionType(regionType); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLRN(flatbuffers::FlatBufferBuilder &_fbb, const LRNT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMaxT : public flatbuffers::NativeTable { - typedef ArgMax TableType; - int32_t outMaxVal; - int32_t topK; - int32_t axis; - int32_t softmaxThreshold; - ArgMaxT() - : outMaxVal(0), - topK(0), - axis(0), - softmaxThreshold(0) { - } -}; - -struct ArgMax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMaxT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ArgMaxTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTMAXVAL = 4, - VT_TOPK = 6, - VT_AXIS = 8, - VT_SOFTMAXTHRESHOLD = 10 - }; - int32_t outMaxVal() const { - return GetField(VT_OUTMAXVAL, 0); - } - int32_t topK() const { - return GetField(VT_TOPK, 0); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - int32_t softmaxThreshold() const { - return GetField(VT_SOFTMAXTHRESHOLD, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTMAXVAL) && - VerifyField(verifier, VT_TOPK) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_SOFTMAXTHRESHOLD) && - verifier.EndTable(); - } - ArgMaxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMaxT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMaxBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_outMaxVal(int32_t outMaxVal) { - fbb_.AddElement(ArgMax::VT_OUTMAXVAL, outMaxVal, 0); - } - void add_topK(int32_t topK) { - fbb_.AddElement(ArgMax::VT_TOPK, topK, 0); - } - void add_axis(int32_t axis) { - fbb_.AddElement(ArgMax::VT_AXIS, axis, 0); - } - void add_softmaxThreshold(int32_t softmaxThreshold) { - fbb_.AddElement(ArgMax::VT_SOFTMAXTHRESHOLD, softmaxThreshold, 0); - } - explicit ArgMaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ArgMaxBuilder &operator=(const ArgMaxBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMax( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t 
outMaxVal = 0, - int32_t topK = 0, - int32_t axis = 0, - int32_t softmaxThreshold = 0) { - ArgMaxBuilder builder_(_fbb); - builder_.add_softmaxThreshold(softmaxThreshold); - builder_.add_axis(axis); - builder_.add_topK(topK); - builder_.add_outMaxVal(outMaxVal); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMax(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AxisT : public flatbuffers::NativeTable { - typedef Axis TableType; - int32_t axis; - AxisT() - : axis(0) { - } -}; - -struct Axis FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AxisT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return AxisTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - AxisT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AxisT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AxisT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AxisBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(Axis::VT_AXIS, axis, 0); - } - explicit AxisBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - AxisBuilder &operator=(const AxisBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateAxis( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0) { - AxisBuilder builder_(_fbb); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateAxis(flatbuffers::FlatBufferBuilder &_fbb, const AxisT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct InputT : public flatbuffers::NativeTable { - typedef Input TableType; - std::vector dims; - DataType dtype; - MNN_DATA_FORMAT dformat; - InputT() - : dtype(DataType_DT_FLOAT), - dformat(MNN_DATA_FORMAT_NC4HW4) { - } -}; - -struct Input FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef InputT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return InputTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DIMS = 4, - VT_DTYPE = 6, - VT_DFORMAT = 8 - }; - const flatbuffers::Vector *dims() const { - return GetPointer *>(VT_DIMS); - } - DataType dtype() const { - return static_cast(GetField(VT_DTYPE, 1)); - } - MNN_DATA_FORMAT dformat() const { - return static_cast(GetField(VT_DFORMAT, 2)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DIMS) && - verifier.VerifyVector(dims()) && - VerifyField(verifier, VT_DTYPE) && - VerifyField(verifier, VT_DFORMAT) && - verifier.EndTable(); - } - InputT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(InputT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const InputT* _o, 
const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct InputBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dims(flatbuffers::Offset> dims) { - fbb_.AddOffset(Input::VT_DIMS, dims); - } - void add_dtype(DataType dtype) { - fbb_.AddElement(Input::VT_DTYPE, static_cast(dtype), 1); - } - void add_dformat(MNN_DATA_FORMAT dformat) { - fbb_.AddElement(Input::VT_DFORMAT, static_cast(dformat), 2); - } - explicit InputBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - InputBuilder &operator=(const InputBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateInput( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> dims = 0, - DataType dtype = DataType_DT_FLOAT, - MNN_DATA_FORMAT dformat = MNN_DATA_FORMAT_NC4HW4) { - InputBuilder builder_(_fbb); - builder_.add_dtype(dtype); - builder_.add_dims(dims); - builder_.add_dformat(dformat); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateInputDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *dims = nullptr, - DataType dtype = DataType_DT_FLOAT, - MNN_DATA_FORMAT dformat = MNN_DATA_FORMAT_NC4HW4) { - auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; - return MNN::CreateInput( - _fbb, - dims__, - dtype, - dformat); -} - -flatbuffers::Offset CreateInput(flatbuffers::FlatBufferBuilder &_fbb, const InputT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LSTMT : public flatbuffers::NativeTable { - typedef LSTM TableType; - int32_t outputCount; - int32_t weightSize; - float clippingThreshold; - std::unique_ptr weightI; - std::unique_ptr weightH; - std::unique_ptr bias; - std::unique_ptr weightIQ; - std::unique_ptr weightIA; - float quantScale; - LSTMT() - : outputCount(0), - weightSize(0), - clippingThreshold(0.0f), - quantScale(0.0f) { - } -}; - -struct LSTM FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSTMT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return LSTMTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUTCOUNT = 4, - VT_WEIGHTSIZE = 6, - VT_CLIPPINGTHRESHOLD = 8, - VT_WEIGHTI = 10, - VT_WEIGHTH = 12, - VT_BIAS = 14, - VT_WEIGHTIQ = 16, - VT_WEIGHTIA = 18, - VT_QUANTSCALE = 20 - }; - int32_t outputCount() const { - return GetField(VT_OUTPUTCOUNT, 0); - } - int32_t weightSize() const { - return GetField(VT_WEIGHTSIZE, 0); - } - float clippingThreshold() const { - return GetField(VT_CLIPPINGTHRESHOLD, 0.0f); - } - const Blob *weightI() const { - return GetPointer(VT_WEIGHTI); - } - const Blob *weightH() const { - return GetPointer(VT_WEIGHTH); - } - const Blob *bias() const { - return GetPointer(VT_BIAS); - } - const Blob *weightIQ() const { - return GetPointer(VT_WEIGHTIQ); - } - const Blob *weightIA() const { - return GetPointer(VT_WEIGHTIA); - } - float quantScale() const { - return GetField(VT_QUANTSCALE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUTCOUNT) && - VerifyField(verifier, VT_WEIGHTSIZE) && - VerifyField(verifier, VT_CLIPPINGTHRESHOLD) && - VerifyOffset(verifier, VT_WEIGHTI) && - verifier.VerifyTable(weightI()) && - VerifyOffset(verifier, VT_WEIGHTH) && - verifier.VerifyTable(weightH()) && - VerifyOffset(verifier, VT_BIAS) && - 
verifier.VerifyTable(bias()) && - VerifyOffset(verifier, VT_WEIGHTIQ) && - verifier.VerifyTable(weightIQ()) && - VerifyOffset(verifier, VT_WEIGHTIA) && - verifier.VerifyTable(weightIA()) && - VerifyField(verifier, VT_QUANTSCALE) && - verifier.EndTable(); - } - LSTMT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSTMT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LSTMBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_outputCount(int32_t outputCount) { - fbb_.AddElement(LSTM::VT_OUTPUTCOUNT, outputCount, 0); - } - void add_weightSize(int32_t weightSize) { - fbb_.AddElement(LSTM::VT_WEIGHTSIZE, weightSize, 0); - } - void add_clippingThreshold(float clippingThreshold) { - fbb_.AddElement(LSTM::VT_CLIPPINGTHRESHOLD, clippingThreshold, 0.0f); - } - void add_weightI(flatbuffers::Offset weightI) { - fbb_.AddOffset(LSTM::VT_WEIGHTI, weightI); - } - void add_weightH(flatbuffers::Offset weightH) { - fbb_.AddOffset(LSTM::VT_WEIGHTH, weightH); - } - void add_bias(flatbuffers::Offset bias) { - fbb_.AddOffset(LSTM::VT_BIAS, bias); - } - void add_weightIQ(flatbuffers::Offset weightIQ) { - fbb_.AddOffset(LSTM::VT_WEIGHTIQ, weightIQ); - } - void add_weightIA(flatbuffers::Offset weightIA) { - fbb_.AddOffset(LSTM::VT_WEIGHTIA, weightIA); - } - void add_quantScale(float quantScale) { - fbb_.AddElement(LSTM::VT_QUANTSCALE, quantScale, 0.0f); - } - explicit LSTMBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LSTMBuilder &operator=(const LSTMBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLSTM( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t outputCount = 0, - int32_t weightSize = 0, - float clippingThreshold = 0.0f, - flatbuffers::Offset weightI = 0, - flatbuffers::Offset weightH = 0, - flatbuffers::Offset bias = 0, - flatbuffers::Offset weightIQ = 0, - flatbuffers::Offset weightIA = 0, - float quantScale = 0.0f) { - LSTMBuilder builder_(_fbb); - builder_.add_quantScale(quantScale); - builder_.add_weightIA(weightIA); - builder_.add_weightIQ(weightIQ); - builder_.add_bias(bias); - builder_.add_weightH(weightH); - builder_.add_weightI(weightI); - builder_.add_clippingThreshold(clippingThreshold); - builder_.add_weightSize(weightSize); - builder_.add_outputCount(outputCount); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLSTM(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SliceT : public flatbuffers::NativeTable { - typedef Slice TableType; - int32_t axis; - std::vector slicePoints; - NetSource sourceType; - SliceT() - : axis(0), - sourceType(NetSource_CAFFE) { - } -}; - -struct Slice FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SliceT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SliceTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_SLICEPOINTS = 6, - VT_SOURCETYPE = 8 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - const flatbuffers::Vector *slicePoints() const { - return GetPointer *>(VT_SLICEPOINTS); - } - NetSource 
sourceType() const { - return static_cast(GetField(VT_SOURCETYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - VerifyOffset(verifier, VT_SLICEPOINTS) && - verifier.VerifyVector(slicePoints()) && - VerifyField(verifier, VT_SOURCETYPE) && - verifier.EndTable(); - } - SliceT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SliceT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SliceBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(Slice::VT_AXIS, axis, 0); - } - void add_slicePoints(flatbuffers::Offset> slicePoints) { - fbb_.AddOffset(Slice::VT_SLICEPOINTS, slicePoints); - } - void add_sourceType(NetSource sourceType) { - fbb_.AddElement(Slice::VT_SOURCETYPE, static_cast(sourceType), 0); - } - explicit SliceBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SliceBuilder &operator=(const SliceBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSlice( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - flatbuffers::Offset> slicePoints = 0, - NetSource sourceType = NetSource_CAFFE) { - SliceBuilder builder_(_fbb); - builder_.add_slicePoints(slicePoints); - builder_.add_axis(axis); - builder_.add_sourceType(sourceType); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSliceDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - const std::vector *slicePoints = nullptr, - NetSource sourceType = NetSource_CAFFE) { - auto slicePoints__ = slicePoints ? 
_fbb.CreateVector(*slicePoints) : 0; - return MNN::CreateSlice( - _fbb, - axis, - slicePoints__, - sourceType); -} - -flatbuffers::Offset CreateSlice(flatbuffers::FlatBufferBuilder &_fbb, const SliceT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchNormT : public flatbuffers::NativeTable { - typedef BatchNorm TableType; - int32_t channels; - std::vector slopeData; - std::vector meanData; - std::vector varData; - std::vector biasData; - std::vector Adata; - std::vector Bdata; - float epsilon; - BatchNormT() - : channels(0), - epsilon(0.001f) { - } -}; - -struct BatchNorm FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchNormT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BatchNormTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_CHANNELS = 4, - VT_SLOPEDATA = 6, - VT_MEANDATA = 8, - VT_VARDATA = 10, - VT_BIASDATA = 12, - VT_ADATA = 14, - VT_BDATA = 16, - VT_EPSILON = 18 - }; - int32_t channels() const { - return GetField(VT_CHANNELS, 0); - } - const flatbuffers::Vector *slopeData() const { - return GetPointer *>(VT_SLOPEDATA); - } - const flatbuffers::Vector *meanData() const { - return GetPointer *>(VT_MEANDATA); - } - const flatbuffers::Vector *varData() const { - return GetPointer *>(VT_VARDATA); - } - const flatbuffers::Vector *biasData() const { - return GetPointer *>(VT_BIASDATA); - } - const flatbuffers::Vector *Adata() const { - return GetPointer *>(VT_ADATA); - } - const flatbuffers::Vector *Bdata() const { - return GetPointer *>(VT_BDATA); - } - float epsilon() const { - return GetField(VT_EPSILON, 0.001f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_CHANNELS) && - VerifyOffset(verifier, VT_SLOPEDATA) && - verifier.VerifyVector(slopeData()) && - VerifyOffset(verifier, VT_MEANDATA) && - verifier.VerifyVector(meanData()) && - VerifyOffset(verifier, VT_VARDATA) && - verifier.VerifyVector(varData()) && - VerifyOffset(verifier, VT_BIASDATA) && - verifier.VerifyVector(biasData()) && - VerifyOffset(verifier, VT_ADATA) && - verifier.VerifyVector(Adata()) && - VerifyOffset(verifier, VT_BDATA) && - verifier.VerifyVector(Bdata()) && - VerifyField(verifier, VT_EPSILON) && - verifier.EndTable(); - } - BatchNormT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchNormT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BatchNormBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_channels(int32_t channels) { - fbb_.AddElement(BatchNorm::VT_CHANNELS, channels, 0); - } - void add_slopeData(flatbuffers::Offset> slopeData) { - fbb_.AddOffset(BatchNorm::VT_SLOPEDATA, slopeData); - } - void add_meanData(flatbuffers::Offset> meanData) { - fbb_.AddOffset(BatchNorm::VT_MEANDATA, meanData); - } - void add_varData(flatbuffers::Offset> varData) { - fbb_.AddOffset(BatchNorm::VT_VARDATA, varData); - } - void add_biasData(flatbuffers::Offset> biasData) { - fbb_.AddOffset(BatchNorm::VT_BIASDATA, biasData); - } - void add_Adata(flatbuffers::Offset> Adata) { - fbb_.AddOffset(BatchNorm::VT_ADATA, Adata); - } - void add_Bdata(flatbuffers::Offset> Bdata) { - fbb_.AddOffset(BatchNorm::VT_BDATA, Bdata); - } - void 
add_epsilon(float epsilon) { - fbb_.AddElement(BatchNorm::VT_EPSILON, epsilon, 0.001f); - } - explicit BatchNormBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BatchNormBuilder &operator=(const BatchNormBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBatchNorm( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t channels = 0, - flatbuffers::Offset> slopeData = 0, - flatbuffers::Offset> meanData = 0, - flatbuffers::Offset> varData = 0, - flatbuffers::Offset> biasData = 0, - flatbuffers::Offset> Adata = 0, - flatbuffers::Offset> Bdata = 0, - float epsilon = 0.001f) { - BatchNormBuilder builder_(_fbb); - builder_.add_epsilon(epsilon); - builder_.add_Bdata(Bdata); - builder_.add_Adata(Adata); - builder_.add_biasData(biasData); - builder_.add_varData(varData); - builder_.add_meanData(meanData); - builder_.add_slopeData(slopeData); - builder_.add_channels(channels); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateBatchNormDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t channels = 0, - const std::vector *slopeData = nullptr, - const std::vector *meanData = nullptr, - const std::vector *varData = nullptr, - const std::vector *biasData = nullptr, - const std::vector *Adata = nullptr, - const std::vector *Bdata = nullptr, - float epsilon = 0.001f) { - auto slopeData__ = slopeData ? _fbb.CreateVector(*slopeData) : 0; - auto meanData__ = meanData ? _fbb.CreateVector(*meanData) : 0; - auto varData__ = varData ? _fbb.CreateVector(*varData) : 0; - auto biasData__ = biasData ? _fbb.CreateVector(*biasData) : 0; - auto Adata__ = Adata ? _fbb.CreateVector(*Adata) : 0; - auto Bdata__ = Bdata ? 
_fbb.CreateVector(*Bdata) : 0; - return MNN::CreateBatchNorm( - _fbb, - channels, - slopeData__, - meanData__, - varData__, - biasData__, - Adata__, - Bdata__, - epsilon); -} - -flatbuffers::Offset CreateBatchNorm(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ScaleT : public flatbuffers::NativeTable { - typedef Scale TableType; - int32_t channels; - std::vector scaleData; - std::vector biasData; - ScaleT() - : channels(0) { - } -}; - -struct Scale FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ScaleT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ScaleTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_CHANNELS = 4, - VT_SCALEDATA = 6, - VT_BIASDATA = 8 - }; - int32_t channels() const { - return GetField(VT_CHANNELS, 0); - } - const flatbuffers::Vector *scaleData() const { - return GetPointer *>(VT_SCALEDATA); - } - const flatbuffers::Vector *biasData() const { - return GetPointer *>(VT_BIASDATA); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_CHANNELS) && - VerifyOffset(verifier, VT_SCALEDATA) && - verifier.VerifyVector(scaleData()) && - VerifyOffset(verifier, VT_BIASDATA) && - verifier.VerifyVector(biasData()) && - verifier.EndTable(); - } - ScaleT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ScaleT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ScaleBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_channels(int32_t channels) { - fbb_.AddElement(Scale::VT_CHANNELS, channels, 0); - } - void add_scaleData(flatbuffers::Offset> scaleData) { - fbb_.AddOffset(Scale::VT_SCALEDATA, scaleData); - } - void add_biasData(flatbuffers::Offset> biasData) { - fbb_.AddOffset(Scale::VT_BIASDATA, biasData); - } - explicit ScaleBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ScaleBuilder &operator=(const ScaleBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateScale( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t channels = 0, - flatbuffers::Offset> scaleData = 0, - flatbuffers::Offset> biasData = 0) { - ScaleBuilder builder_(_fbb); - builder_.add_biasData(biasData); - builder_.add_scaleData(scaleData); - builder_.add_channels(channels); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateScaleDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t channels = 0, - const std::vector *scaleData = nullptr, - const std::vector *biasData = nullptr) { - auto scaleData__ = scaleData ? _fbb.CreateVector(*scaleData) : 0; - auto biasData__ = biasData ? 
_fbb.CreateVector(*biasData) : 0; - return MNN::CreateScale( - _fbb, - channels, - scaleData__, - biasData__); -} - -flatbuffers::Offset CreateScale(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EltwiseT : public flatbuffers::NativeTable { - typedef Eltwise TableType; - EltwiseType type; - std::vector coeff; - EltwiseT() - : type(EltwiseType_PROD) { - } -}; - -struct Eltwise FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EltwiseT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return EltwiseTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4, - VT_COEFF = 6 - }; - EltwiseType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - const flatbuffers::Vector *coeff() const { - return GetPointer *>(VT_COEFF); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && - VerifyOffset(verifier, VT_COEFF) && - verifier.VerifyVector(coeff()) && - verifier.EndTable(); - } - EltwiseT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EltwiseT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EltwiseBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(EltwiseType type) { - fbb_.AddElement(Eltwise::VT_TYPE, static_cast(type), 0); - } - void add_coeff(flatbuffers::Offset> coeff) { - fbb_.AddOffset(Eltwise::VT_COEFF, coeff); - } - explicit EltwiseBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - EltwiseBuilder &operator=(const EltwiseBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateEltwise( - flatbuffers::FlatBufferBuilder &_fbb, - EltwiseType type = EltwiseType_PROD, - flatbuffers::Offset> coeff = 0) { - EltwiseBuilder builder_(_fbb); - builder_.add_coeff(coeff); - builder_.add_type(type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateEltwiseDirect( - flatbuffers::FlatBufferBuilder &_fbb, - EltwiseType type = EltwiseType_PROD, - const std::vector *coeff = nullptr) { - auto coeff__ = coeff ? 
_fbb.CreateVector(*coeff) : 0; - return MNN::CreateEltwise( - _fbb, - type, - coeff__); -} - -flatbuffers::Offset CreateEltwise(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FlattenT : public flatbuffers::NativeTable { - typedef Flatten TableType; - int32_t axis; - int32_t endAxis; - FlattenT() - : axis(0), - endAxis(0) { - } -}; - -struct Flatten FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FlattenT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return FlattenTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_ENDAXIS = 6 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - int32_t endAxis() const { - return GetField(VT_ENDAXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_ENDAXIS) && - verifier.EndTable(); - } - FlattenT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FlattenT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FlattenBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(Flatten::VT_AXIS, axis, 0); - } - void add_endAxis(int32_t endAxis) { - fbb_.AddElement(Flatten::VT_ENDAXIS, endAxis, 0); - } - explicit FlattenBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FlattenBuilder &operator=(const FlattenBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFlatten( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - int32_t endAxis = 0) { - FlattenBuilder builder_(_fbb); - builder_.add_endAxis(endAxis); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFlatten(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PermuteT : public flatbuffers::NativeTable { - typedef Permute TableType; - std::vector dims; - PermuteT() { - } -}; - -struct Permute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PermuteT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PermuteTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DIMS = 4 - }; - const flatbuffers::Vector *dims() const { - return GetPointer *>(VT_DIMS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DIMS) && - verifier.VerifyVector(dims()) && - verifier.EndTable(); - } - PermuteT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PermuteT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PermuteBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dims(flatbuffers::Offset> 
dims) { - fbb_.AddOffset(Permute::VT_DIMS, dims); - } - explicit PermuteBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PermuteBuilder &operator=(const PermuteBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePermute( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> dims = 0) { - PermuteBuilder builder_(_fbb); - builder_.add_dims(dims); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreatePermuteDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *dims = nullptr) { - auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; - return MNN::CreatePermute( - _fbb, - dims__); -} - -flatbuffers::Offset CreatePermute(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReshapeT : public flatbuffers::NativeTable { - typedef Reshape TableType; - std::vector dims; - MNN_DATA_FORMAT dimType; - ReshapeT() - : dimType(MNN_DATA_FORMAT_NCHW) { - } -}; - -struct Reshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReshapeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ReshapeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DIMS = 4, - VT_DIMTYPE = 6 - }; - const flatbuffers::Vector *dims() const { - return GetPointer *>(VT_DIMS); - } - MNN_DATA_FORMAT dimType() const { - return static_cast(GetField(VT_DIMTYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DIMS) && - verifier.VerifyVector(dims()) && - VerifyField(verifier, VT_DIMTYPE) && - verifier.EndTable(); - } - ReshapeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReshapeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReshapeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dims(flatbuffers::Offset> dims) { - fbb_.AddOffset(Reshape::VT_DIMS, dims); - } - void add_dimType(MNN_DATA_FORMAT dimType) { - fbb_.AddElement(Reshape::VT_DIMTYPE, static_cast(dimType), 0); - } - explicit ReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReshapeBuilder &operator=(const ReshapeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReshape( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> dims = 0, - MNN_DATA_FORMAT dimType = MNN_DATA_FORMAT_NCHW) { - ReshapeBuilder builder_(_fbb); - builder_.add_dims(dims); - builder_.add_dimType(dimType); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateReshapeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *dims = nullptr, - MNN_DATA_FORMAT dimType = MNN_DATA_FORMAT_NCHW) { - auto dims__ = dims ? 
_fbb.CreateVector(*dims) : 0; - return MNN::CreateReshape( - _fbb, - dims__, - dimType); -} - -flatbuffers::Offset CreateReshape(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DetectionOutputT : public flatbuffers::NativeTable { - typedef DetectionOutput TableType; - int32_t classCount; - float nmsThresholdold; - int32_t nmsTopK; - int32_t keepTopK; - float confidenceThreshold; - int32_t shareLocation; - int32_t backgroundLable; - int32_t varianceEncodedTarget; - int32_t codeType; - float objectnessScore; - DetectionOutputT() - : classCount(0), - nmsThresholdold(0.0f), - nmsTopK(0), - keepTopK(0), - confidenceThreshold(0.0f), - shareLocation(0), - backgroundLable(0), - varianceEncodedTarget(0), - codeType(0), - objectnessScore(0.01f) { - } -}; - -struct DetectionOutput FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DetectionOutputT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return DetectionOutputTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_CLASSCOUNT = 4, - VT_NMSTHRESHOLDOLD = 6, - VT_NMSTOPK = 8, - VT_KEEPTOPK = 10, - VT_CONFIDENCETHRESHOLD = 12, - VT_SHARELOCATION = 14, - VT_BACKGROUNDLABLE = 16, - VT_VARIANCEENCODEDTARGET = 18, - VT_CODETYPE = 20, - VT_OBJECTNESSSCORE = 22 - }; - int32_t classCount() const { - return GetField(VT_CLASSCOUNT, 0); - } - float nmsThresholdold() const { - return GetField(VT_NMSTHRESHOLDOLD, 0.0f); - } - int32_t nmsTopK() const { - return GetField(VT_NMSTOPK, 0); - } - int32_t keepTopK() const { - return GetField(VT_KEEPTOPK, 0); - } - float confidenceThreshold() const { - return GetField(VT_CONFIDENCETHRESHOLD, 0.0f); - } - int32_t shareLocation() const { - return GetField(VT_SHARELOCATION, 0); - } - int32_t backgroundLable() const { - return GetField(VT_BACKGROUNDLABLE, 0); - } - int32_t varianceEncodedTarget() const { - return GetField(VT_VARIANCEENCODEDTARGET, 0); - } - int32_t codeType() const { - return GetField(VT_CODETYPE, 0); - } - float objectnessScore() const { - return GetField(VT_OBJECTNESSSCORE, 0.01f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_CLASSCOUNT) && - VerifyField(verifier, VT_NMSTHRESHOLDOLD) && - VerifyField(verifier, VT_NMSTOPK) && - VerifyField(verifier, VT_KEEPTOPK) && - VerifyField(verifier, VT_CONFIDENCETHRESHOLD) && - VerifyField(verifier, VT_SHARELOCATION) && - VerifyField(verifier, VT_BACKGROUNDLABLE) && - VerifyField(verifier, VT_VARIANCEENCODEDTARGET) && - VerifyField(verifier, VT_CODETYPE) && - VerifyField(verifier, VT_OBJECTNESSSCORE) && - verifier.EndTable(); - } - DetectionOutputT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DetectionOutputT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DetectionOutputBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_classCount(int32_t classCount) { - fbb_.AddElement(DetectionOutput::VT_CLASSCOUNT, classCount, 0); - } - void add_nmsThresholdold(float nmsThresholdold) { - fbb_.AddElement(DetectionOutput::VT_NMSTHRESHOLDOLD, nmsThresholdold, 0.0f); - } - void add_nmsTopK(int32_t nmsTopK) { - fbb_.AddElement(DetectionOutput::VT_NMSTOPK, 
nmsTopK, 0); - } - void add_keepTopK(int32_t keepTopK) { - fbb_.AddElement(DetectionOutput::VT_KEEPTOPK, keepTopK, 0); - } - void add_confidenceThreshold(float confidenceThreshold) { - fbb_.AddElement(DetectionOutput::VT_CONFIDENCETHRESHOLD, confidenceThreshold, 0.0f); - } - void add_shareLocation(int32_t shareLocation) { - fbb_.AddElement(DetectionOutput::VT_SHARELOCATION, shareLocation, 0); - } - void add_backgroundLable(int32_t backgroundLable) { - fbb_.AddElement(DetectionOutput::VT_BACKGROUNDLABLE, backgroundLable, 0); - } - void add_varianceEncodedTarget(int32_t varianceEncodedTarget) { - fbb_.AddElement(DetectionOutput::VT_VARIANCEENCODEDTARGET, varianceEncodedTarget, 0); - } - void add_codeType(int32_t codeType) { - fbb_.AddElement(DetectionOutput::VT_CODETYPE, codeType, 0); - } - void add_objectnessScore(float objectnessScore) { - fbb_.AddElement(DetectionOutput::VT_OBJECTNESSSCORE, objectnessScore, 0.01f); - } - explicit DetectionOutputBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DetectionOutputBuilder &operator=(const DetectionOutputBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDetectionOutput( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t classCount = 0, - float nmsThresholdold = 0.0f, - int32_t nmsTopK = 0, - int32_t keepTopK = 0, - float confidenceThreshold = 0.0f, - int32_t shareLocation = 0, - int32_t backgroundLable = 0, - int32_t varianceEncodedTarget = 0, - int32_t codeType = 0, - float objectnessScore = 0.01f) { - DetectionOutputBuilder builder_(_fbb); - builder_.add_objectnessScore(objectnessScore); - builder_.add_codeType(codeType); - builder_.add_varianceEncodedTarget(varianceEncodedTarget); - builder_.add_backgroundLable(backgroundLable); - builder_.add_shareLocation(shareLocation); - builder_.add_confidenceThreshold(confidenceThreshold); - builder_.add_keepTopK(keepTopK); - builder_.add_nmsTopK(nmsTopK); - builder_.add_nmsThresholdold(nmsThresholdold); - builder_.add_classCount(classCount); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDetectionOutput(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RoiPoolingT : public flatbuffers::NativeTable { - typedef RoiPooling TableType; - int32_t pooledWidth; - int32_t pooledHeight; - float spatialScale; - RoiPoolingT() - : pooledWidth(0), - pooledHeight(0), - spatialScale(0.0f) { - } -}; - -struct RoiPooling FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RoiPoolingT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RoiPoolingTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_POOLEDWIDTH = 4, - VT_POOLEDHEIGHT = 6, - VT_SPATIALSCALE = 8 - }; - int32_t pooledWidth() const { - return GetField(VT_POOLEDWIDTH, 0); - } - int32_t pooledHeight() const { - return GetField(VT_POOLEDHEIGHT, 0); - } - float spatialScale() const { - return GetField(VT_SPATIALSCALE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_POOLEDWIDTH) && - VerifyField(verifier, VT_POOLEDHEIGHT) && - VerifyField(verifier, VT_SPATIALSCALE) && - verifier.EndTable(); - } - RoiPoolingT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RoiPoolingT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RoiPoolingBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_pooledWidth(int32_t pooledWidth) { - fbb_.AddElement(RoiPooling::VT_POOLEDWIDTH, pooledWidth, 0); - } - void add_pooledHeight(int32_t pooledHeight) { - fbb_.AddElement(RoiPooling::VT_POOLEDHEIGHT, pooledHeight, 0); - } - void add_spatialScale(float spatialScale) { - fbb_.AddElement(RoiPooling::VT_SPATIALSCALE, spatialScale, 0.0f); - } - explicit RoiPoolingBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RoiPoolingBuilder &operator=(const RoiPoolingBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRoiPooling( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t pooledWidth = 0, - int32_t pooledHeight = 0, - float spatialScale = 0.0f) { - RoiPoolingBuilder builder_(_fbb); - builder_.add_spatialScale(spatialScale); - builder_.add_pooledHeight(pooledHeight); - builder_.add_pooledWidth(pooledWidth); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRoiPooling(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ProposalT : public flatbuffers::NativeTable { - typedef Proposal TableType; - int32_t featStride; - int32_t baseSize; - int32_t preNmsTopN; - int32_t afterNmsTopN; - float nmsThreshold; - int32_t minSize; - std::unique_ptr ratios; - std::unique_ptr scales; - std::unique_ptr anchors; - ProposalT() - : featStride(0), - baseSize(0), - preNmsTopN(0), - afterNmsTopN(0), - nmsThreshold(0.0f), - minSize(0) { - } -}; - -struct Proposal FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ProposalT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ProposalTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FEATSTRIDE = 4, - VT_BASESIZE = 6, - VT_PRENMSTOPN = 8, - VT_AFTERNMSTOPN = 10, - VT_NMSTHRESHOLD = 12, - VT_MINSIZE = 14, - VT_RATIOS = 16, - VT_SCALES = 18, - VT_ANCHORS = 20 - }; - int32_t featStride() const { - return GetField(VT_FEATSTRIDE, 0); - } - int32_t baseSize() const { - return GetField(VT_BASESIZE, 0); - } - int32_t preNmsTopN() const { - return GetField(VT_PRENMSTOPN, 0); - } - int32_t afterNmsTopN() const { - return GetField(VT_AFTERNMSTOPN, 0); - } - float nmsThreshold() const { - return GetField(VT_NMSTHRESHOLD, 0.0f); - } - int32_t minSize() const { - return GetField(VT_MINSIZE, 0); - } - const Blob *ratios() const { - return GetPointer(VT_RATIOS); - } - const Blob *scales() const { - return GetPointer(VT_SCALES); - } - const Blob *anchors() const { - return GetPointer(VT_ANCHORS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FEATSTRIDE) && - VerifyField(verifier, VT_BASESIZE) && - VerifyField(verifier, VT_PRENMSTOPN) && - VerifyField(verifier, VT_AFTERNMSTOPN) && - VerifyField(verifier, VT_NMSTHRESHOLD) && - VerifyField(verifier, VT_MINSIZE) && - VerifyOffset(verifier, VT_RATIOS) && - verifier.VerifyTable(ratios()) && - VerifyOffset(verifier, VT_SCALES) && - verifier.VerifyTable(scales()) && - VerifyOffset(verifier, 
VT_ANCHORS) && - verifier.VerifyTable(anchors()) && - verifier.EndTable(); - } - ProposalT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ProposalT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ProposalBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_featStride(int32_t featStride) { - fbb_.AddElement(Proposal::VT_FEATSTRIDE, featStride, 0); - } - void add_baseSize(int32_t baseSize) { - fbb_.AddElement(Proposal::VT_BASESIZE, baseSize, 0); - } - void add_preNmsTopN(int32_t preNmsTopN) { - fbb_.AddElement(Proposal::VT_PRENMSTOPN, preNmsTopN, 0); - } - void add_afterNmsTopN(int32_t afterNmsTopN) { - fbb_.AddElement(Proposal::VT_AFTERNMSTOPN, afterNmsTopN, 0); - } - void add_nmsThreshold(float nmsThreshold) { - fbb_.AddElement(Proposal::VT_NMSTHRESHOLD, nmsThreshold, 0.0f); - } - void add_minSize(int32_t minSize) { - fbb_.AddElement(Proposal::VT_MINSIZE, minSize, 0); - } - void add_ratios(flatbuffers::Offset ratios) { - fbb_.AddOffset(Proposal::VT_RATIOS, ratios); - } - void add_scales(flatbuffers::Offset scales) { - fbb_.AddOffset(Proposal::VT_SCALES, scales); - } - void add_anchors(flatbuffers::Offset anchors) { - fbb_.AddOffset(Proposal::VT_ANCHORS, anchors); - } - explicit ProposalBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ProposalBuilder &operator=(const ProposalBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateProposal( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t featStride = 0, - int32_t baseSize = 0, - int32_t preNmsTopN = 0, - int32_t afterNmsTopN = 0, - float nmsThreshold = 0.0f, - int32_t minSize = 0, - flatbuffers::Offset ratios = 0, - flatbuffers::Offset scales = 0, - flatbuffers::Offset anchors = 0) { - ProposalBuilder builder_(_fbb); - builder_.add_anchors(anchors); - builder_.add_scales(scales); - builder_.add_ratios(ratios); - builder_.add_minSize(minSize); - builder_.add_nmsThreshold(nmsThreshold); - builder_.add_afterNmsTopN(afterNmsTopN); - builder_.add_preNmsTopN(preNmsTopN); - builder_.add_baseSize(baseSize); - builder_.add_featStride(featStride); - return builder_.Finish(); -} - -flatbuffers::Offset CreateProposal(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct InterpT : public flatbuffers::NativeTable { - typedef Interp TableType; - float widthScale; - float heightScale; - int32_t outputWidth; - int32_t outputHeight; - int32_t resizeType; - bool alignCorners; - InterpT() - : widthScale(0.0f), - heightScale(0.0f), - outputWidth(0), - outputHeight(0), - resizeType(0), - alignCorners(false) { - } -}; - -struct Interp FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef InterpT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return InterpTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_WIDTHSCALE = 4, - VT_HEIGHTSCALE = 6, - VT_OUTPUTWIDTH = 8, - VT_OUTPUTHEIGHT = 10, - VT_RESIZETYPE = 12, - VT_ALIGNCORNERS = 14 - }; - float widthScale() const { - return GetField(VT_WIDTHSCALE, 0.0f); - } - float heightScale() const { - return 
GetField(VT_HEIGHTSCALE, 0.0f); - } - int32_t outputWidth() const { - return GetField(VT_OUTPUTWIDTH, 0); - } - int32_t outputHeight() const { - return GetField(VT_OUTPUTHEIGHT, 0); - } - int32_t resizeType() const { - return GetField(VT_RESIZETYPE, 0); - } - bool alignCorners() const { - return GetField(VT_ALIGNCORNERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_WIDTHSCALE) && - VerifyField(verifier, VT_HEIGHTSCALE) && - VerifyField(verifier, VT_OUTPUTWIDTH) && - VerifyField(verifier, VT_OUTPUTHEIGHT) && - VerifyField(verifier, VT_RESIZETYPE) && - VerifyField(verifier, VT_ALIGNCORNERS) && - verifier.EndTable(); - } - InterpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(InterpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const InterpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct InterpBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_widthScale(float widthScale) { - fbb_.AddElement(Interp::VT_WIDTHSCALE, widthScale, 0.0f); - } - void add_heightScale(float heightScale) { - fbb_.AddElement(Interp::VT_HEIGHTSCALE, heightScale, 0.0f); - } - void add_outputWidth(int32_t outputWidth) { - fbb_.AddElement(Interp::VT_OUTPUTWIDTH, outputWidth, 0); - } - void add_outputHeight(int32_t outputHeight) { - fbb_.AddElement(Interp::VT_OUTPUTHEIGHT, outputHeight, 0); - } - void add_resizeType(int32_t resizeType) { - fbb_.AddElement(Interp::VT_RESIZETYPE, resizeType, 0); - } - void add_alignCorners(bool alignCorners) { - fbb_.AddElement(Interp::VT_ALIGNCORNERS, static_cast(alignCorners), 0); - } - explicit InterpBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - InterpBuilder &operator=(const InterpBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateInterp( - flatbuffers::FlatBufferBuilder &_fbb, - float widthScale = 0.0f, - float heightScale = 0.0f, - int32_t outputWidth = 0, - int32_t outputHeight = 0, - int32_t resizeType = 0, - bool alignCorners = false) { - InterpBuilder builder_(_fbb); - builder_.add_resizeType(resizeType); - builder_.add_outputHeight(outputHeight); - builder_.add_outputWidth(outputWidth); - builder_.add_heightScale(heightScale); - builder_.add_widthScale(widthScale); - builder_.add_alignCorners(alignCorners); - return builder_.Finish(); -} - -flatbuffers::Offset CreateInterp(flatbuffers::FlatBufferBuilder &_fbb, const InterpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeT : public flatbuffers::NativeTable { - typedef Resize TableType; - float xScale; - float yScale; - ResizeT() - : xScale(0.0f), - yScale(0.0f) { - } -}; - -struct Resize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ResizeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_XSCALE = 4, - VT_YSCALE = 6 - }; - float xScale() const { - return GetField(VT_XSCALE, 0.0f); - } - float yScale() const { - return GetField(VT_YSCALE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - 
VerifyField(verifier, VT_XSCALE) && - VerifyField(verifier, VT_YSCALE) && - verifier.EndTable(); - } - ResizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_xScale(float xScale) { - fbb_.AddElement(Resize::VT_XSCALE, xScale, 0.0f); - } - void add_yScale(float yScale) { - fbb_.AddElement(Resize::VT_YSCALE, yScale, 0.0f); - } - explicit ResizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ResizeBuilder &operator=(const ResizeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateResize( - flatbuffers::FlatBufferBuilder &_fbb, - float xScale = 0.0f, - float yScale = 0.0f) { - ResizeBuilder builder_(_fbb); - builder_.add_yScale(yScale); - builder_.add_xScale(xScale); - return builder_.Finish(); -} - -flatbuffers::Offset CreateResize(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PriorBoxT : public flatbuffers::NativeTable { - typedef PriorBox TableType; - std::vector minSizes; - std::vector maxSizes; - std::vector aspectRatios; - std::vector variances; - bool flip; - bool clip; - int32_t imageWidth; - int32_t imageHeight; - int32_t stepWidth; - int32_t stepHeight; - float offset; - PriorBoxT() - : flip(false), - clip(false), - imageWidth(0), - imageHeight(0), - stepWidth(0), - stepHeight(0), - offset(0.0f) { - } -}; - -struct PriorBox FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PriorBoxT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PriorBoxTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MINSIZES = 4, - VT_MAXSIZES = 6, - VT_ASPECTRATIOS = 8, - VT_VARIANCES = 10, - VT_FLIP = 12, - VT_CLIP = 14, - VT_IMAGEWIDTH = 16, - VT_IMAGEHEIGHT = 18, - VT_STEPWIDTH = 20, - VT_STEPHEIGHT = 22, - VT_OFFSET = 24 - }; - const flatbuffers::Vector *minSizes() const { - return GetPointer *>(VT_MINSIZES); - } - const flatbuffers::Vector *maxSizes() const { - return GetPointer *>(VT_MAXSIZES); - } - const flatbuffers::Vector *aspectRatios() const { - return GetPointer *>(VT_ASPECTRATIOS); - } - const flatbuffers::Vector *variances() const { - return GetPointer *>(VT_VARIANCES); - } - bool flip() const { - return GetField(VT_FLIP, 0) != 0; - } - bool clip() const { - return GetField(VT_CLIP, 0) != 0; - } - int32_t imageWidth() const { - return GetField(VT_IMAGEWIDTH, 0); - } - int32_t imageHeight() const { - return GetField(VT_IMAGEHEIGHT, 0); - } - int32_t stepWidth() const { - return GetField(VT_STEPWIDTH, 0); - } - int32_t stepHeight() const { - return GetField(VT_STEPHEIGHT, 0); - } - float offset() const { - return GetField(VT_OFFSET, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_MINSIZES) && - verifier.VerifyVector(minSizes()) && - VerifyOffset(verifier, VT_MAXSIZES) && - verifier.VerifyVector(maxSizes()) && - VerifyOffset(verifier, VT_ASPECTRATIOS) && - 
verifier.VerifyVector(aspectRatios()) && - VerifyOffset(verifier, VT_VARIANCES) && - verifier.VerifyVector(variances()) && - VerifyField(verifier, VT_FLIP) && - VerifyField(verifier, VT_CLIP) && - VerifyField(verifier, VT_IMAGEWIDTH) && - VerifyField(verifier, VT_IMAGEHEIGHT) && - VerifyField(verifier, VT_STEPWIDTH) && - VerifyField(verifier, VT_STEPHEIGHT) && - VerifyField(verifier, VT_OFFSET) && - verifier.EndTable(); - } - PriorBoxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PriorBoxT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PriorBoxBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_minSizes(flatbuffers::Offset> minSizes) { - fbb_.AddOffset(PriorBox::VT_MINSIZES, minSizes); - } - void add_maxSizes(flatbuffers::Offset> maxSizes) { - fbb_.AddOffset(PriorBox::VT_MAXSIZES, maxSizes); - } - void add_aspectRatios(flatbuffers::Offset> aspectRatios) { - fbb_.AddOffset(PriorBox::VT_ASPECTRATIOS, aspectRatios); - } - void add_variances(flatbuffers::Offset> variances) { - fbb_.AddOffset(PriorBox::VT_VARIANCES, variances); - } - void add_flip(bool flip) { - fbb_.AddElement(PriorBox::VT_FLIP, static_cast(flip), 0); - } - void add_clip(bool clip) { - fbb_.AddElement(PriorBox::VT_CLIP, static_cast(clip), 0); - } - void add_imageWidth(int32_t imageWidth) { - fbb_.AddElement(PriorBox::VT_IMAGEWIDTH, imageWidth, 0); - } - void add_imageHeight(int32_t imageHeight) { - fbb_.AddElement(PriorBox::VT_IMAGEHEIGHT, imageHeight, 0); - } - void add_stepWidth(int32_t stepWidth) { - fbb_.AddElement(PriorBox::VT_STEPWIDTH, stepWidth, 0); - } - void add_stepHeight(int32_t stepHeight) { - fbb_.AddElement(PriorBox::VT_STEPHEIGHT, stepHeight, 0); - } - void add_offset(float offset) { - fbb_.AddElement(PriorBox::VT_OFFSET, offset, 0.0f); - } - explicit PriorBoxBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PriorBoxBuilder &operator=(const PriorBoxBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePriorBox( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> minSizes = 0, - flatbuffers::Offset> maxSizes = 0, - flatbuffers::Offset> aspectRatios = 0, - flatbuffers::Offset> variances = 0, - bool flip = false, - bool clip = false, - int32_t imageWidth = 0, - int32_t imageHeight = 0, - int32_t stepWidth = 0, - int32_t stepHeight = 0, - float offset = 0.0f) { - PriorBoxBuilder builder_(_fbb); - builder_.add_offset(offset); - builder_.add_stepHeight(stepHeight); - builder_.add_stepWidth(stepWidth); - builder_.add_imageHeight(imageHeight); - builder_.add_imageWidth(imageWidth); - builder_.add_variances(variances); - builder_.add_aspectRatios(aspectRatios); - builder_.add_maxSizes(maxSizes); - builder_.add_minSizes(minSizes); - builder_.add_clip(clip); - builder_.add_flip(flip); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreatePriorBoxDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *minSizes = nullptr, - const std::vector *maxSizes = nullptr, - const std::vector *aspectRatios = nullptr, - const std::vector *variances = nullptr, - bool flip = false, - bool clip = false, - int32_t imageWidth = 0, - int32_t imageHeight = 
0, - int32_t stepWidth = 0, - int32_t stepHeight = 0, - float offset = 0.0f) { - auto minSizes__ = minSizes ? _fbb.CreateVector(*minSizes) : 0; - auto maxSizes__ = maxSizes ? _fbb.CreateVector(*maxSizes) : 0; - auto aspectRatios__ = aspectRatios ? _fbb.CreateVector(*aspectRatios) : 0; - auto variances__ = variances ? _fbb.CreateVector(*variances) : 0; - return MNN::CreatePriorBox( - _fbb, - minSizes__, - maxSizes__, - aspectRatios__, - variances__, - flip, - clip, - imageWidth, - imageHeight, - stepWidth, - stepHeight, - offset); -} - -flatbuffers::Offset CreatePriorBox(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NormalizeT : public flatbuffers::NativeTable { - typedef Normalize TableType; - int32_t acrossSpatial; - int32_t channelShared; - float eps; - std::vector scale; - NormalizeT() - : acrossSpatial(0), - channelShared(0), - eps(0.0f) { - } -}; - -struct Normalize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NormalizeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return NormalizeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ACROSSSPATIAL = 4, - VT_CHANNELSHARED = 6, - VT_EPS = 8, - VT_SCALE = 10 - }; - int32_t acrossSpatial() const { - return GetField(VT_ACROSSSPATIAL, 0); - } - int32_t channelShared() const { - return GetField(VT_CHANNELSHARED, 0); - } - float eps() const { - return GetField(VT_EPS, 0.0f); - } - const flatbuffers::Vector *scale() const { - return GetPointer *>(VT_SCALE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ACROSSSPATIAL) && - VerifyField(verifier, VT_CHANNELSHARED) && - VerifyField(verifier, VT_EPS) && - VerifyOffset(verifier, VT_SCALE) && - verifier.VerifyVector(scale()) && - verifier.EndTable(); - } - NormalizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NormalizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NormalizeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_acrossSpatial(int32_t acrossSpatial) { - fbb_.AddElement(Normalize::VT_ACROSSSPATIAL, acrossSpatial, 0); - } - void add_channelShared(int32_t channelShared) { - fbb_.AddElement(Normalize::VT_CHANNELSHARED, channelShared, 0); - } - void add_eps(float eps) { - fbb_.AddElement(Normalize::VT_EPS, eps, 0.0f); - } - void add_scale(flatbuffers::Offset> scale) { - fbb_.AddOffset(Normalize::VT_SCALE, scale); - } - explicit NormalizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NormalizeBuilder &operator=(const NormalizeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNormalize( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t acrossSpatial = 0, - int32_t channelShared = 0, - float eps = 0.0f, - flatbuffers::Offset> scale = 0) { - NormalizeBuilder builder_(_fbb); - builder_.add_scale(scale); - builder_.add_eps(eps); - builder_.add_channelShared(channelShared); - builder_.add_acrossSpatial(acrossSpatial); - return builder_.Finish(); -} - -inline flatbuffers::Offset 
CreateNormalizeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t acrossSpatial = 0, - int32_t channelShared = 0, - float eps = 0.0f, - const std::vector *scale = nullptr) { - auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; - return MNN::CreateNormalize( - _fbb, - acrossSpatial, - channelShared, - eps, - scale__); -} - -flatbuffers::Offset CreateNormalize(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EltwiseInt8T : public flatbuffers::NativeTable { - typedef EltwiseInt8 TableType; - EltwiseType type; - std::unique_ptr inputQuan0; - std::unique_ptr inputQuan1; - std::unique_ptr outputQuan; - EltwiseInt8T() - : type(EltwiseType_PROD) { - } -}; - -struct EltwiseInt8 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EltwiseInt8T NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return EltwiseInt8TypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4, - VT_INPUTQUAN0 = 6, - VT_INPUTQUAN1 = 8, - VT_OUTPUTQUAN = 10 - }; - EltwiseType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - const QuantizedFloatParam *inputQuan0() const { - return GetPointer(VT_INPUTQUAN0); - } - const QuantizedFloatParam *inputQuan1() const { - return GetPointer(VT_INPUTQUAN1); - } - const QuantizedFloatParam *outputQuan() const { - return GetPointer(VT_OUTPUTQUAN); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && - VerifyOffset(verifier, VT_INPUTQUAN0) && - verifier.VerifyTable(inputQuan0()) && - VerifyOffset(verifier, VT_INPUTQUAN1) && - verifier.VerifyTable(inputQuan1()) && - VerifyOffset(verifier, VT_OUTPUTQUAN) && - verifier.VerifyTable(outputQuan()) && - verifier.EndTable(); - } - EltwiseInt8T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EltwiseInt8T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EltwiseInt8Builder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(EltwiseType type) { - fbb_.AddElement(EltwiseInt8::VT_TYPE, static_cast(type), 0); - } - void add_inputQuan0(flatbuffers::Offset inputQuan0) { - fbb_.AddOffset(EltwiseInt8::VT_INPUTQUAN0, inputQuan0); - } - void add_inputQuan1(flatbuffers::Offset inputQuan1) { - fbb_.AddOffset(EltwiseInt8::VT_INPUTQUAN1, inputQuan1); - } - void add_outputQuan(flatbuffers::Offset outputQuan) { - fbb_.AddOffset(EltwiseInt8::VT_OUTPUTQUAN, outputQuan); - } - explicit EltwiseInt8Builder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - EltwiseInt8Builder &operator=(const EltwiseInt8Builder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateEltwiseInt8( - flatbuffers::FlatBufferBuilder &_fbb, - EltwiseType type = EltwiseType_PROD, - flatbuffers::Offset inputQuan0 = 0, - flatbuffers::Offset inputQuan1 = 0, - flatbuffers::Offset outputQuan = 0) { - EltwiseInt8Builder builder_(_fbb); - builder_.add_outputQuan(outputQuan); - builder_.add_inputQuan1(inputQuan1); - builder_.add_inputQuan0(inputQuan0); - builder_.add_type(type); - return builder_.Finish(); -} 
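// Usage sketch for the generated FlatBuffers API shown in this hunk: the Create* builder
// helpers, the table accessors, and the object-API (*T NativeTable) round trip, illustrated
// with the DetectionOutput table whose builder appears above. This is a minimal sketch, not
// code from the patch; it assumes the regenerated schema header is included as
// "MNN_generated.h" (the actual include name/path in a given build is an assumption) and
// that the flatbuffers headers are on the include path.

#include <memory>
#include <flatbuffers/flatbuffers.h>
#include "MNN_generated.h"  // assumed include name for the regenerated schema header

int main() {
    flatbuffers::FlatBufferBuilder fbb;

    // 1) Serialize with the generated convenience helper; fields left unspecified keep the
    //    schema defaults visible above (e.g. objectnessScore = 0.01f).
    auto op = MNN::CreateDetectionOutput(fbb,
                                         /*classCount=*/21,
                                         /*nmsThresholdold=*/0.45f,
                                         /*nmsTopK=*/400,
                                         /*keepTopK=*/200,
                                         /*confidenceThreshold=*/0.25f);
    fbb.Finish(op);

    // 2) Read the finished buffer back through the generated accessors.
    const auto *detOut   = flatbuffers::GetRoot<MNN::DetectionOutput>(fbb.GetBufferPointer());
    const int   classes  = detOut->classCount();       // 21
    const float nmsThres = detOut->nmsThresholdold();  // 0.45f

    // 3) Or unpack into the mutable object API type, edit it, and pack it into a new buffer.
    std::unique_ptr<MNN::DetectionOutputT> native(detOut->UnPack());
    native->keepTopK = 100;
    flatbuffers::FlatBufferBuilder fbb2;
    fbb2.Finish(MNN::DetectionOutput::Pack(fbb2, native.get()));

    (void)classes; (void)nmsThres;
    return 0;
}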
- -flatbuffers::Offset CreateEltwiseInt8(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline Convolution2DCommonT *Convolution2DCommon::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Convolution2DCommonT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Convolution2DCommon::UnPackTo(Convolution2DCommonT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padX(); _o->padX = _e; }; - { auto _e = padY(); _o->padY = _e; }; - { auto _e = kernelX(); _o->kernelX = _e; }; - { auto _e = kernelY(); _o->kernelY = _e; }; - { auto _e = strideX(); _o->strideX = _e; }; - { auto _e = strideY(); _o->strideY = _e; }; - { auto _e = dilateX(); _o->dilateX = _e; }; - { auto _e = dilateY(); _o->dilateY = _e; }; - { auto _e = padMode(); _o->padMode = _e; }; - { auto _e = group(); _o->group = _e; }; - { auto _e = outputCount(); _o->outputCount = _e; }; - { auto _e = inputCount(); _o->inputCount = _e; }; - { auto _e = relu(); _o->relu = _e; }; - { auto _e = relu6(); _o->relu6 = _e; }; -} - -inline flatbuffers::Offset Convolution2DCommon::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConvolution2DCommon(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConvolution2DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution2DCommonT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padX = _o->padX; - auto _padY = _o->padY; - auto _kernelX = _o->kernelX; - auto _kernelY = _o->kernelY; - auto _strideX = _o->strideX; - auto _strideY = _o->strideY; - auto _dilateX = _o->dilateX; - auto _dilateY = _o->dilateY; - auto _padMode = _o->padMode; - auto _group = _o->group; - auto _outputCount = _o->outputCount; - auto _inputCount = _o->inputCount; - auto _relu = _o->relu; - auto _relu6 = _o->relu6; - return MNN::CreateConvolution2DCommon( - _fbb, - _padX, - _padY, - _kernelX, - _kernelY, - _strideX, - _strideY, - _dilateX, - _dilateY, - _padMode, - _group, - _outputCount, - _inputCount, - _relu, - _relu6); -} - -inline Convolution3DCommonT *Convolution3DCommon::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Convolution3DCommonT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Convolution3DCommon::UnPackTo(Convolution3DCommonT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dilates(); if (_e) { _o->dilates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dilates[_i] = _e->Get(_i); } } }; - { auto _e = strides(); if (_e) { _o->strides.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strides[_i] = _e->Get(_i); } } }; - { auto _e = kernels(); if (_e) { _o->kernels.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->kernels[_i] = _e->Get(_i); } } }; - { auto _e = pads(); if (_e) { _o->pads.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pads[_i] = _e->Get(_i); } } }; - { auto _e = padMode(); _o->padMode = _e; }; - { auto _e = inputCount(); _o->inputCount = _e; 
}; - { auto _e = outputCount(); _o->outputCount = _e; }; - { auto _e = relu(); _o->relu = _e; }; - { auto _e = relu6(); _o->relu6 = _e; }; -} - -inline flatbuffers::Offset Convolution3DCommon::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConvolution3DCommon(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConvolution3DCommon(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DCommonT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution3DCommonT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dilates = _o->dilates.size() ? _fbb.CreateVector(_o->dilates) : 0; - auto _strides = _o->strides.size() ? _fbb.CreateVector(_o->strides) : 0; - auto _kernels = _o->kernels.size() ? _fbb.CreateVector(_o->kernels) : 0; - auto _pads = _o->pads.size() ? _fbb.CreateVector(_o->pads) : 0; - auto _padMode = _o->padMode; - auto _inputCount = _o->inputCount; - auto _outputCount = _o->outputCount; - auto _relu = _o->relu; - auto _relu6 = _o->relu6; - return MNN::CreateConvolution3DCommon( - _fbb, - _dilates, - _strides, - _kernels, - _pads, - _padMode, - _inputCount, - _outputCount, - _relu, - _relu6); -} - -inline IDSTQuanT *IDSTQuan::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new IDSTQuanT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void IDSTQuan::UnPackTo(IDSTQuanT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = buffer(); if (_e) { _o->buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffer[_i] = _e->Get(_i); } } }; - { auto _e = alpha(); if (_e) { _o->alpha.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->alpha[_i] = _e->Get(_i); } } }; - { auto _e = type(); _o->type = _e; }; - { auto _e = useInt32(); _o->useInt32 = _e; }; - { auto _e = quantScale(); _o->quantScale = _e; }; - { auto _e = scaleIn(); _o->scaleIn = _e; }; - { auto _e = scaleOut(); _o->scaleOut = _e; }; - { auto _e = aMax(); _o->aMax = _e; }; - { auto _e = aMin(); _o->aMin = _e; }; - { auto _e = readType(); _o->readType = _e; }; - { auto _e = has_scaleInt(); _o->has_scaleInt = _e; }; -} - -inline flatbuffers::Offset IDSTQuan::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateIDSTQuan(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateIDSTQuan(flatbuffers::FlatBufferBuilder &_fbb, const IDSTQuanT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IDSTQuanT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _buffer = _o->buffer.size() ? _fbb.CreateVector(_o->buffer) : 0; - auto _alpha = _o->alpha.size() ? 
_fbb.CreateVector(_o->alpha) : 0; - auto _type = _o->type; - auto _useInt32 = _o->useInt32; - auto _quantScale = _o->quantScale; - auto _scaleIn = _o->scaleIn; - auto _scaleOut = _o->scaleOut; - auto _aMax = _o->aMax; - auto _aMin = _o->aMin; - auto _readType = _o->readType; - auto _has_scaleInt = _o->has_scaleInt; - return MNN::CreateIDSTQuan( - _fbb, - _buffer, - _alpha, - _type, - _useInt32, - _quantScale, - _scaleIn, - _scaleOut, - _aMax, - _aMin, - _readType, - _has_scaleInt); -} - -inline QuantizedFloatParamT *QuantizedFloatParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedFloatParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedFloatParam::UnPackTo(QuantizedFloatParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; - { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }; - { auto _e = tensorScale(); if (_e) { _o->tensorScale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensorScale[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset QuantizedFloatParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedFloatParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedFloatParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedFloatParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedFloatParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; - auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; - auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; - auto _tensorScale = _o->tensorScale.size() ? 
_fbb.CreateVector(_o->tensorScale) : 0; - return MNN::CreateQuantizedFloatParam( - _fbb, - _weight, - _bias, - _scale, - _tensorScale); -} - -inline Convolution2DT *Convolution2D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Convolution2DT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Convolution2D::UnPackTo(Convolution2DT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = common(); if (_e) _o->common = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; - { auto _e = quanParameter(); if (_e) _o->quanParameter = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = symmetricQuan(); if (_e) _o->symmetricQuan = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset Convolution2D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConvolution2D(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConvolution2D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution2DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution2DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _common = _o->common ? CreateConvolution2DCommon(_fbb, _o->common.get(), _rehasher) : 0; - auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; - auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; - auto _quanParameter = _o->quanParameter ? CreateIDSTQuan(_fbb, _o->quanParameter.get(), _rehasher) : 0; - auto _symmetricQuan = _o->symmetricQuan ? 
CreateQuantizedFloatParam(_fbb, _o->symmetricQuan.get(), _rehasher) : 0; - return MNN::CreateConvolution2D( - _fbb, - _common, - _weight, - _bias, - _quanParameter, - _symmetricQuan); -} - -inline Convolution3DT *Convolution3D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Convolution3DT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Convolution3D::UnPackTo(Convolution3DT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = common(); if (_e) _o->common = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset Convolution3D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConvolution3D(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConvolution3D(flatbuffers::FlatBufferBuilder &_fbb, const Convolution3DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Convolution3DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _common = _o->common ? CreateConvolution3DCommon(_fbb, _o->common.get(), _rehasher) : 0; - auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; - auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; - return MNN::CreateConvolution3D( - _fbb, - _common, - _weight, - _bias); -} - -inline InnerProductT *InnerProduct::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new InnerProductT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void InnerProduct::UnPackTo(InnerProductT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = outputCount(); _o->outputCount = _e; }; - { auto _e = biasTerm(); _o->biasTerm = _e; }; - { auto _e = weightSize(); _o->weightSize = _e; }; - { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = transpose(); _o->transpose = _e; }; - { auto _e = quanParameter(); if (_e) _o->quanParameter = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset InnerProduct::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateInnerProduct(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateInnerProduct(flatbuffers::FlatBufferBuilder &_fbb, const InnerProductT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InnerProductT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _outputCount = _o->outputCount; - auto _biasTerm = _o->biasTerm; - auto _weightSize = _o->weightSize; - 
auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; - auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; - auto _axis = _o->axis; - auto _transpose = _o->transpose; - auto _quanParameter = _o->quanParameter ? CreateIDSTQuan(_fbb, _o->quanParameter.get(), _rehasher) : 0; - return MNN::CreateInnerProduct( - _fbb, - _outputCount, - _biasTerm, - _weightSize, - _weight, - _bias, - _axis, - _transpose, - _quanParameter); -} - -inline PoolT *Pool::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PoolT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Pool::UnPackTo(PoolT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padX(); _o->padX = _e; }; - { auto _e = padY(); _o->padY = _e; }; - { auto _e = isGlobal(); _o->isGlobal = _e; }; - { auto _e = kernelX(); _o->kernelX = _e; }; - { auto _e = kernelY(); _o->kernelY = _e; }; - { auto _e = strideX(); _o->strideX = _e; }; - { auto _e = strideY(); _o->strideY = _e; }; - { auto _e = type(); _o->type = _e; }; - { auto _e = padType(); _o->padType = _e; }; - { auto _e = dataType(); _o->dataType = _e; }; - { auto _e = ceilModel(); _o->ceilModel = _e; }; -} - -inline flatbuffers::Offset Pool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePool(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePool(flatbuffers::FlatBufferBuilder &_fbb, const PoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padX = _o->padX; - auto _padY = _o->padY; - auto _isGlobal = _o->isGlobal; - auto _kernelX = _o->kernelX; - auto _kernelY = _o->kernelY; - auto _strideX = _o->strideX; - auto _strideY = _o->strideY; - auto _type = _o->type; - auto _padType = _o->padType; - auto _dataType = _o->dataType; - auto _ceilModel = _o->ceilModel; - return MNN::CreatePool( - _fbb, - _padX, - _padY, - _isGlobal, - _kernelX, - _kernelY, - _strideX, - _strideY, - _type, - _padType, - _dataType, - _ceilModel); -} - -inline Pool3DT *Pool3D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Pool3DT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Pool3D::UnPackTo(Pool3DT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = strides(); if (_e) { _o->strides.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strides[_i] = _e->Get(_i); } } }; - { auto _e = kernels(); if (_e) { _o->kernels.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->kernels[_i] = _e->Get(_i); } } }; - { auto _e = pads(); if (_e) { _o->pads.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pads[_i] = _e->Get(_i); } } }; - { auto _e = type(); _o->type = _e; }; - { auto _e = padType(); _o->padType = _e; }; -} - -inline flatbuffers::Offset Pool3D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePool3D(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePool3D(flatbuffers::FlatBufferBuilder &_fbb, const Pool3DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct 
_VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool3DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _strides = _o->strides.size() ? _fbb.CreateVector(_o->strides) : 0; - auto _kernels = _o->kernels.size() ? _fbb.CreateVector(_o->kernels) : 0; - auto _pads = _o->pads.size() ? _fbb.CreateVector(_o->pads) : 0; - auto _type = _o->type; - auto _padType = _o->padType; - return MNN::CreatePool3D( - _fbb, - _strides, - _kernels, - _pads, - _type, - _padType); -} - -inline ReluT *Relu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReluT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Relu::UnPackTo(ReluT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = slope(); _o->slope = _e; }; -} - -inline flatbuffers::Offset Relu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRelu(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRelu(flatbuffers::FlatBufferBuilder &_fbb, const ReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _slope = _o->slope; - return MNN::CreateRelu( - _fbb, - _slope); -} - -inline Relu6T *Relu6::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Relu6T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Relu6::UnPackTo(Relu6T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = slope(); _o->slope = _e; }; -} - -inline flatbuffers::Offset Relu6::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRelu6(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRelu6(flatbuffers::FlatBufferBuilder &_fbb, const Relu6T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Relu6T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _slope = _o->slope; - return MNN::CreateRelu6( - _fbb, - _slope); -} - -inline PReluT *PRelu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PReluT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PRelu::UnPackTo(PReluT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = slopeCount(); _o->slopeCount = _e; }; - { auto _e = slope(); if (_e) { _o->slope.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slope[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset PRelu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PReluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePRelu(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePRelu(flatbuffers::FlatBufferBuilder &_fbb, const PReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _slopeCount = 
_o->slopeCount; - auto _slope = _o->slope.size() ? _fbb.CreateVector(_o->slope) : 0; - return MNN::CreatePRelu( - _fbb, - _slopeCount, - _slope); -} - -inline ELUT *ELU::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ELUT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ELU::UnPackTo(ELUT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = alpha(); _o->alpha = _e; }; -} - -inline flatbuffers::Offset ELU::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ELUT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateELU(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateELU(flatbuffers::FlatBufferBuilder &_fbb, const ELUT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ELUT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _alpha = _o->alpha; - return MNN::CreateELU( - _fbb, - _alpha); -} - -inline LRNT *LRN::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LRNT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LRN::UnPackTo(LRNT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = regionType(); _o->regionType = _e; }; - { auto _e = localSize(); _o->localSize = _e; }; - { auto _e = alpha(); _o->alpha = _e; }; - { auto _e = beta(); _o->beta = _e; }; -} - -inline flatbuffers::Offset LRN::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LRNT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLRN(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLRN(flatbuffers::FlatBufferBuilder &_fbb, const LRNT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LRNT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _regionType = _o->regionType; - auto _localSize = _o->localSize; - auto _alpha = _o->alpha; - auto _beta = _o->beta; - return MNN::CreateLRN( - _fbb, - _regionType, - _localSize, - _alpha, - _beta); -} - -inline ArgMaxT *ArgMax::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMaxT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ArgMax::UnPackTo(ArgMaxT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = outMaxVal(); _o->outMaxVal = _e; }; - { auto _e = topK(); _o->topK = _e; }; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = softmaxThreshold(); _o->softmaxThreshold = _e; }; -} - -inline flatbuffers::Offset ArgMax::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMax(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateArgMax(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _outMaxVal = _o->outMaxVal; - auto _topK = _o->topK; - auto _axis = _o->axis; - auto _softmaxThreshold = _o->softmaxThreshold; - return MNN::CreateArgMax( - _fbb, - 
_outMaxVal, - _topK, - _axis, - _softmaxThreshold); -} - -inline AxisT *Axis::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AxisT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Axis::UnPackTo(AxisT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; -} - -inline flatbuffers::Offset Axis::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AxisT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAxis(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAxis(flatbuffers::FlatBufferBuilder &_fbb, const AxisT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AxisT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return MNN::CreateAxis( - _fbb, - _axis); -} - -inline InputT *Input::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new InputT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Input::UnPackTo(InputT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; - { auto _e = dtype(); _o->dtype = _e; }; - { auto _e = dformat(); _o->dformat = _e; }; -} - -inline flatbuffers::Offset Input::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InputT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateInput(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateInput(flatbuffers::FlatBufferBuilder &_fbb, const InputT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InputT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dims = _o->dims.size() ? 
_fbb.CreateVector(_o->dims) : 0; - auto _dtype = _o->dtype; - auto _dformat = _o->dformat; - return MNN::CreateInput( - _fbb, - _dims, - _dtype, - _dformat); -} - -inline LSTMT *LSTM::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSTMT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LSTM::UnPackTo(LSTMT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = outputCount(); _o->outputCount = _e; }; - { auto _e = weightSize(); _o->weightSize = _e; }; - { auto _e = clippingThreshold(); _o->clippingThreshold = _e; }; - { auto _e = weightI(); if (_e) _o->weightI = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = weightH(); if (_e) _o->weightH = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = bias(); if (_e) _o->bias = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = weightIQ(); if (_e) _o->weightIQ = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = weightIA(); if (_e) _o->weightIA = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = quantScale(); _o->quantScale = _e; }; -} - -inline flatbuffers::Offset LSTM::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSTM(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLSTM(flatbuffers::FlatBufferBuilder &_fbb, const LSTMT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _outputCount = _o->outputCount; - auto _weightSize = _o->weightSize; - auto _clippingThreshold = _o->clippingThreshold; - auto _weightI = _o->weightI ? CreateBlob(_fbb, _o->weightI.get(), _rehasher) : 0; - auto _weightH = _o->weightH ? CreateBlob(_fbb, _o->weightH.get(), _rehasher) : 0; - auto _bias = _o->bias ? CreateBlob(_fbb, _o->bias.get(), _rehasher) : 0; - auto _weightIQ = _o->weightIQ ? CreateBlob(_fbb, _o->weightIQ.get(), _rehasher) : 0; - auto _weightIA = _o->weightIA ? 
CreateBlob(_fbb, _o->weightIA.get(), _rehasher) : 0; - auto _quantScale = _o->quantScale; - return MNN::CreateLSTM( - _fbb, - _outputCount, - _weightSize, - _clippingThreshold, - _weightI, - _weightH, - _bias, - _weightIQ, - _weightIA, - _quantScale); -} - -inline SliceT *Slice::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SliceT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Slice::UnPackTo(SliceT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = slicePoints(); if (_e) { _o->slicePoints.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slicePoints[_i] = _e->Get(_i); } } }; - { auto _e = sourceType(); _o->sourceType = _e; }; -} - -inline flatbuffers::Offset Slice::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSlice(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSlice(flatbuffers::FlatBufferBuilder &_fbb, const SliceT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _slicePoints = _o->slicePoints.size() ? _fbb.CreateVector(_o->slicePoints) : 0; - auto _sourceType = _o->sourceType; - return MNN::CreateSlice( - _fbb, - _axis, - _slicePoints, - _sourceType); -} - -inline BatchNormT *BatchNorm::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchNormT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BatchNorm::UnPackTo(BatchNormT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = channels(); _o->channels = _e; }; - { auto _e = slopeData(); if (_e) { _o->slopeData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->slopeData[_i] = _e->Get(_i); } } }; - { auto _e = meanData(); if (_e) { _o->meanData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->meanData[_i] = _e->Get(_i); } } }; - { auto _e = varData(); if (_e) { _o->varData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->varData[_i] = _e->Get(_i); } } }; - { auto _e = biasData(); if (_e) { _o->biasData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->biasData[_i] = _e->Get(_i); } } }; - { auto _e = Adata(); if (_e) { _o->Adata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Adata[_i] = _e->Get(_i); } } }; - { auto _e = Bdata(); if (_e) { _o->Bdata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->Bdata[_i] = _e->Get(_i); } } }; - { auto _e = epsilon(); _o->epsilon = _e; }; -} - -inline flatbuffers::Offset BatchNorm::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchNorm(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBatchNorm(flatbuffers::FlatBufferBuilder &_fbb, const BatchNormT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchNormT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = 
{ &_fbb, _o, _rehasher}; (void)_va; - auto _channels = _o->channels; - auto _slopeData = _o->slopeData.size() ? _fbb.CreateVector(_o->slopeData) : 0; - auto _meanData = _o->meanData.size() ? _fbb.CreateVector(_o->meanData) : 0; - auto _varData = _o->varData.size() ? _fbb.CreateVector(_o->varData) : 0; - auto _biasData = _o->biasData.size() ? _fbb.CreateVector(_o->biasData) : 0; - auto _Adata = _o->Adata.size() ? _fbb.CreateVector(_o->Adata) : 0; - auto _Bdata = _o->Bdata.size() ? _fbb.CreateVector(_o->Bdata) : 0; - auto _epsilon = _o->epsilon; - return MNN::CreateBatchNorm( - _fbb, - _channels, - _slopeData, - _meanData, - _varData, - _biasData, - _Adata, - _Bdata, - _epsilon); -} - -inline ScaleT *Scale::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ScaleT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Scale::UnPackTo(ScaleT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = channels(); _o->channels = _e; }; - { auto _e = scaleData(); if (_e) { _o->scaleData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scaleData[_i] = _e->Get(_i); } } }; - { auto _e = biasData(); if (_e) { _o->biasData.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->biasData[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset Scale::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateScale(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateScale(flatbuffers::FlatBufferBuilder &_fbb, const ScaleT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScaleT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _channels = _o->channels; - auto _scaleData = _o->scaleData.size() ? _fbb.CreateVector(_o->scaleData) : 0; - auto _biasData = _o->biasData.size() ? _fbb.CreateVector(_o->biasData) : 0; - return MNN::CreateScale( - _fbb, - _channels, - _scaleData, - _biasData); -} - -inline EltwiseT *Eltwise::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EltwiseT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Eltwise::UnPackTo(EltwiseT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; }; - { auto _e = coeff(); if (_e) { _o->coeff.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->coeff[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset Eltwise::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEltwise(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateEltwise(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EltwiseT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - auto _coeff = _o->coeff.size() ? 
_fbb.CreateVector(_o->coeff) : 0; - return MNN::CreateEltwise( - _fbb, - _type, - _coeff); -} - -inline FlattenT *Flatten::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FlattenT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Flatten::UnPackTo(FlattenT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = endAxis(); _o->endAxis = _e; }; -} - -inline flatbuffers::Offset Flatten::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFlatten(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFlatten(flatbuffers::FlatBufferBuilder &_fbb, const FlattenT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FlattenT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _endAxis = _o->endAxis; - return MNN::CreateFlatten( - _fbb, - _axis, - _endAxis); -} - -inline PermuteT *Permute::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PermuteT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Permute::UnPackTo(PermuteT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset Permute::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePermute(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePermute(flatbuffers::FlatBufferBuilder &_fbb, const PermuteT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PermuteT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dims = _o->dims.size() ? _fbb.CreateVector(_o->dims) : 0; - return MNN::CreatePermute( - _fbb, - _dims); -} - -inline ReshapeT *Reshape::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReshapeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Reshape::UnPackTo(ReshapeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; - { auto _e = dimType(); _o->dimType = _e; }; -} - -inline flatbuffers::Offset Reshape::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReshape(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReshape(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dims = _o->dims.size() ? 
_fbb.CreateVector(_o->dims) : 0; - auto _dimType = _o->dimType; - return MNN::CreateReshape( - _fbb, - _dims, - _dimType); -} - -inline DetectionOutputT *DetectionOutput::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DetectionOutputT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DetectionOutput::UnPackTo(DetectionOutputT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = classCount(); _o->classCount = _e; }; - { auto _e = nmsThresholdold(); _o->nmsThresholdold = _e; }; - { auto _e = nmsTopK(); _o->nmsTopK = _e; }; - { auto _e = keepTopK(); _o->keepTopK = _e; }; - { auto _e = confidenceThreshold(); _o->confidenceThreshold = _e; }; - { auto _e = shareLocation(); _o->shareLocation = _e; }; - { auto _e = backgroundLable(); _o->backgroundLable = _e; }; - { auto _e = varianceEncodedTarget(); _o->varianceEncodedTarget = _e; }; - { auto _e = codeType(); _o->codeType = _e; }; - { auto _e = objectnessScore(); _o->objectnessScore = _e; }; -} - -inline flatbuffers::Offset DetectionOutput::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDetectionOutput(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDetectionOutput(flatbuffers::FlatBufferBuilder &_fbb, const DetectionOutputT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DetectionOutputT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _classCount = _o->classCount; - auto _nmsThresholdold = _o->nmsThresholdold; - auto _nmsTopK = _o->nmsTopK; - auto _keepTopK = _o->keepTopK; - auto _confidenceThreshold = _o->confidenceThreshold; - auto _shareLocation = _o->shareLocation; - auto _backgroundLable = _o->backgroundLable; - auto _varianceEncodedTarget = _o->varianceEncodedTarget; - auto _codeType = _o->codeType; - auto _objectnessScore = _o->objectnessScore; - return MNN::CreateDetectionOutput( - _fbb, - _classCount, - _nmsThresholdold, - _nmsTopK, - _keepTopK, - _confidenceThreshold, - _shareLocation, - _backgroundLable, - _varianceEncodedTarget, - _codeType, - _objectnessScore); -} - -inline RoiPoolingT *RoiPooling::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RoiPoolingT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RoiPooling::UnPackTo(RoiPoolingT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = pooledWidth(); _o->pooledWidth = _e; }; - { auto _e = pooledHeight(); _o->pooledHeight = _e; }; - { auto _e = spatialScale(); _o->spatialScale = _e; }; -} - -inline flatbuffers::Offset RoiPooling::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRoiPooling(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRoiPooling(flatbuffers::FlatBufferBuilder &_fbb, const RoiPoolingT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RoiPoolingT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _pooledWidth = _o->pooledWidth; - auto _pooledHeight = _o->pooledHeight; - auto _spatialScale = _o->spatialScale; - return 
MNN::CreateRoiPooling( - _fbb, - _pooledWidth, - _pooledHeight, - _spatialScale); -} - -inline ProposalT *Proposal::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ProposalT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Proposal::UnPackTo(ProposalT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = featStride(); _o->featStride = _e; }; - { auto _e = baseSize(); _o->baseSize = _e; }; - { auto _e = preNmsTopN(); _o->preNmsTopN = _e; }; - { auto _e = afterNmsTopN(); _o->afterNmsTopN = _e; }; - { auto _e = nmsThreshold(); _o->nmsThreshold = _e; }; - { auto _e = minSize(); _o->minSize = _e; }; - { auto _e = ratios(); if (_e) _o->ratios = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = scales(); if (_e) _o->scales = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = anchors(); if (_e) _o->anchors = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset Proposal::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateProposal(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateProposal(flatbuffers::FlatBufferBuilder &_fbb, const ProposalT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ProposalT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _featStride = _o->featStride; - auto _baseSize = _o->baseSize; - auto _preNmsTopN = _o->preNmsTopN; - auto _afterNmsTopN = _o->afterNmsTopN; - auto _nmsThreshold = _o->nmsThreshold; - auto _minSize = _o->minSize; - auto _ratios = _o->ratios ? CreateBlob(_fbb, _o->ratios.get(), _rehasher) : 0; - auto _scales = _o->scales ? CreateBlob(_fbb, _o->scales.get(), _rehasher) : 0; - auto _anchors = _o->anchors ? 
CreateBlob(_fbb, _o->anchors.get(), _rehasher) : 0; - return MNN::CreateProposal( - _fbb, - _featStride, - _baseSize, - _preNmsTopN, - _afterNmsTopN, - _nmsThreshold, - _minSize, - _ratios, - _scales, - _anchors); -} - -inline InterpT *Interp::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new InterpT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Interp::UnPackTo(InterpT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = widthScale(); _o->widthScale = _e; }; - { auto _e = heightScale(); _o->heightScale = _e; }; - { auto _e = outputWidth(); _o->outputWidth = _e; }; - { auto _e = outputHeight(); _o->outputHeight = _e; }; - { auto _e = resizeType(); _o->resizeType = _e; }; - { auto _e = alignCorners(); _o->alignCorners = _e; }; -} - -inline flatbuffers::Offset Interp::Pack(flatbuffers::FlatBufferBuilder &_fbb, const InterpT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateInterp(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateInterp(flatbuffers::FlatBufferBuilder &_fbb, const InterpT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const InterpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _widthScale = _o->widthScale; - auto _heightScale = _o->heightScale; - auto _outputWidth = _o->outputWidth; - auto _outputHeight = _o->outputHeight; - auto _resizeType = _o->resizeType; - auto _alignCorners = _o->alignCorners; - return MNN::CreateInterp( - _fbb, - _widthScale, - _heightScale, - _outputWidth, - _outputHeight, - _resizeType, - _alignCorners); -} - -inline ResizeT *Resize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Resize::UnPackTo(ResizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = xScale(); _o->xScale = _e; }; - { auto _e = yScale(); _o->yScale = _e; }; -} - -inline flatbuffers::Offset Resize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResize(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateResize(flatbuffers::FlatBufferBuilder &_fbb, const ResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _xScale = _o->xScale; - auto _yScale = _o->yScale; - return MNN::CreateResize( - _fbb, - _xScale, - _yScale); -} - -inline PriorBoxT *PriorBox::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PriorBoxT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PriorBox::UnPackTo(PriorBoxT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = minSizes(); if (_e) { _o->minSizes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->minSizes[_i] = _e->Get(_i); } } }; - { auto _e = maxSizes(); if (_e) { _o->maxSizes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->maxSizes[_i] = _e->Get(_i); } } }; - { auto _e = aspectRatios(); if (_e) { 
_o->aspectRatios.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->aspectRatios[_i] = _e->Get(_i); } } }; - { auto _e = variances(); if (_e) { _o->variances.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->variances[_i] = _e->Get(_i); } } }; - { auto _e = flip(); _o->flip = _e; }; - { auto _e = clip(); _o->clip = _e; }; - { auto _e = imageWidth(); _o->imageWidth = _e; }; - { auto _e = imageHeight(); _o->imageHeight = _e; }; - { auto _e = stepWidth(); _o->stepWidth = _e; }; - { auto _e = stepHeight(); _o->stepHeight = _e; }; - { auto _e = offset(); _o->offset = _e; }; -} - -inline flatbuffers::Offset PriorBox::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePriorBox(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePriorBox(flatbuffers::FlatBufferBuilder &_fbb, const PriorBoxT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PriorBoxT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _minSizes = _o->minSizes.size() ? _fbb.CreateVector(_o->minSizes) : 0; - auto _maxSizes = _o->maxSizes.size() ? _fbb.CreateVector(_o->maxSizes) : 0; - auto _aspectRatios = _o->aspectRatios.size() ? _fbb.CreateVector(_o->aspectRatios) : 0; - auto _variances = _o->variances.size() ? _fbb.CreateVector(_o->variances) : 0; - auto _flip = _o->flip; - auto _clip = _o->clip; - auto _imageWidth = _o->imageWidth; - auto _imageHeight = _o->imageHeight; - auto _stepWidth = _o->stepWidth; - auto _stepHeight = _o->stepHeight; - auto _offset = _o->offset; - return MNN::CreatePriorBox( - _fbb, - _minSizes, - _maxSizes, - _aspectRatios, - _variances, - _flip, - _clip, - _imageWidth, - _imageHeight, - _stepWidth, - _stepHeight, - _offset); -} - -inline NormalizeT *Normalize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NormalizeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Normalize::UnPackTo(NormalizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = acrossSpatial(); _o->acrossSpatial = _e; }; - { auto _e = channelShared(); _o->channelShared = _e; }; - { auto _e = eps(); _o->eps = _e; }; - { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset Normalize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNormalize(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNormalize(flatbuffers::FlatBufferBuilder &_fbb, const NormalizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NormalizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _acrossSpatial = _o->acrossSpatial; - auto _channelShared = _o->channelShared; - auto _eps = _o->eps; - auto _scale = _o->scale.size() ? 
_fbb.CreateVector(_o->scale) : 0; - return MNN::CreateNormalize( - _fbb, - _acrossSpatial, - _channelShared, - _eps, - _scale); -} - -inline EltwiseInt8T *EltwiseInt8::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EltwiseInt8T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void EltwiseInt8::UnPackTo(EltwiseInt8T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; }; - { auto _e = inputQuan0(); if (_e) _o->inputQuan0 = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = inputQuan1(); if (_e) _o->inputQuan1 = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = outputQuan(); if (_e) _o->outputQuan = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset EltwiseInt8::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEltwiseInt8(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateEltwiseInt8(flatbuffers::FlatBufferBuilder &_fbb, const EltwiseInt8T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EltwiseInt8T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - auto _inputQuan0 = _o->inputQuan0 ? CreateQuantizedFloatParam(_fbb, _o->inputQuan0.get(), _rehasher) : 0; - auto _inputQuan1 = _o->inputQuan1 ? CreateQuantizedFloatParam(_fbb, _o->inputQuan1.get(), _rehasher) : 0; - auto _outputQuan = _o->outputQuan ? CreateQuantizedFloatParam(_fbb, _o->outputQuan.get(), _rehasher) : 0; - return MNN::CreateEltwiseInt8( - _fbb, - _type, - _inputQuan0, - _inputQuan1, - _outputQuan); -} - -inline const flatbuffers::TypeTable *PadModeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PadModeTypeTable - }; - static const char * const names[] = { - "CAFFE", - "VALID", - "SAME" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PoolTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PoolTypeTypeTable - }; - static const char * const names[] = { - "MAXPOOL", - "AVEPOOL" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PoolPadTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PoolPadTypeTypeTable - }; - static const char * const names[] = { - "CAFFE", - "VALID", - "SAME" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *EltwiseTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { 
flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - EltwiseTypeTypeTable - }; - static const char * const names[] = { - "PROD", - "SUM", - "MAXIMUM", - "SUB" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 4, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *Convolution2DCommonTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PadModeTypeTable - }; - static const char * const names[] = { - "padX", - "padY", - "kernelX", - "kernelY", - "strideX", - "strideY", - "dilateX", - "dilateY", - "padMode", - "group", - "outputCount", - "inputCount", - "relu", - "relu6" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 14, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *Convolution3DCommonTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PadModeTypeTable - }; - static const char * const names[] = { - "dilates", - "strides", - "kernels", - "pads", - "padMode", - "inputCount", - "outputCount", - "relu", - "relu6" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *IDSTQuanTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "buffer", - "alpha", - "type", - "useInt32", - "quantScale", - "scaleIn", - "scaleOut", - "aMax", - "aMin", - "readType", - "has_scaleInt" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 11, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedFloatParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const char * const names[] = { - "weight", - "bias", - "scale", - "tensorScale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable 
*Convolution2DTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - Convolution2DCommonTypeTable, - IDSTQuanTypeTable, - QuantizedFloatParamTypeTable - }; - static const char * const names[] = { - "common", - "weight", - "bias", - "quanParameter", - "symmetricQuan" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *Convolution3DTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - Convolution3DCommonTypeTable - }; - static const char * const names[] = { - "common", - "weight", - "bias" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *InnerProductTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - IDSTQuanTypeTable - }; - static const char * const names[] = { - "outputCount", - "biasTerm", - "weightSize", - "weight", - "bias", - "axis", - "transpose", - "quanParameter" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 8, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PoolTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_INT, 0, 2 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PoolTypeTypeTable, - PoolPadTypeTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "padX", - "padY", - "isGlobal", - "kernelX", - "kernelY", - "strideX", - "strideY", - "type", - "padType", - "dataType", - "ceilModel" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *Pool3DTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PoolTypeTypeTable, - PoolPadTypeTypeTable - }; - static const char * const names[] = { - "strides", - "kernels", - "pads", - "type", - "padType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - 
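For context, the headers being deleted in this hunk are emitted by flatc and expose, per schema table, a builder API (CreateXxx), an object API (XxxT plus Pack/UnPack), and a mini-reflection TypeTable. The following is a minimal sketch of how that generated API is typically consumed, assuming the regenerated schema/current/CaffeOp_generated.h is on the include path; the names MNN::CreateAxis, MNN::AxisT and MNN::AxisTypeTable are taken from the generated code shown above, everything else is illustrative.

#include "CaffeOp_generated.h"  // assumed to be regenerated into schema/current/ by the schema build step
#include <cstdio>
#include <memory>

int main() {
    flatbuffers::FlatBufferBuilder fbb;
    // Builder API: serialize an Axis table with the generated convenience creator.
    fbb.Finish(MNN::CreateAxis(fbb, /*axis=*/1));

    // Object API: UnPack() copies the read-only table into the mutable AxisT struct.
    auto axis = flatbuffers::GetRoot<MNN::Axis>(fbb.GetBufferPointer());
    std::unique_ptr<MNN::AxisT> native(axis->UnPack());
    std::printf("axis = %d\n", native->axis);

    // Mini-reflection: the generated TypeTable exposes field names for debugging/dumping.
    const flatbuffers::TypeTable *tt = MNN::AxisTypeTable();
    std::printf("field 0: %s\n", tt->names[0]);
    return 0;
}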
-inline const flatbuffers::TypeTable *ReluTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "slope" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *Relu6TypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "slope" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PReluTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const char * const names[] = { - "slopeCount", - "slope" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ELUTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "alpha" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *LRNTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "regionType", - "localSize", - "alpha", - "beta" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ArgMaxTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "outMaxVal", - "topK", - "axis", - "softmaxThreshold" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *AxisTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *InputTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable, - MNN_DATA_FORMATTypeTable - }; - static const char * const names[] = { - "dims", - "dtype", - "dformat" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *LSTMTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { 
flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BlobTypeTable - }; - static const char * const names[] = { - "outputCount", - "weightSize", - "clippingThreshold", - "weightI", - "weightH", - "bias", - "weightIQ", - "weightIA", - "quantScale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *SliceTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - NetSourceTypeTable - }; - static const char * const names[] = { - "axis", - "slicePoints", - "sourceType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *BatchNormTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "channels", - "slopeData", - "meanData", - "varData", - "biasData", - "Adata", - "Bdata", - "epsilon" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 8, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ScaleTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const char * const names[] = { - "channels", - "scaleData", - "biasData" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *EltwiseTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - EltwiseTypeTypeTable - }; - static const char * const names[] = { - "type", - "coeff" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *FlattenTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "axis", - "endAxis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PermuteTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 } - }; - static const char * const names[] = { - "dims" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ReshapeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { 
flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - MNN_DATA_FORMATTypeTable - }; - static const char * const names[] = { - "dims", - "dimType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *DetectionOutputTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "classCount", - "nmsThresholdold", - "nmsTopK", - "keepTopK", - "confidenceThreshold", - "shareLocation", - "backgroundLable", - "varianceEncodedTarget", - "codeType", - "objectnessScore" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 10, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *RoiPoolingTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "pooledWidth", - "pooledHeight", - "spatialScale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ProposalTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BlobTypeTable - }; - static const char * const names[] = { - "featStride", - "baseSize", - "preNmsTopN", - "afterNmsTopN", - "nmsThreshold", - "minSize", - "ratios", - "scales", - "anchors" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *InterpTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "widthScale", - "heightScale", - "outputWidth", - "outputHeight", - "resizeType", - "alignCorners" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ResizeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "xScale", - "yScale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PriorBoxTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - 
{ flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "minSizes", - "maxSizes", - "aspectRatios", - "variances", - "flip", - "clip", - "imageWidth", - "imageHeight", - "stepWidth", - "stepHeight", - "offset" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 11, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *NormalizeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const char * const names[] = { - "acrossSpatial", - "channelShared", - "eps", - "scale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *EltwiseInt8TypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - EltwiseTypeTypeTable, - QuantizedFloatParamTypeTable - }; - static const char * const names[] = { - "type", - "inputQuan0", - "inputQuan1", - "outputQuan" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_CAFFEOP_MNN_H_ diff --git a/schema/current/GpuLibrary_generated.h b/schema/current/GpuLibrary_generated.h deleted file mode 100644 index 321f816c..00000000 --- a/schema/current/GpuLibrary_generated.h +++ /dev/null @@ -1,1034 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_ -#define FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_ - - -#include "Tensor_generated.h" -#include "Type_generated.h" - -namespace MNN { - -struct GpuBuffer; -struct GpuBufferT; - -struct GpuPipeline; -struct GpuPipelineT; - -struct GpuStage; -struct GpuStageT; - -struct GpuFunction; -struct GpuFunctionT; - -struct GpuLibrary; -struct GpuLibraryT; - -inline const flatbuffers::TypeTable *GpuBufferTypeTable(); - -inline const flatbuffers::TypeTable *GpuPipelineTypeTable(); - -inline const flatbuffers::TypeTable *GpuStageTypeTable(); - -inline const flatbuffers::TypeTable *GpuFunctionTypeTable(); - -inline const flatbuffers::TypeTable *GpuLibraryTypeTable(); - -enum STORAGE_TYPE { - STORAGE_TYPE_BUFFER = 0, - STORAGE_TYPE_UNIFORM = 1, - STORAGE_TYPE_IMAGE = 2, - STORAGE_TYPE_MIN = STORAGE_TYPE_BUFFER, - STORAGE_TYPE_MAX = STORAGE_TYPE_IMAGE -}; - -inline const STORAGE_TYPE (&EnumValuesSTORAGE_TYPE())[3] { - static const STORAGE_TYPE values[] = { - STORAGE_TYPE_BUFFER, - STORAGE_TYPE_UNIFORM, - STORAGE_TYPE_IMAGE - }; - return values; -} - -inline const char * const *EnumNamesSTORAGE_TYPE() { - static const char * const names[] = { - "BUFFER", - "UNIFORM", - "IMAGE", - nullptr - }; - return names; -} - -inline const char *EnumNameSTORAGE_TYPE(STORAGE_TYPE e) { - if (e < STORAGE_TYPE_BUFFER 
|| e > STORAGE_TYPE_IMAGE) return ""; - const size_t index = static_cast(e); - return EnumNamesSTORAGE_TYPE()[index]; -} - -enum ACCESS_TYPE { - ACCESS_TYPE_READ_ONLY = 0, - ACCESS_TYPE_WRITE_ONLY = 1, - ACCESS_TYPE_READ_WRITE = 2, - ACCESS_TYPE_MIN = ACCESS_TYPE_READ_ONLY, - ACCESS_TYPE_MAX = ACCESS_TYPE_READ_WRITE -}; - -inline const ACCESS_TYPE (&EnumValuesACCESS_TYPE())[3] { - static const ACCESS_TYPE values[] = { - ACCESS_TYPE_READ_ONLY, - ACCESS_TYPE_WRITE_ONLY, - ACCESS_TYPE_READ_WRITE - }; - return values; -} - -inline const char * const *EnumNamesACCESS_TYPE() { - static const char * const names[] = { - "READ_ONLY", - "WRITE_ONLY", - "READ_WRITE", - nullptr - }; - return names; -} - -inline const char *EnumNameACCESS_TYPE(ACCESS_TYPE e) { - if (e < ACCESS_TYPE_READ_ONLY || e > ACCESS_TYPE_READ_WRITE) return ""; - const size_t index = static_cast(e); - return EnumNamesACCESS_TYPE()[index]; -} - -struct GpuBufferT : public flatbuffers::NativeTable { - typedef GpuBuffer TableType; - ACCESS_TYPE access; - STORAGE_TYPE storage; - std::unique_ptr content; - GpuBufferT() - : access(ACCESS_TYPE_READ_ONLY), - storage(STORAGE_TYPE_BUFFER) { - } -}; - -struct GpuBuffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GpuBufferT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GpuBufferTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ACCESS = 4, - VT_STORAGE = 6, - VT_CONTENT = 8 - }; - ACCESS_TYPE access() const { - return static_cast(GetField(VT_ACCESS, 0)); - } - STORAGE_TYPE storage() const { - return static_cast(GetField(VT_STORAGE, 0)); - } - const Blob *content() const { - return GetPointer(VT_CONTENT); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ACCESS) && - VerifyField(verifier, VT_STORAGE) && - VerifyOffset(verifier, VT_CONTENT) && - verifier.VerifyTable(content()) && - verifier.EndTable(); - } - GpuBufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GpuBufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GpuBufferBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_access(ACCESS_TYPE access) { - fbb_.AddElement(GpuBuffer::VT_ACCESS, static_cast(access), 0); - } - void add_storage(STORAGE_TYPE storage) { - fbb_.AddElement(GpuBuffer::VT_STORAGE, static_cast(storage), 0); - } - void add_content(flatbuffers::Offset content) { - fbb_.AddOffset(GpuBuffer::VT_CONTENT, content); - } - explicit GpuBufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GpuBufferBuilder &operator=(const GpuBufferBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGpuBuffer( - flatbuffers::FlatBufferBuilder &_fbb, - ACCESS_TYPE access = ACCESS_TYPE_READ_ONLY, - STORAGE_TYPE storage = STORAGE_TYPE_BUFFER, - flatbuffers::Offset content = 0) { - GpuBufferBuilder builder_(_fbb); - builder_.add_content(content); - builder_.add_storage(storage); - builder_.add_access(access); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGpuBuffer(flatbuffers::FlatBufferBuilder &_fbb, 
const GpuBufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GpuPipelineT : public flatbuffers::NativeTable { - typedef GpuPipeline TableType; - std::vector localSize; - std::string key; - std::vector metal; - std::vector vulkan; - std::string openglComputeShader; - std::string openclKernel; - GpuPipelineT() { - } -}; - -struct GpuPipeline FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GpuPipelineT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GpuPipelineTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_LOCALSIZE = 4, - VT_KEY = 6, - VT_METAL = 8, - VT_VULKAN = 10, - VT_OPENGLCOMPUTESHADER = 12, - VT_OPENCLKERNEL = 14 - }; - const flatbuffers::Vector *localSize() const { - return GetPointer *>(VT_LOCALSIZE); - } - const flatbuffers::String *key() const { - return GetPointer(VT_KEY); - } - const flatbuffers::Vector *metal() const { - return GetPointer *>(VT_METAL); - } - const flatbuffers::Vector *vulkan() const { - return GetPointer *>(VT_VULKAN); - } - const flatbuffers::String *openglComputeShader() const { - return GetPointer(VT_OPENGLCOMPUTESHADER); - } - const flatbuffers::String *openclKernel() const { - return GetPointer(VT_OPENCLKERNEL); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_LOCALSIZE) && - verifier.VerifyVector(localSize()) && - VerifyOffset(verifier, VT_KEY) && - verifier.VerifyString(key()) && - VerifyOffset(verifier, VT_METAL) && - verifier.VerifyVector(metal()) && - VerifyOffset(verifier, VT_VULKAN) && - verifier.VerifyVector(vulkan()) && - VerifyOffset(verifier, VT_OPENGLCOMPUTESHADER) && - verifier.VerifyString(openglComputeShader()) && - VerifyOffset(verifier, VT_OPENCLKERNEL) && - verifier.VerifyString(openclKernel()) && - verifier.EndTable(); - } - GpuPipelineT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GpuPipelineT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GpuPipelineBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_localSize(flatbuffers::Offset> localSize) { - fbb_.AddOffset(GpuPipeline::VT_LOCALSIZE, localSize); - } - void add_key(flatbuffers::Offset key) { - fbb_.AddOffset(GpuPipeline::VT_KEY, key); - } - void add_metal(flatbuffers::Offset> metal) { - fbb_.AddOffset(GpuPipeline::VT_METAL, metal); - } - void add_vulkan(flatbuffers::Offset> vulkan) { - fbb_.AddOffset(GpuPipeline::VT_VULKAN, vulkan); - } - void add_openglComputeShader(flatbuffers::Offset openglComputeShader) { - fbb_.AddOffset(GpuPipeline::VT_OPENGLCOMPUTESHADER, openglComputeShader); - } - void add_openclKernel(flatbuffers::Offset openclKernel) { - fbb_.AddOffset(GpuPipeline::VT_OPENCLKERNEL, openclKernel); - } - explicit GpuPipelineBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GpuPipelineBuilder &operator=(const GpuPipelineBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGpuPipeline( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> localSize = 0, - flatbuffers::Offset key = 0, - flatbuffers::Offset> 
metal = 0, - flatbuffers::Offset> vulkan = 0, - flatbuffers::Offset openglComputeShader = 0, - flatbuffers::Offset openclKernel = 0) { - GpuPipelineBuilder builder_(_fbb); - builder_.add_openclKernel(openclKernel); - builder_.add_openglComputeShader(openglComputeShader); - builder_.add_vulkan(vulkan); - builder_.add_metal(metal); - builder_.add_key(key); - builder_.add_localSize(localSize); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateGpuPipelineDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *localSize = nullptr, - const char *key = nullptr, - const std::vector *metal = nullptr, - const std::vector *vulkan = nullptr, - const char *openglComputeShader = nullptr, - const char *openclKernel = nullptr) { - auto localSize__ = localSize ? _fbb.CreateVector(*localSize) : 0; - auto key__ = key ? _fbb.CreateString(key) : 0; - auto metal__ = metal ? _fbb.CreateVector(*metal) : 0; - auto vulkan__ = vulkan ? _fbb.CreateVector(*vulkan) : 0; - auto openglComputeShader__ = openglComputeShader ? _fbb.CreateString(openglComputeShader) : 0; - auto openclKernel__ = openclKernel ? _fbb.CreateString(openclKernel) : 0; - return MNN::CreateGpuPipeline( - _fbb, - localSize__, - key__, - metal__, - vulkan__, - openglComputeShader__, - openclKernel__); -} - -flatbuffers::Offset CreateGpuPipeline(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GpuStageT : public flatbuffers::NativeTable { - typedef GpuStage TableType; - std::string pipeline; - std::vector groupSize; - std::vector inputIndexes; - std::vector outputIndexes; - std::vector> middleBuffer; - std::vector> constBuffer; - int32_t globalSizeIndex; - std::vector globalSizeDivide; - bool requireSize; - GpuStageT() - : globalSizeIndex(0), - requireSize(false) { - } -}; - -struct GpuStage FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GpuStageT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GpuStageTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PIPELINE = 4, - VT_GROUPSIZE = 6, - VT_INPUTINDEXES = 8, - VT_OUTPUTINDEXES = 10, - VT_MIDDLEBUFFER = 12, - VT_CONSTBUFFER = 14, - VT_GLOBALSIZEINDEX = 16, - VT_GLOBALSIZEDIVIDE = 18, - VT_REQUIRESIZE = 20 - }; - const flatbuffers::String *pipeline() const { - return GetPointer(VT_PIPELINE); - } - const flatbuffers::Vector *groupSize() const { - return GetPointer *>(VT_GROUPSIZE); - } - const flatbuffers::Vector *inputIndexes() const { - return GetPointer *>(VT_INPUTINDEXES); - } - const flatbuffers::Vector *outputIndexes() const { - return GetPointer *>(VT_OUTPUTINDEXES); - } - const flatbuffers::Vector> *middleBuffer() const { - return GetPointer> *>(VT_MIDDLEBUFFER); - } - const flatbuffers::Vector> *constBuffer() const { - return GetPointer> *>(VT_CONSTBUFFER); - } - int32_t globalSizeIndex() const { - return GetField(VT_GLOBALSIZEINDEX, 0); - } - const flatbuffers::Vector *globalSizeDivide() const { - return GetPointer *>(VT_GLOBALSIZEDIVIDE); - } - bool requireSize() const { - return GetField(VT_REQUIRESIZE, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_PIPELINE) && - verifier.VerifyString(pipeline()) && - VerifyOffset(verifier, VT_GROUPSIZE) && - verifier.VerifyVector(groupSize()) && - VerifyOffset(verifier, VT_INPUTINDEXES) && - verifier.VerifyVector(inputIndexes()) && - VerifyOffset(verifier, 
VT_OUTPUTINDEXES) && - verifier.VerifyVector(outputIndexes()) && - VerifyOffset(verifier, VT_MIDDLEBUFFER) && - verifier.VerifyVector(middleBuffer()) && - verifier.VerifyVectorOfTables(middleBuffer()) && - VerifyOffset(verifier, VT_CONSTBUFFER) && - verifier.VerifyVector(constBuffer()) && - verifier.VerifyVectorOfTables(constBuffer()) && - VerifyField(verifier, VT_GLOBALSIZEINDEX) && - VerifyOffset(verifier, VT_GLOBALSIZEDIVIDE) && - verifier.VerifyVector(globalSizeDivide()) && - VerifyField(verifier, VT_REQUIRESIZE) && - verifier.EndTable(); - } - GpuStageT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GpuStageT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GpuStageBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_pipeline(flatbuffers::Offset pipeline) { - fbb_.AddOffset(GpuStage::VT_PIPELINE, pipeline); - } - void add_groupSize(flatbuffers::Offset> groupSize) { - fbb_.AddOffset(GpuStage::VT_GROUPSIZE, groupSize); - } - void add_inputIndexes(flatbuffers::Offset> inputIndexes) { - fbb_.AddOffset(GpuStage::VT_INPUTINDEXES, inputIndexes); - } - void add_outputIndexes(flatbuffers::Offset> outputIndexes) { - fbb_.AddOffset(GpuStage::VT_OUTPUTINDEXES, outputIndexes); - } - void add_middleBuffer(flatbuffers::Offset>> middleBuffer) { - fbb_.AddOffset(GpuStage::VT_MIDDLEBUFFER, middleBuffer); - } - void add_constBuffer(flatbuffers::Offset>> constBuffer) { - fbb_.AddOffset(GpuStage::VT_CONSTBUFFER, constBuffer); - } - void add_globalSizeIndex(int32_t globalSizeIndex) { - fbb_.AddElement(GpuStage::VT_GLOBALSIZEINDEX, globalSizeIndex, 0); - } - void add_globalSizeDivide(flatbuffers::Offset> globalSizeDivide) { - fbb_.AddOffset(GpuStage::VT_GLOBALSIZEDIVIDE, globalSizeDivide); - } - void add_requireSize(bool requireSize) { - fbb_.AddElement(GpuStage::VT_REQUIRESIZE, static_cast(requireSize), 0); - } - explicit GpuStageBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GpuStageBuilder &operator=(const GpuStageBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGpuStage( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset pipeline = 0, - flatbuffers::Offset> groupSize = 0, - flatbuffers::Offset> inputIndexes = 0, - flatbuffers::Offset> outputIndexes = 0, - flatbuffers::Offset>> middleBuffer = 0, - flatbuffers::Offset>> constBuffer = 0, - int32_t globalSizeIndex = 0, - flatbuffers::Offset> globalSizeDivide = 0, - bool requireSize = false) { - GpuStageBuilder builder_(_fbb); - builder_.add_globalSizeDivide(globalSizeDivide); - builder_.add_globalSizeIndex(globalSizeIndex); - builder_.add_constBuffer(constBuffer); - builder_.add_middleBuffer(middleBuffer); - builder_.add_outputIndexes(outputIndexes); - builder_.add_inputIndexes(inputIndexes); - builder_.add_groupSize(groupSize); - builder_.add_pipeline(pipeline); - builder_.add_requireSize(requireSize); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateGpuStageDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *pipeline = nullptr, - const std::vector *groupSize = nullptr, - const std::vector *inputIndexes = nullptr, - const std::vector *outputIndexes = nullptr, - 
const std::vector> *middleBuffer = nullptr, - const std::vector> *constBuffer = nullptr, - int32_t globalSizeIndex = 0, - const std::vector *globalSizeDivide = nullptr, - bool requireSize = false) { - auto pipeline__ = pipeline ? _fbb.CreateString(pipeline) : 0; - auto groupSize__ = groupSize ? _fbb.CreateVector(*groupSize) : 0; - auto inputIndexes__ = inputIndexes ? _fbb.CreateVector(*inputIndexes) : 0; - auto outputIndexes__ = outputIndexes ? _fbb.CreateVector(*outputIndexes) : 0; - auto middleBuffer__ = middleBuffer ? _fbb.CreateVector>(*middleBuffer) : 0; - auto constBuffer__ = constBuffer ? _fbb.CreateVector>(*constBuffer) : 0; - auto globalSizeDivide__ = globalSizeDivide ? _fbb.CreateVector(*globalSizeDivide) : 0; - return MNN::CreateGpuStage( - _fbb, - pipeline__, - groupSize__, - inputIndexes__, - outputIndexes__, - middleBuffer__, - constBuffer__, - globalSizeIndex, - globalSizeDivide__, - requireSize); -} - -flatbuffers::Offset CreateGpuStage(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GpuFunctionT : public flatbuffers::NativeTable { - typedef GpuFunction TableType; - std::vector> stags; - std::string name; - GpuFunctionT() { - } -}; - -struct GpuFunction FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GpuFunctionT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GpuFunctionTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_STAGS = 4, - VT_NAME = 6 - }; - const flatbuffers::Vector> *stags() const { - return GetPointer> *>(VT_STAGS); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_STAGS) && - verifier.VerifyVector(stags()) && - verifier.VerifyVectorOfTables(stags()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); - } - GpuFunctionT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GpuFunctionT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GpuFunctionBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_stags(flatbuffers::Offset>> stags) { - fbb_.AddOffset(GpuFunction::VT_STAGS, stags); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(GpuFunction::VT_NAME, name); - } - explicit GpuFunctionBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GpuFunctionBuilder &operator=(const GpuFunctionBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGpuFunction( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> stags = 0, - flatbuffers::Offset name = 0) { - GpuFunctionBuilder builder_(_fbb); - builder_.add_name(name); - builder_.add_stags(stags); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateGpuFunctionDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *stags = nullptr, - const char *name = nullptr) { - auto stags__ = stags ? _fbb.CreateVector>(*stags) : 0; - auto name__ = name ? 
_fbb.CreateString(name) : 0; - return MNN::CreateGpuFunction( - _fbb, - stags__, - name__); -} - -flatbuffers::Offset CreateGpuFunction(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GpuLibraryT : public flatbuffers::NativeTable { - typedef GpuLibrary TableType; - std::vector> functions; - std::vector> pipeline; - std::string name; - GpuLibraryT() { - } -}; - -struct GpuLibrary FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GpuLibraryT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GpuLibraryTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUNCTIONS = 4, - VT_PIPELINE = 6, - VT_NAME = 8 - }; - const flatbuffers::Vector> *functions() const { - return GetPointer> *>(VT_FUNCTIONS); - } - const flatbuffers::Vector> *pipeline() const { - return GetPointer> *>(VT_PIPELINE); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_FUNCTIONS) && - verifier.VerifyVector(functions()) && - verifier.VerifyVectorOfTables(functions()) && - VerifyOffset(verifier, VT_PIPELINE) && - verifier.VerifyVector(pipeline()) && - verifier.VerifyVectorOfTables(pipeline()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); - } - GpuLibraryT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GpuLibraryT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GpuLibraryBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_functions(flatbuffers::Offset>> functions) { - fbb_.AddOffset(GpuLibrary::VT_FUNCTIONS, functions); - } - void add_pipeline(flatbuffers::Offset>> pipeline) { - fbb_.AddOffset(GpuLibrary::VT_PIPELINE, pipeline); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(GpuLibrary::VT_NAME, name); - } - explicit GpuLibraryBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GpuLibraryBuilder &operator=(const GpuLibraryBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGpuLibrary( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> functions = 0, - flatbuffers::Offset>> pipeline = 0, - flatbuffers::Offset name = 0) { - GpuLibraryBuilder builder_(_fbb); - builder_.add_name(name); - builder_.add_pipeline(pipeline); - builder_.add_functions(functions); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateGpuLibraryDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *functions = nullptr, - const std::vector> *pipeline = nullptr, - const char *name = nullptr) { - auto functions__ = functions ? _fbb.CreateVector>(*functions) : 0; - auto pipeline__ = pipeline ? _fbb.CreateVector>(*pipeline) : 0; - auto name__ = name ? 
_fbb.CreateString(name) : 0; - return MNN::CreateGpuLibrary( - _fbb, - functions__, - pipeline__, - name__); -} - -flatbuffers::Offset CreateGpuLibrary(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline GpuBufferT *GpuBuffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GpuBufferT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GpuBuffer::UnPackTo(GpuBufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = access(); _o->access = _e; }; - { auto _e = storage(); _o->storage = _e; }; - { auto _e = content(); if (_e) _o->content = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset GpuBuffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGpuBuffer(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGpuBuffer(flatbuffers::FlatBufferBuilder &_fbb, const GpuBufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuBufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _access = _o->access; - auto _storage = _o->storage; - auto _content = _o->content ? CreateBlob(_fbb, _o->content.get(), _rehasher) : 0; - return MNN::CreateGpuBuffer( - _fbb, - _access, - _storage, - _content); -} - -inline GpuPipelineT *GpuPipeline::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GpuPipelineT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GpuPipeline::UnPackTo(GpuPipelineT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = localSize(); if (_e) { _o->localSize.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->localSize[_i] = _e->Get(_i); } } }; - { auto _e = key(); if (_e) _o->key = _e->str(); }; - { auto _e = metal(); if (_e) { _o->metal.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metal[_i] = _e->Get(_i); } } }; - { auto _e = vulkan(); if (_e) { _o->vulkan.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->vulkan[_i] = _e->Get(_i); } } }; - { auto _e = openglComputeShader(); if (_e) _o->openglComputeShader = _e->str(); }; - { auto _e = openclKernel(); if (_e) _o->openclKernel = _e->str(); }; -} - -inline flatbuffers::Offset GpuPipeline::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGpuPipeline(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGpuPipeline(flatbuffers::FlatBufferBuilder &_fbb, const GpuPipelineT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuPipelineT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _localSize = _o->localSize.size() ? _fbb.CreateVector(_o->localSize) : 0; - auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key); - auto _metal = _o->metal.size() ? _fbb.CreateVector(_o->metal) : 0; - auto _vulkan = _o->vulkan.size() ? _fbb.CreateVector(_o->vulkan) : 0; - auto _openglComputeShader = _o->openglComputeShader.empty() ? 
0 : _fbb.CreateString(_o->openglComputeShader); - auto _openclKernel = _o->openclKernel.empty() ? 0 : _fbb.CreateString(_o->openclKernel); - return MNN::CreateGpuPipeline( - _fbb, - _localSize, - _key, - _metal, - _vulkan, - _openglComputeShader, - _openclKernel); -} - -inline GpuStageT *GpuStage::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GpuStageT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GpuStage::UnPackTo(GpuStageT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = pipeline(); if (_e) _o->pipeline = _e->str(); }; - { auto _e = groupSize(); if (_e) { _o->groupSize.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->groupSize[_i] = _e->Get(_i); } } }; - { auto _e = inputIndexes(); if (_e) { _o->inputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputIndexes[_i] = _e->Get(_i); } } }; - { auto _e = outputIndexes(); if (_e) { _o->outputIndexes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputIndexes[_i] = _e->Get(_i); } } }; - { auto _e = middleBuffer(); if (_e) { _o->middleBuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->middleBuffer[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = constBuffer(); if (_e) { _o->constBuffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->constBuffer[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = globalSizeIndex(); _o->globalSizeIndex = _e; }; - { auto _e = globalSizeDivide(); if (_e) { _o->globalSizeDivide.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->globalSizeDivide[_i] = _e->Get(_i); } } }; - { auto _e = requireSize(); _o->requireSize = _e; }; -} - -inline flatbuffers::Offset GpuStage::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGpuStage(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGpuStage(flatbuffers::FlatBufferBuilder &_fbb, const GpuStageT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuStageT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _pipeline = _o->pipeline.empty() ? 0 : _fbb.CreateString(_o->pipeline); - auto _groupSize = _o->groupSize.size() ? _fbb.CreateVector(_o->groupSize) : 0; - auto _inputIndexes = _o->inputIndexes.size() ? _fbb.CreateVector(_o->inputIndexes) : 0; - auto _outputIndexes = _o->outputIndexes.size() ? _fbb.CreateVector(_o->outputIndexes) : 0; - auto _middleBuffer = _o->middleBuffer.size() ? _fbb.CreateVector> (_o->middleBuffer.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuBuffer(*__va->__fbb, __va->__o->middleBuffer[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _constBuffer = _o->constBuffer.size() ? _fbb.CreateVector> (_o->constBuffer.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuBuffer(*__va->__fbb, __va->__o->constBuffer[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _globalSizeIndex = _o->globalSizeIndex; - auto _globalSizeDivide = _o->globalSizeDivide.size() ? 
_fbb.CreateVector(_o->globalSizeDivide) : 0; - auto _requireSize = _o->requireSize; - return MNN::CreateGpuStage( - _fbb, - _pipeline, - _groupSize, - _inputIndexes, - _outputIndexes, - _middleBuffer, - _constBuffer, - _globalSizeIndex, - _globalSizeDivide, - _requireSize); -} - -inline GpuFunctionT *GpuFunction::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GpuFunctionT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GpuFunction::UnPackTo(GpuFunctionT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = stags(); if (_e) { _o->stags.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->stags[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = name(); if (_e) _o->name = _e->str(); }; -} - -inline flatbuffers::Offset GpuFunction::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGpuFunction(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGpuFunction(flatbuffers::FlatBufferBuilder &_fbb, const GpuFunctionT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuFunctionT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _stags = _o->stags.size() ? _fbb.CreateVector> (_o->stags.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuStage(*__va->__fbb, __va->__o->stags[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); - return MNN::CreateGpuFunction( - _fbb, - _stags, - _name); -} - -inline GpuLibraryT *GpuLibrary::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GpuLibraryT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GpuLibrary::UnPackTo(GpuLibraryT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = functions(); if (_e) { _o->functions.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->functions[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = pipeline(); if (_e) { _o->pipeline.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->pipeline[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } }; - { auto _e = name(); if (_e) _o->name = _e->str(); }; -} - -inline flatbuffers::Offset GpuLibrary::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGpuLibrary(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGpuLibrary(flatbuffers::FlatBufferBuilder &_fbb, const GpuLibraryT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GpuLibraryT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _functions = _o->functions.size() ? _fbb.CreateVector> (_o->functions.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuFunction(*__va->__fbb, __va->__o->functions[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _pipeline = _o->pipeline.size() ? 
_fbb.CreateVector> (_o->pipeline.size(), [](size_t i, _VectorArgs *__va) { return CreateGpuPipeline(*__va->__fbb, __va->__o->pipeline[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); - return MNN::CreateGpuLibrary( - _fbb, - _functions, - _pipeline, - _name); -} - -inline const flatbuffers::TypeTable *STORAGE_TYPETypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - STORAGE_TYPETypeTable - }; - static const char * const names[] = { - "BUFFER", - "UNIFORM", - "IMAGE" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ACCESS_TYPETypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ACCESS_TYPETypeTable - }; - static const char * const names[] = { - "READ_ONLY", - "WRITE_ONLY", - "READ_WRITE" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GpuBufferTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ACCESS_TYPETypeTable, - STORAGE_TYPETypeTable, - BlobTypeTable - }; - static const char * const names[] = { - "access", - "storage", - "content" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GpuPipelineTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_CHAR, 1, -1 }, - { flatbuffers::ET_CHAR, 1, -1 }, - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_STRING, 0, -1 } - }; - static const char * const names[] = { - "localSize", - "key", - "metal", - "vulkan", - "openglComputeShader", - "openclKernel" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GpuStageTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_SEQUENCE, 1, 0 }, - { flatbuffers::ET_SEQUENCE, 1, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - GpuBufferTypeTable - }; - static const char * const names[] = { - "pipeline", - "groupSize", - "inputIndexes", - "outputIndexes", - "middleBuffer", - "constBuffer", - "globalSizeIndex", - "globalSizeDivide", - "requireSize" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GpuFunctionTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { 
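For context, a minimal sketch of how the Pack/UnPack helpers above are typically used: they round-trip between the read-only table views (GpuLibrary, GpuPipeline, ...) and the mutable object-API structs (GpuLibraryT, GpuPipelineT, ...). This is an illustration only, assuming the pre-deletion GpuLibrary_generated.h and the standard FlatBuffers C++ API; the header itself is regenerated from the schema rather than kept in version control.

// Sketch only: assumes GpuLibrary_generated.h (removed by this patch) is regenerated at build time.
#include <memory>
#include "flatbuffers/flatbuffers.h"
// #include "GpuLibrary_generated.h"

void GpuLibraryRoundTrip() {
    // 1. Fill the mutable object-API struct.
    MNN::GpuLibraryT lib;
    lib.name = "demo";
    lib.pipeline.emplace_back(new MNN::GpuPipelineT());
    lib.pipeline.back()->key = "conv_1x1";

    // 2. Serialize: GpuLibrary::Pack forwards to CreateGpuLibrary(fbb, &lib),
    //    which emits the nested strings/vectors exactly as in the generated helpers above.
    flatbuffers::FlatBufferBuilder fbb;
    fbb.Finish(MNN::GpuLibrary::Pack(fbb, &lib));

    // 3. Deserialize: view the finished buffer as a table, then unpack it
    //    back into a freshly allocated GpuLibraryT.
    auto table = flatbuffers::GetRoot<MNN::GpuLibrary>(fbb.GetBufferPointer());
    std::unique_ptr<MNN::GpuLibraryT> copy(table->UnPack());
}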
flatbuffers::ET_SEQUENCE, 1, 0 }, - { flatbuffers::ET_STRING, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - GpuStageTypeTable - }; - static const char * const names[] = { - "stags", - "name" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GpuLibraryTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 1, 0 }, - { flatbuffers::ET_SEQUENCE, 1, 1 }, - { flatbuffers::ET_STRING, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - GpuFunctionTypeTable, - GpuPipelineTypeTable - }; - static const char * const names[] = { - "functions", - "pipeline", - "name" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_GPULIBRARY_MNN_H_ diff --git a/schema/current/MNN_generated.h b/schema/current/MNN_generated.h deleted file mode 100644 index 96cbf638..00000000 --- a/schema/current/MNN_generated.h +++ /dev/null @@ -1,6285 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_MNN_MNN_H_ -#define FLATBUFFERS_GENERATED_MNN_MNN_H_ - - -#include "CaffeOp_generated.h" -#include "GpuLibrary_generated.h" -#include "TFQuantizeOp_generated.h" -#include "Tensor_generated.h" -#include "TensorflowOp_generated.h" -#include "Type_generated.h" -#include "UserDefine_generated.h" - -namespace MNN { - -struct Plugin; -struct PluginT; - -struct Extra; -struct ExtraT; - -struct Op; -struct OpT; - -struct TensorDescribe; -struct TensorDescribeT; - -struct Net; -struct NetT; - -inline const flatbuffers::TypeTable *PluginTypeTable(); - -inline const flatbuffers::TypeTable *ExtraTypeTable(); - -inline const flatbuffers::TypeTable *OpTypeTable(); - -inline const flatbuffers::TypeTable *TensorDescribeTypeTable(); - -inline const flatbuffers::TypeTable *NetTypeTable(); - -enum OpType { - OpType_AbsVal = 0, - OpType_QuantizedAdd = 1, - OpType_ArgMax = 2, - OpType_AsString = 3, - OpType_InstanceNorm = 4, - OpType_BatchToSpaceND = 5, - OpType_Bias = 6, - OpType_BinaryOp = 7, - OpType_Bnll = 8, - OpType_Cast = 9, - OpType_Concat = 10, - OpType_Const = 11, - OpType_Convolution = 12, - OpType_ConvolutionDepthwise = 13, - OpType_Crop = 14, - OpType_CropAndResize = 15, - OpType_Cubic = 16, - OpType_Deconvolution = 17, - OpType_DeconvolutionDepthwise = 18, - OpType_Dequantize = 19, - OpType_DetectionOutput = 20, - OpType_Dropout = 21, - OpType_Eltwise = 22, - OpType_ELU = 23, - OpType_Embed = 24, - OpType_Exp = 25, - OpType_ExpandDims = 26, - OpType_Fill = 27, - OpType_Flatten = 28, - OpType_FloorMod = 29, - OpType_Gather = 30, - OpType_GatherV2 = 31, - OpType_Im2Seq = 32, - OpType_InnerProduct = 33, - OpType_Input = 34, - OpType_Interp = 35, - OpType_Log = 36, - OpType_LRN = 37, - OpType_LSTM = 38, - OpType_MatMul = 39, - OpType_MVN = 40, - OpType_NonMaxSuppression = 41, - OpType_NonMaxSuppressionV2 = 42, - OpType_Normalize = 43, - OpType_Pack = 44, - OpType_Padding = 45, - OpType_Permute = 46, - OpType_Pooling = 47, - OpType_Power = 48, - OpType_PReLU = 49, - OpType_PriorBox = 50, - OpType_Proposal = 51, - OpType_QuantizedAvgPool = 52, - OpType_QuantizedBiasAdd = 53, - OpType_QuantizedConcat = 54, - OpType_QuantizedDepthwiseConv2D = 55, - OpType_QuantizedLogistic = 56, - OpType_QuantizedMatMul = 57, - OpType_QuantizedMaxPool = 
58, - OpType_QuantizedRelu = 59, - OpType_QuantizedRelu6 = 60, - OpType_QuantizedReshape = 61, - OpType_QuantizedSoftmax = 62, - OpType_QuantizeMaxMin = 63, - OpType_QuantizeV2 = 64, - OpType_Range = 65, - OpType_Rank = 66, - OpType_ReduceJoin = 67, - OpType_Reduction = 68, - OpType_ReLU = 69, - OpType_ReLU6 = 70, - OpType_RequantizationRange = 71, - OpType_Requantize = 72, - OpType_Reshape = 73, - OpType_Resize = 74, - OpType_RNN = 75, - OpType_ROIPooling = 76, - OpType_Scale = 77, - OpType_Selu = 78, - OpType_Seq2Out = 79, - OpType_Shape = 80, - OpType_Sigmoid = 81, - OpType_Size = 82, - OpType_Slice = 83, - OpType_SliceTf = 84, - OpType_Softmax = 85, - OpType_SpaceToBatchND = 86, - OpType_SpatialProduct = 87, - OpType_Split = 88, - OpType_SPP = 89, - OpType_Squeeze = 90, - OpType_StridedSlice = 91, - OpType_StringJoin = 92, - OpType_StringSplit = 93, - OpType_StringToNumber = 94, - OpType_TanH = 95, - OpType_TfQuantizedConv2D = 96, - OpType_Threshold = 97, - OpType_Tile = 98, - OpType_TopKV2 = 99, - OpType_Transpose = 100, - OpType_UnaryOp = 101, - OpType_Unpack = 102, - OpType_Where = 103, - OpType_Moments = 104, - OpType_RNNSequenceGRU = 105, - OpType_BatchMatMul = 106, - OpType_Unsqueeze = 107, - OpType_CosineSimilarity = 108, - OpType_DepthToSpace = 109, - OpType_SpaceToDepth = 110, - OpType_ReverseSequence = 111, - OpType_Pooling3D = 112, - OpType_Convolution3D = 113, - OpType_MatrixBandPart = 114, - OpType_GatherND = 115, - OpType_DetectionPostProcess = 116, - OpType_UnravelIndex = 117, - OpType_ScatterNd = 118, - OpType_OneHot = 119, - OpType_BroadcastTo = 120, - OpType_Dilation2D = 121, - OpType_MaxLayerCount = 128, - OpType_ConvertTensor = 129, - OpType_ArgMin = 130, - OpType_LinSpace = 131, - OpType_PLUGIN = 256, - OpType_Select = 257, - OpType_ZerosLike = 258, - OpType_Broastcast = 259, - OpType_SetDiff1D = 260, - OpType_ReluGrad = 261, - OpType_Relu6Grad = 262, - OpType_PoolGrad = 263, - OpType_SoftmaxGrad = 264, - OpType_Conv2DBackPropFilter = 265, - OpType_TrainableParam = 266, - OpType_BatchNorm = 267, - OpType_Extra = 512, - OpType_ConvInt8 = 513, - OpType_Int8ToFloat = 514, - OpType_DepthwiseConvInt8 = 515, - OpType_PoolInt8 = 516, - OpType_FloatToInt8 = 517, - OpType_EltwiseInt8 = 518, - OpType_MIN = OpType_AbsVal, - OpType_MAX = OpType_EltwiseInt8 -}; - -inline const OpType (&EnumValuesOpType())[145] { - static const OpType values[] = { - OpType_AbsVal, - OpType_QuantizedAdd, - OpType_ArgMax, - OpType_AsString, - OpType_InstanceNorm, - OpType_BatchToSpaceND, - OpType_Bias, - OpType_BinaryOp, - OpType_Bnll, - OpType_Cast, - OpType_Concat, - OpType_Const, - OpType_Convolution, - OpType_ConvolutionDepthwise, - OpType_Crop, - OpType_CropAndResize, - OpType_Cubic, - OpType_Deconvolution, - OpType_DeconvolutionDepthwise, - OpType_Dequantize, - OpType_DetectionOutput, - OpType_Dropout, - OpType_Eltwise, - OpType_ELU, - OpType_Embed, - OpType_Exp, - OpType_ExpandDims, - OpType_Fill, - OpType_Flatten, - OpType_FloorMod, - OpType_Gather, - OpType_GatherV2, - OpType_Im2Seq, - OpType_InnerProduct, - OpType_Input, - OpType_Interp, - OpType_Log, - OpType_LRN, - OpType_LSTM, - OpType_MatMul, - OpType_MVN, - OpType_NonMaxSuppression, - OpType_NonMaxSuppressionV2, - OpType_Normalize, - OpType_Pack, - OpType_Padding, - OpType_Permute, - OpType_Pooling, - OpType_Power, - OpType_PReLU, - OpType_PriorBox, - OpType_Proposal, - OpType_QuantizedAvgPool, - OpType_QuantizedBiasAdd, - OpType_QuantizedConcat, - OpType_QuantizedDepthwiseConv2D, - OpType_QuantizedLogistic, - 
OpType_QuantizedMatMul, - OpType_QuantizedMaxPool, - OpType_QuantizedRelu, - OpType_QuantizedRelu6, - OpType_QuantizedReshape, - OpType_QuantizedSoftmax, - OpType_QuantizeMaxMin, - OpType_QuantizeV2, - OpType_Range, - OpType_Rank, - OpType_ReduceJoin, - OpType_Reduction, - OpType_ReLU, - OpType_ReLU6, - OpType_RequantizationRange, - OpType_Requantize, - OpType_Reshape, - OpType_Resize, - OpType_RNN, - OpType_ROIPooling, - OpType_Scale, - OpType_Selu, - OpType_Seq2Out, - OpType_Shape, - OpType_Sigmoid, - OpType_Size, - OpType_Slice, - OpType_SliceTf, - OpType_Softmax, - OpType_SpaceToBatchND, - OpType_SpatialProduct, - OpType_Split, - OpType_SPP, - OpType_Squeeze, - OpType_StridedSlice, - OpType_StringJoin, - OpType_StringSplit, - OpType_StringToNumber, - OpType_TanH, - OpType_TfQuantizedConv2D, - OpType_Threshold, - OpType_Tile, - OpType_TopKV2, - OpType_Transpose, - OpType_UnaryOp, - OpType_Unpack, - OpType_Where, - OpType_Moments, - OpType_RNNSequenceGRU, - OpType_BatchMatMul, - OpType_Unsqueeze, - OpType_CosineSimilarity, - OpType_DepthToSpace, - OpType_SpaceToDepth, - OpType_ReverseSequence, - OpType_Pooling3D, - OpType_Convolution3D, - OpType_MatrixBandPart, - OpType_GatherND, - OpType_DetectionPostProcess, - OpType_UnravelIndex, - OpType_ScatterNd, - OpType_OneHot, - OpType_BroadcastTo, - OpType_Dilation2D, - OpType_MaxLayerCount, - OpType_ConvertTensor, - OpType_ArgMin, - OpType_LinSpace, - OpType_PLUGIN, - OpType_Select, - OpType_ZerosLike, - OpType_Broastcast, - OpType_SetDiff1D, - OpType_ReluGrad, - OpType_Relu6Grad, - OpType_PoolGrad, - OpType_SoftmaxGrad, - OpType_Conv2DBackPropFilter, - OpType_TrainableParam, - OpType_BatchNorm, - OpType_Extra, - OpType_ConvInt8, - OpType_Int8ToFloat, - OpType_DepthwiseConvInt8, - OpType_PoolInt8, - OpType_FloatToInt8, - OpType_EltwiseInt8 - }; - return values; -} - -inline const char * const *EnumNamesOpType() { - static const char * const names[] = { - "AbsVal", - "QuantizedAdd", - "ArgMax", - "AsString", - "InstanceNorm", - "BatchToSpaceND", - "Bias", - "BinaryOp", - "Bnll", - "Cast", - "Concat", - "Const", - "Convolution", - "ConvolutionDepthwise", - "Crop", - "CropAndResize", - "Cubic", - "Deconvolution", - "DeconvolutionDepthwise", - "Dequantize", - "DetectionOutput", - "Dropout", - "Eltwise", - "ELU", - "Embed", - "Exp", - "ExpandDims", - "Fill", - "Flatten", - "FloorMod", - "Gather", - "GatherV2", - "Im2Seq", - "InnerProduct", - "Input", - "Interp", - "Log", - "LRN", - "LSTM", - "MatMul", - "MVN", - "NonMaxSuppression", - "NonMaxSuppressionV2", - "Normalize", - "Pack", - "Padding", - "Permute", - "Pooling", - "Power", - "PReLU", - "PriorBox", - "Proposal", - "QuantizedAvgPool", - "QuantizedBiasAdd", - "QuantizedConcat", - "QuantizedDepthwiseConv2D", - "QuantizedLogistic", - "QuantizedMatMul", - "QuantizedMaxPool", - "QuantizedRelu", - "QuantizedRelu6", - "QuantizedReshape", - "QuantizedSoftmax", - "QuantizeMaxMin", - "QuantizeV2", - "Range", - "Rank", - "ReduceJoin", - "Reduction", - "ReLU", - "ReLU6", - "RequantizationRange", - "Requantize", - "Reshape", - "Resize", - "RNN", - "ROIPooling", - "Scale", - "Selu", - "Seq2Out", - "Shape", - "Sigmoid", - "Size", - "Slice", - "SliceTf", - "Softmax", - "SpaceToBatchND", - "SpatialProduct", - "Split", - "SPP", - "Squeeze", - "StridedSlice", - "StringJoin", - "StringSplit", - "StringToNumber", - "TanH", - "TfQuantizedConv2D", - "Threshold", - "Tile", - "TopKV2", - "Transpose", - "UnaryOp", - "Unpack", - "Where", - "Moments", - "RNNSequenceGRU", - "BatchMatMul", - "Unsqueeze", - 
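For context on the long runs of empty "" entries appearing below: OpType values are assigned in sparse blocks (0-121, 128-131, 256-267, and 512-518), so the generated EnumNamesOpType() table pads every gap with an empty string, and EnumNameOpType() further below can index the table directly by enum value after a simple range check. A small usage sketch, assuming the pre-deletion MNN_generated.h:

// Sketch only: assumes MNN_generated.h (removed by this patch) is regenerated from the schema.
#include <cstdio>
// #include "MNN_generated.h"

void PrintOpType(MNN::OpType t) {
    // Returns "" both for values outside [OpType_MIN, OpType_MAX] and for the
    // padded gap entries inside the table.
    const char* name = MNN::EnumNameOpType(t);
    std::printf("OpType %d -> %s\n", static_cast<int>(t), *name ? name : "<unused value>");
}

// PrintOpType(MNN::OpType_Convolution);        // "OpType 12 -> Convolution"
// PrintOpType(static_cast<MNN::OpType>(125));  // gap between 121 and 128 -> "<unused value>"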
"CosineSimilarity", - "DepthToSpace", - "SpaceToDepth", - "ReverseSequence", - "Pooling3D", - "Convolution3D", - "MatrixBandPart", - "GatherND", - "DetectionPostProcess", - "UnravelIndex", - "ScatterNd", - "OneHot", - "BroadcastTo", - "Dilation2D", - "", - "", - "", - "", - "", - "", - "MaxLayerCount", - "ConvertTensor", - "ArgMin", - "LinSpace", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "PLUGIN", - "Select", - "ZerosLike", - "Broastcast", - "SetDiff1D", - "ReluGrad", - "Relu6Grad", - "PoolGrad", - "SoftmaxGrad", - "Conv2DBackPropFilter", - "TrainableParam", - "BatchNorm", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "", - "Extra", - "ConvInt8", - "Int8ToFloat", - "DepthwiseConvInt8", - "PoolInt8", - "FloatToInt8", - "EltwiseInt8", - nullptr - }; - return names; -} - -inline const char *EnumNameOpType(OpType e) { - if (e < OpType_AbsVal || e > OpType_EltwiseInt8) return ""; - const size_t index = static_cast(e); - return EnumNamesOpType()[index]; -} - -enum OpParameter { - OpParameter_NONE = 0, - OpParameter_QuantizedAdd = 1, - OpParameter_ArgMax = 2, - OpParameter_AsString = 3, - OpParameter_Axis = 4, - OpParameter_BatchNorm = 5, - OpParameter_BinaryOp = 6, - OpParameter_Blob = 7, - OpParameter_CastParam = 8, - OpParameter_Convolution2D = 9, - OpParameter_Crop = 10, - OpParameter_CropAndResize = 11, - OpParameter_Dequantize = 12, - OpParameter_DetectionOutput = 13, - OpParameter_Eltwise = 14, - 
OpParameter_ExpandDims = 15, - OpParameter_Fill = 16, - OpParameter_Flatten = 17, - OpParameter_Gather = 18, - OpParameter_GatherV2 = 19, - OpParameter_InnerProduct = 20, - OpParameter_Input = 21, - OpParameter_Interp = 22, - OpParameter_LRN = 23, - OpParameter_LSTM = 24, - OpParameter_MatMul = 25, - OpParameter_NonMaxSuppressionV2 = 26, - OpParameter_Normalize = 27, - OpParameter_PackParam = 28, - OpParameter_Permute = 29, - OpParameter_Plugin = 30, - OpParameter_Pool = 31, - OpParameter_PRelu = 32, - OpParameter_PriorBox = 33, - OpParameter_Proposal = 34, - OpParameter_QuantizedAvgPool = 35, - OpParameter_QuantizedBiasAdd = 36, - OpParameter_QuantizedConcat = 37, - OpParameter_QuantizedLogistic = 38, - OpParameter_QuantizedMatMul = 39, - OpParameter_QuantizedMaxPool = 40, - OpParameter_QuantizedRelu = 41, - OpParameter_QuantizedRelu6 = 42, - OpParameter_QuantizedReshape = 43, - OpParameter_QuantizedSoftmax = 44, - OpParameter_QuantizeMaxMin = 45, - OpParameter_QuantizeV2 = 46, - OpParameter_Range = 47, - OpParameter_Rank = 48, - OpParameter_ReduceJoin = 49, - OpParameter_ReductionParam = 50, - OpParameter_Relu = 51, - OpParameter_Relu6 = 52, - OpParameter_RequantizationRange = 53, - OpParameter_Requantize = 54, - OpParameter_Reshape = 55, - OpParameter_Resize = 56, - OpParameter_RoiPooling = 57, - OpParameter_Scale = 58, - OpParameter_Selu = 59, - OpParameter_Size = 60, - OpParameter_Slice = 61, - OpParameter_SliceTf = 62, - OpParameter_SpaceBatch = 63, - OpParameter_SqueezeParam = 64, - OpParameter_StridedSliceParam = 65, - OpParameter_TensorConvertInfo = 66, - OpParameter_TfQuantizedConv2D = 67, - OpParameter_TopKV2 = 68, - OpParameter_Transpose = 69, - OpParameter_UnaryOp = 70, - OpParameter_MomentsParam = 71, - OpParameter_RNNParam = 72, - OpParameter_BatchMatMulParam = 73, - OpParameter_QuantizedFloatParam = 74, - OpParameter_DepthSpaceParam = 75, - OpParameter_EltwiseInt8 = 76, - OpParameter_ReverseSequenceParam = 77, - OpParameter_Extra = 78, - OpParameter_Pool3D = 79, - OpParameter_Convolution3D = 80, - OpParameter_ELU = 81, - OpParameter_DetectionPostProcessParam = 82, - OpParameter_OneHotParam = 83, - OpParameter_PadParam = 84, - OpParameter_MIN = OpParameter_NONE, - OpParameter_MAX = OpParameter_PadParam -}; - -inline const OpParameter (&EnumValuesOpParameter())[85] { - static const OpParameter values[] = { - OpParameter_NONE, - OpParameter_QuantizedAdd, - OpParameter_ArgMax, - OpParameter_AsString, - OpParameter_Axis, - OpParameter_BatchNorm, - OpParameter_BinaryOp, - OpParameter_Blob, - OpParameter_CastParam, - OpParameter_Convolution2D, - OpParameter_Crop, - OpParameter_CropAndResize, - OpParameter_Dequantize, - OpParameter_DetectionOutput, - OpParameter_Eltwise, - OpParameter_ExpandDims, - OpParameter_Fill, - OpParameter_Flatten, - OpParameter_Gather, - OpParameter_GatherV2, - OpParameter_InnerProduct, - OpParameter_Input, - OpParameter_Interp, - OpParameter_LRN, - OpParameter_LSTM, - OpParameter_MatMul, - OpParameter_NonMaxSuppressionV2, - OpParameter_Normalize, - OpParameter_PackParam, - OpParameter_Permute, - OpParameter_Plugin, - OpParameter_Pool, - OpParameter_PRelu, - OpParameter_PriorBox, - OpParameter_Proposal, - OpParameter_QuantizedAvgPool, - OpParameter_QuantizedBiasAdd, - OpParameter_QuantizedConcat, - OpParameter_QuantizedLogistic, - OpParameter_QuantizedMatMul, - OpParameter_QuantizedMaxPool, - OpParameter_QuantizedRelu, - OpParameter_QuantizedRelu6, - OpParameter_QuantizedReshape, - OpParameter_QuantizedSoftmax, - OpParameter_QuantizeMaxMin, - 
OpParameter_QuantizeV2, - OpParameter_Range, - OpParameter_Rank, - OpParameter_ReduceJoin, - OpParameter_ReductionParam, - OpParameter_Relu, - OpParameter_Relu6, - OpParameter_RequantizationRange, - OpParameter_Requantize, - OpParameter_Reshape, - OpParameter_Resize, - OpParameter_RoiPooling, - OpParameter_Scale, - OpParameter_Selu, - OpParameter_Size, - OpParameter_Slice, - OpParameter_SliceTf, - OpParameter_SpaceBatch, - OpParameter_SqueezeParam, - OpParameter_StridedSliceParam, - OpParameter_TensorConvertInfo, - OpParameter_TfQuantizedConv2D, - OpParameter_TopKV2, - OpParameter_Transpose, - OpParameter_UnaryOp, - OpParameter_MomentsParam, - OpParameter_RNNParam, - OpParameter_BatchMatMulParam, - OpParameter_QuantizedFloatParam, - OpParameter_DepthSpaceParam, - OpParameter_EltwiseInt8, - OpParameter_ReverseSequenceParam, - OpParameter_Extra, - OpParameter_Pool3D, - OpParameter_Convolution3D, - OpParameter_ELU, - OpParameter_DetectionPostProcessParam, - OpParameter_OneHotParam, - OpParameter_PadParam - }; - return values; -} - -inline const char * const *EnumNamesOpParameter() { - static const char * const names[] = { - "NONE", - "QuantizedAdd", - "ArgMax", - "AsString", - "Axis", - "BatchNorm", - "BinaryOp", - "Blob", - "CastParam", - "Convolution2D", - "Crop", - "CropAndResize", - "Dequantize", - "DetectionOutput", - "Eltwise", - "ExpandDims", - "Fill", - "Flatten", - "Gather", - "GatherV2", - "InnerProduct", - "Input", - "Interp", - "LRN", - "LSTM", - "MatMul", - "NonMaxSuppressionV2", - "Normalize", - "PackParam", - "Permute", - "Plugin", - "Pool", - "PRelu", - "PriorBox", - "Proposal", - "QuantizedAvgPool", - "QuantizedBiasAdd", - "QuantizedConcat", - "QuantizedLogistic", - "QuantizedMatMul", - "QuantizedMaxPool", - "QuantizedRelu", - "QuantizedRelu6", - "QuantizedReshape", - "QuantizedSoftmax", - "QuantizeMaxMin", - "QuantizeV2", - "Range", - "Rank", - "ReduceJoin", - "ReductionParam", - "Relu", - "Relu6", - "RequantizationRange", - "Requantize", - "Reshape", - "Resize", - "RoiPooling", - "Scale", - "Selu", - "Size", - "Slice", - "SliceTf", - "SpaceBatch", - "SqueezeParam", - "StridedSliceParam", - "TensorConvertInfo", - "TfQuantizedConv2D", - "TopKV2", - "Transpose", - "UnaryOp", - "MomentsParam", - "RNNParam", - "BatchMatMulParam", - "QuantizedFloatParam", - "DepthSpaceParam", - "EltwiseInt8", - "ReverseSequenceParam", - "Extra", - "Pool3D", - "Convolution3D", - "ELU", - "DetectionPostProcessParam", - "OneHotParam", - "PadParam", - nullptr - }; - return names; -} - -inline const char *EnumNameOpParameter(OpParameter e) { - if (e < OpParameter_NONE || e > OpParameter_PadParam) return ""; - const size_t index = static_cast(e); - return EnumNamesOpParameter()[index]; -} - -template struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_NONE; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedAdd; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_ArgMax; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_AsString; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Axis; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_BatchNorm; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_BinaryOp; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = 
OpParameter_Blob; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_CastParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Convolution2D; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Crop; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_CropAndResize; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Dequantize; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_DetectionOutput; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Eltwise; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_ExpandDims; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Fill; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Flatten; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Gather; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_GatherV2; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_InnerProduct; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Input; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Interp; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_LRN; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_LSTM; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_MatMul; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_NonMaxSuppressionV2; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Normalize; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_PackParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Permute; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Plugin; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Pool; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_PRelu; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_PriorBox; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Proposal; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedAvgPool; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedBiasAdd; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedConcat; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedLogistic; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedMatMul; -}; - -template<> struct OpParameterTraits { - static const OpParameter 
enum_value = OpParameter_QuantizedMaxPool; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedRelu; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedRelu6; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedReshape; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedSoftmax; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizeMaxMin; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizeV2; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Range; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Rank; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_ReduceJoin; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_ReductionParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Relu; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Relu6; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_RequantizationRange; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Requantize; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Reshape; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Resize; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_RoiPooling; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Scale; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Selu; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Size; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Slice; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_SliceTf; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_SpaceBatch; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_SqueezeParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_StridedSliceParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_TensorConvertInfo; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_TfQuantizedConv2D; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_TopKV2; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Transpose; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_UnaryOp; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_MomentsParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_RNNParam; -}; - -template<> struct 
OpParameterTraits { - static const OpParameter enum_value = OpParameter_BatchMatMulParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_QuantizedFloatParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_DepthSpaceParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_EltwiseInt8; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_ReverseSequenceParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Extra; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Pool3D; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_Convolution3D; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_ELU; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_DetectionPostProcessParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_OneHotParam; -}; - -template<> struct OpParameterTraits { - static const OpParameter enum_value = OpParameter_PadParam; -}; - -struct OpParameterUnion { - OpParameter type; - void *value; - - OpParameterUnion() : type(OpParameter_NONE), value(nullptr) {} - OpParameterUnion(OpParameterUnion&& u) FLATBUFFERS_NOEXCEPT : - type(OpParameter_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - OpParameterUnion(const OpParameterUnion &) FLATBUFFERS_NOEXCEPT; - OpParameterUnion &operator=(const OpParameterUnion &u) FLATBUFFERS_NOEXCEPT - { OpParameterUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - OpParameterUnion &operator=(OpParameterUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~OpParameterUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - Reset(); - type = OpParameterTraits::enum_value; - if (type != OpParameter_NONE) { - value = new T(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL - - static void *UnPack(const void *obj, OpParameter type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - QuantizedAddT *AsQuantizedAdd() { - return type == OpParameter_QuantizedAdd ? - reinterpret_cast(value) : nullptr; - } - const QuantizedAddT *AsQuantizedAdd() const { - return type == OpParameter_QuantizedAdd ? - reinterpret_cast(value) : nullptr; - } - ArgMaxT *AsArgMax() { - return type == OpParameter_ArgMax ? - reinterpret_cast(value) : nullptr; - } - const ArgMaxT *AsArgMax() const { - return type == OpParameter_ArgMax ? - reinterpret_cast(value) : nullptr; - } - AsStringT *AsAsString() { - return type == OpParameter_AsString ? - reinterpret_cast(value) : nullptr; - } - const AsStringT *AsAsString() const { - return type == OpParameter_AsString ? - reinterpret_cast(value) : nullptr; - } - AxisT *AsAxis() { - return type == OpParameter_Axis ? - reinterpret_cast(value) : nullptr; - } - const AxisT *AsAxis() const { - return type == OpParameter_Axis ? - reinterpret_cast(value) : nullptr; - } - BatchNormT *AsBatchNorm() { - return type == OpParameter_BatchNorm ? 
- reinterpret_cast(value) : nullptr; - } - const BatchNormT *AsBatchNorm() const { - return type == OpParameter_BatchNorm ? - reinterpret_cast(value) : nullptr; - } - BinaryOpT *AsBinaryOp() { - return type == OpParameter_BinaryOp ? - reinterpret_cast(value) : nullptr; - } - const BinaryOpT *AsBinaryOp() const { - return type == OpParameter_BinaryOp ? - reinterpret_cast(value) : nullptr; - } - BlobT *AsBlob() { - return type == OpParameter_Blob ? - reinterpret_cast(value) : nullptr; - } - const BlobT *AsBlob() const { - return type == OpParameter_Blob ? - reinterpret_cast(value) : nullptr; - } - CastParamT *AsCastParam() { - return type == OpParameter_CastParam ? - reinterpret_cast(value) : nullptr; - } - const CastParamT *AsCastParam() const { - return type == OpParameter_CastParam ? - reinterpret_cast(value) : nullptr; - } - Convolution2DT *AsConvolution2D() { - return type == OpParameter_Convolution2D ? - reinterpret_cast(value) : nullptr; - } - const Convolution2DT *AsConvolution2D() const { - return type == OpParameter_Convolution2D ? - reinterpret_cast(value) : nullptr; - } - CropT *AsCrop() { - return type == OpParameter_Crop ? - reinterpret_cast(value) : nullptr; - } - const CropT *AsCrop() const { - return type == OpParameter_Crop ? - reinterpret_cast(value) : nullptr; - } - CropAndResizeT *AsCropAndResize() { - return type == OpParameter_CropAndResize ? - reinterpret_cast(value) : nullptr; - } - const CropAndResizeT *AsCropAndResize() const { - return type == OpParameter_CropAndResize ? - reinterpret_cast(value) : nullptr; - } - DequantizeT *AsDequantize() { - return type == OpParameter_Dequantize ? - reinterpret_cast(value) : nullptr; - } - const DequantizeT *AsDequantize() const { - return type == OpParameter_Dequantize ? - reinterpret_cast(value) : nullptr; - } - DetectionOutputT *AsDetectionOutput() { - return type == OpParameter_DetectionOutput ? - reinterpret_cast(value) : nullptr; - } - const DetectionOutputT *AsDetectionOutput() const { - return type == OpParameter_DetectionOutput ? - reinterpret_cast(value) : nullptr; - } - EltwiseT *AsEltwise() { - return type == OpParameter_Eltwise ? - reinterpret_cast(value) : nullptr; - } - const EltwiseT *AsEltwise() const { - return type == OpParameter_Eltwise ? - reinterpret_cast(value) : nullptr; - } - ExpandDimsT *AsExpandDims() { - return type == OpParameter_ExpandDims ? - reinterpret_cast(value) : nullptr; - } - const ExpandDimsT *AsExpandDims() const { - return type == OpParameter_ExpandDims ? - reinterpret_cast(value) : nullptr; - } - FillT *AsFill() { - return type == OpParameter_Fill ? - reinterpret_cast(value) : nullptr; - } - const FillT *AsFill() const { - return type == OpParameter_Fill ? - reinterpret_cast(value) : nullptr; - } - FlattenT *AsFlatten() { - return type == OpParameter_Flatten ? - reinterpret_cast(value) : nullptr; - } - const FlattenT *AsFlatten() const { - return type == OpParameter_Flatten ? - reinterpret_cast(value) : nullptr; - } - GatherT *AsGather() { - return type == OpParameter_Gather ? - reinterpret_cast(value) : nullptr; - } - const GatherT *AsGather() const { - return type == OpParameter_Gather ? - reinterpret_cast(value) : nullptr; - } - GatherV2T *AsGatherV2() { - return type == OpParameter_GatherV2 ? - reinterpret_cast(value) : nullptr; - } - const GatherV2T *AsGatherV2() const { - return type == OpParameter_GatherV2 ? - reinterpret_cast(value) : nullptr; - } - InnerProductT *AsInnerProduct() { - return type == OpParameter_InnerProduct ? 
- reinterpret_cast(value) : nullptr; - } - const InnerProductT *AsInnerProduct() const { - return type == OpParameter_InnerProduct ? - reinterpret_cast(value) : nullptr; - } - InputT *AsInput() { - return type == OpParameter_Input ? - reinterpret_cast(value) : nullptr; - } - const InputT *AsInput() const { - return type == OpParameter_Input ? - reinterpret_cast(value) : nullptr; - } - InterpT *AsInterp() { - return type == OpParameter_Interp ? - reinterpret_cast(value) : nullptr; - } - const InterpT *AsInterp() const { - return type == OpParameter_Interp ? - reinterpret_cast(value) : nullptr; - } - LRNT *AsLRN() { - return type == OpParameter_LRN ? - reinterpret_cast(value) : nullptr; - } - const LRNT *AsLRN() const { - return type == OpParameter_LRN ? - reinterpret_cast(value) : nullptr; - } - LSTMT *AsLSTM() { - return type == OpParameter_LSTM ? - reinterpret_cast(value) : nullptr; - } - const LSTMT *AsLSTM() const { - return type == OpParameter_LSTM ? - reinterpret_cast(value) : nullptr; - } - MatMulT *AsMatMul() { - return type == OpParameter_MatMul ? - reinterpret_cast(value) : nullptr; - } - const MatMulT *AsMatMul() const { - return type == OpParameter_MatMul ? - reinterpret_cast(value) : nullptr; - } - NonMaxSuppressionV2T *AsNonMaxSuppressionV2() { - return type == OpParameter_NonMaxSuppressionV2 ? - reinterpret_cast(value) : nullptr; - } - const NonMaxSuppressionV2T *AsNonMaxSuppressionV2() const { - return type == OpParameter_NonMaxSuppressionV2 ? - reinterpret_cast(value) : nullptr; - } - NormalizeT *AsNormalize() { - return type == OpParameter_Normalize ? - reinterpret_cast(value) : nullptr; - } - const NormalizeT *AsNormalize() const { - return type == OpParameter_Normalize ? - reinterpret_cast(value) : nullptr; - } - PackParamT *AsPackParam() { - return type == OpParameter_PackParam ? - reinterpret_cast(value) : nullptr; - } - const PackParamT *AsPackParam() const { - return type == OpParameter_PackParam ? - reinterpret_cast(value) : nullptr; - } - PermuteT *AsPermute() { - return type == OpParameter_Permute ? - reinterpret_cast(value) : nullptr; - } - const PermuteT *AsPermute() const { - return type == OpParameter_Permute ? - reinterpret_cast(value) : nullptr; - } - PluginT *AsPlugin() { - return type == OpParameter_Plugin ? - reinterpret_cast(value) : nullptr; - } - const PluginT *AsPlugin() const { - return type == OpParameter_Plugin ? - reinterpret_cast(value) : nullptr; - } - PoolT *AsPool() { - return type == OpParameter_Pool ? - reinterpret_cast(value) : nullptr; - } - const PoolT *AsPool() const { - return type == OpParameter_Pool ? - reinterpret_cast(value) : nullptr; - } - PReluT *AsPRelu() { - return type == OpParameter_PRelu ? - reinterpret_cast(value) : nullptr; - } - const PReluT *AsPRelu() const { - return type == OpParameter_PRelu ? - reinterpret_cast(value) : nullptr; - } - PriorBoxT *AsPriorBox() { - return type == OpParameter_PriorBox ? - reinterpret_cast(value) : nullptr; - } - const PriorBoxT *AsPriorBox() const { - return type == OpParameter_PriorBox ? - reinterpret_cast(value) : nullptr; - } - ProposalT *AsProposal() { - return type == OpParameter_Proposal ? - reinterpret_cast(value) : nullptr; - } - const ProposalT *AsProposal() const { - return type == OpParameter_Proposal ? - reinterpret_cast(value) : nullptr; - } - QuantizedAvgPoolT *AsQuantizedAvgPool() { - return type == OpParameter_QuantizedAvgPool ? - reinterpret_cast(value) : nullptr; - } - const QuantizedAvgPoolT *AsQuantizedAvgPool() const { - return type == OpParameter_QuantizedAvgPool ? 
- reinterpret_cast(value) : nullptr; - } - QuantizedBiasAddT *AsQuantizedBiasAdd() { - return type == OpParameter_QuantizedBiasAdd ? - reinterpret_cast(value) : nullptr; - } - const QuantizedBiasAddT *AsQuantizedBiasAdd() const { - return type == OpParameter_QuantizedBiasAdd ? - reinterpret_cast(value) : nullptr; - } - QuantizedConcatT *AsQuantizedConcat() { - return type == OpParameter_QuantizedConcat ? - reinterpret_cast(value) : nullptr; - } - const QuantizedConcatT *AsQuantizedConcat() const { - return type == OpParameter_QuantizedConcat ? - reinterpret_cast(value) : nullptr; - } - QuantizedLogisticT *AsQuantizedLogistic() { - return type == OpParameter_QuantizedLogistic ? - reinterpret_cast(value) : nullptr; - } - const QuantizedLogisticT *AsQuantizedLogistic() const { - return type == OpParameter_QuantizedLogistic ? - reinterpret_cast(value) : nullptr; - } - QuantizedMatMulT *AsQuantizedMatMul() { - return type == OpParameter_QuantizedMatMul ? - reinterpret_cast(value) : nullptr; - } - const QuantizedMatMulT *AsQuantizedMatMul() const { - return type == OpParameter_QuantizedMatMul ? - reinterpret_cast(value) : nullptr; - } - QuantizedMaxPoolT *AsQuantizedMaxPool() { - return type == OpParameter_QuantizedMaxPool ? - reinterpret_cast(value) : nullptr; - } - const QuantizedMaxPoolT *AsQuantizedMaxPool() const { - return type == OpParameter_QuantizedMaxPool ? - reinterpret_cast(value) : nullptr; - } - QuantizedReluT *AsQuantizedRelu() { - return type == OpParameter_QuantizedRelu ? - reinterpret_cast(value) : nullptr; - } - const QuantizedReluT *AsQuantizedRelu() const { - return type == OpParameter_QuantizedRelu ? - reinterpret_cast(value) : nullptr; - } - QuantizedRelu6T *AsQuantizedRelu6() { - return type == OpParameter_QuantizedRelu6 ? - reinterpret_cast(value) : nullptr; - } - const QuantizedRelu6T *AsQuantizedRelu6() const { - return type == OpParameter_QuantizedRelu6 ? - reinterpret_cast(value) : nullptr; - } - QuantizedReshapeT *AsQuantizedReshape() { - return type == OpParameter_QuantizedReshape ? - reinterpret_cast(value) : nullptr; - } - const QuantizedReshapeT *AsQuantizedReshape() const { - return type == OpParameter_QuantizedReshape ? - reinterpret_cast(value) : nullptr; - } - QuantizedSoftmaxT *AsQuantizedSoftmax() { - return type == OpParameter_QuantizedSoftmax ? - reinterpret_cast(value) : nullptr; - } - const QuantizedSoftmaxT *AsQuantizedSoftmax() const { - return type == OpParameter_QuantizedSoftmax ? - reinterpret_cast(value) : nullptr; - } - QuantizeMaxMinT *AsQuantizeMaxMin() { - return type == OpParameter_QuantizeMaxMin ? - reinterpret_cast(value) : nullptr; - } - const QuantizeMaxMinT *AsQuantizeMaxMin() const { - return type == OpParameter_QuantizeMaxMin ? - reinterpret_cast(value) : nullptr; - } - QuantizeV2T *AsQuantizeV2() { - return type == OpParameter_QuantizeV2 ? - reinterpret_cast(value) : nullptr; - } - const QuantizeV2T *AsQuantizeV2() const { - return type == OpParameter_QuantizeV2 ? - reinterpret_cast(value) : nullptr; - } - RangeT *AsRange() { - return type == OpParameter_Range ? - reinterpret_cast(value) : nullptr; - } - const RangeT *AsRange() const { - return type == OpParameter_Range ? - reinterpret_cast(value) : nullptr; - } - RankT *AsRank() { - return type == OpParameter_Rank ? - reinterpret_cast(value) : nullptr; - } - const RankT *AsRank() const { - return type == OpParameter_Rank ? - reinterpret_cast(value) : nullptr; - } - ReduceJoinT *AsReduceJoin() { - return type == OpParameter_ReduceJoin ? 
- reinterpret_cast(value) : nullptr; - } - const ReduceJoinT *AsReduceJoin() const { - return type == OpParameter_ReduceJoin ? - reinterpret_cast(value) : nullptr; - } - ReductionParamT *AsReductionParam() { - return type == OpParameter_ReductionParam ? - reinterpret_cast(value) : nullptr; - } - const ReductionParamT *AsReductionParam() const { - return type == OpParameter_ReductionParam ? - reinterpret_cast(value) : nullptr; - } - ReluT *AsRelu() { - return type == OpParameter_Relu ? - reinterpret_cast(value) : nullptr; - } - const ReluT *AsRelu() const { - return type == OpParameter_Relu ? - reinterpret_cast(value) : nullptr; - } - Relu6T *AsRelu6() { - return type == OpParameter_Relu6 ? - reinterpret_cast(value) : nullptr; - } - const Relu6T *AsRelu6() const { - return type == OpParameter_Relu6 ? - reinterpret_cast(value) : nullptr; - } - RequantizationRangeT *AsRequantizationRange() { - return type == OpParameter_RequantizationRange ? - reinterpret_cast(value) : nullptr; - } - const RequantizationRangeT *AsRequantizationRange() const { - return type == OpParameter_RequantizationRange ? - reinterpret_cast(value) : nullptr; - } - RequantizeT *AsRequantize() { - return type == OpParameter_Requantize ? - reinterpret_cast(value) : nullptr; - } - const RequantizeT *AsRequantize() const { - return type == OpParameter_Requantize ? - reinterpret_cast(value) : nullptr; - } - ReshapeT *AsReshape() { - return type == OpParameter_Reshape ? - reinterpret_cast(value) : nullptr; - } - const ReshapeT *AsReshape() const { - return type == OpParameter_Reshape ? - reinterpret_cast(value) : nullptr; - } - ResizeT *AsResize() { - return type == OpParameter_Resize ? - reinterpret_cast(value) : nullptr; - } - const ResizeT *AsResize() const { - return type == OpParameter_Resize ? - reinterpret_cast(value) : nullptr; - } - RoiPoolingT *AsRoiPooling() { - return type == OpParameter_RoiPooling ? - reinterpret_cast(value) : nullptr; - } - const RoiPoolingT *AsRoiPooling() const { - return type == OpParameter_RoiPooling ? - reinterpret_cast(value) : nullptr; - } - ScaleT *AsScale() { - return type == OpParameter_Scale ? - reinterpret_cast(value) : nullptr; - } - const ScaleT *AsScale() const { - return type == OpParameter_Scale ? - reinterpret_cast(value) : nullptr; - } - SeluT *AsSelu() { - return type == OpParameter_Selu ? - reinterpret_cast(value) : nullptr; - } - const SeluT *AsSelu() const { - return type == OpParameter_Selu ? - reinterpret_cast(value) : nullptr; - } - SizeT *AsSize() { - return type == OpParameter_Size ? - reinterpret_cast(value) : nullptr; - } - const SizeT *AsSize() const { - return type == OpParameter_Size ? - reinterpret_cast(value) : nullptr; - } - SliceT *AsSlice() { - return type == OpParameter_Slice ? - reinterpret_cast(value) : nullptr; - } - const SliceT *AsSlice() const { - return type == OpParameter_Slice ? - reinterpret_cast(value) : nullptr; - } - SliceTfT *AsSliceTf() { - return type == OpParameter_SliceTf ? - reinterpret_cast(value) : nullptr; - } - const SliceTfT *AsSliceTf() const { - return type == OpParameter_SliceTf ? - reinterpret_cast(value) : nullptr; - } - SpaceBatchT *AsSpaceBatch() { - return type == OpParameter_SpaceBatch ? - reinterpret_cast(value) : nullptr; - } - const SpaceBatchT *AsSpaceBatch() const { - return type == OpParameter_SpaceBatch ? - reinterpret_cast(value) : nullptr; - } - SqueezeParamT *AsSqueezeParam() { - return type == OpParameter_SqueezeParam ? 
- reinterpret_cast(value) : nullptr; - } - const SqueezeParamT *AsSqueezeParam() const { - return type == OpParameter_SqueezeParam ? - reinterpret_cast(value) : nullptr; - } - StridedSliceParamT *AsStridedSliceParam() { - return type == OpParameter_StridedSliceParam ? - reinterpret_cast(value) : nullptr; - } - const StridedSliceParamT *AsStridedSliceParam() const { - return type == OpParameter_StridedSliceParam ? - reinterpret_cast(value) : nullptr; - } - TensorConvertInfoT *AsTensorConvertInfo() { - return type == OpParameter_TensorConvertInfo ? - reinterpret_cast(value) : nullptr; - } - const TensorConvertInfoT *AsTensorConvertInfo() const { - return type == OpParameter_TensorConvertInfo ? - reinterpret_cast(value) : nullptr; - } - TfQuantizedConv2DT *AsTfQuantizedConv2D() { - return type == OpParameter_TfQuantizedConv2D ? - reinterpret_cast(value) : nullptr; - } - const TfQuantizedConv2DT *AsTfQuantizedConv2D() const { - return type == OpParameter_TfQuantizedConv2D ? - reinterpret_cast(value) : nullptr; - } - TopKV2T *AsTopKV2() { - return type == OpParameter_TopKV2 ? - reinterpret_cast(value) : nullptr; - } - const TopKV2T *AsTopKV2() const { - return type == OpParameter_TopKV2 ? - reinterpret_cast(value) : nullptr; - } - TransposeT *AsTranspose() { - return type == OpParameter_Transpose ? - reinterpret_cast(value) : nullptr; - } - const TransposeT *AsTranspose() const { - return type == OpParameter_Transpose ? - reinterpret_cast(value) : nullptr; - } - UnaryOpT *AsUnaryOp() { - return type == OpParameter_UnaryOp ? - reinterpret_cast(value) : nullptr; - } - const UnaryOpT *AsUnaryOp() const { - return type == OpParameter_UnaryOp ? - reinterpret_cast(value) : nullptr; - } - MomentsParamT *AsMomentsParam() { - return type == OpParameter_MomentsParam ? - reinterpret_cast(value) : nullptr; - } - const MomentsParamT *AsMomentsParam() const { - return type == OpParameter_MomentsParam ? - reinterpret_cast(value) : nullptr; - } - RNNParamT *AsRNNParam() { - return type == OpParameter_RNNParam ? - reinterpret_cast(value) : nullptr; - } - const RNNParamT *AsRNNParam() const { - return type == OpParameter_RNNParam ? - reinterpret_cast(value) : nullptr; - } - BatchMatMulParamT *AsBatchMatMulParam() { - return type == OpParameter_BatchMatMulParam ? - reinterpret_cast(value) : nullptr; - } - const BatchMatMulParamT *AsBatchMatMulParam() const { - return type == OpParameter_BatchMatMulParam ? - reinterpret_cast(value) : nullptr; - } - QuantizedFloatParamT *AsQuantizedFloatParam() { - return type == OpParameter_QuantizedFloatParam ? - reinterpret_cast(value) : nullptr; - } - const QuantizedFloatParamT *AsQuantizedFloatParam() const { - return type == OpParameter_QuantizedFloatParam ? - reinterpret_cast(value) : nullptr; - } - DepthSpaceParamT *AsDepthSpaceParam() { - return type == OpParameter_DepthSpaceParam ? - reinterpret_cast(value) : nullptr; - } - const DepthSpaceParamT *AsDepthSpaceParam() const { - return type == OpParameter_DepthSpaceParam ? - reinterpret_cast(value) : nullptr; - } - EltwiseInt8T *AsEltwiseInt8() { - return type == OpParameter_EltwiseInt8 ? - reinterpret_cast(value) : nullptr; - } - const EltwiseInt8T *AsEltwiseInt8() const { - return type == OpParameter_EltwiseInt8 ? - reinterpret_cast(value) : nullptr; - } - ReverseSequenceParamT *AsReverseSequenceParam() { - return type == OpParameter_ReverseSequenceParam ? - reinterpret_cast(value) : nullptr; - } - const ReverseSequenceParamT *AsReverseSequenceParam() const { - return type == OpParameter_ReverseSequenceParam ? 
- reinterpret_cast(value) : nullptr; - } - ExtraT *AsExtra() { - return type == OpParameter_Extra ? - reinterpret_cast(value) : nullptr; - } - const ExtraT *AsExtra() const { - return type == OpParameter_Extra ? - reinterpret_cast(value) : nullptr; - } - Pool3DT *AsPool3D() { - return type == OpParameter_Pool3D ? - reinterpret_cast(value) : nullptr; - } - const Pool3DT *AsPool3D() const { - return type == OpParameter_Pool3D ? - reinterpret_cast(value) : nullptr; - } - Convolution3DT *AsConvolution3D() { - return type == OpParameter_Convolution3D ? - reinterpret_cast(value) : nullptr; - } - const Convolution3DT *AsConvolution3D() const { - return type == OpParameter_Convolution3D ? - reinterpret_cast(value) : nullptr; - } - ELUT *AsELU() { - return type == OpParameter_ELU ? - reinterpret_cast(value) : nullptr; - } - const ELUT *AsELU() const { - return type == OpParameter_ELU ? - reinterpret_cast(value) : nullptr; - } - DetectionPostProcessParamT *AsDetectionPostProcessParam() { - return type == OpParameter_DetectionPostProcessParam ? - reinterpret_cast(value) : nullptr; - } - const DetectionPostProcessParamT *AsDetectionPostProcessParam() const { - return type == OpParameter_DetectionPostProcessParam ? - reinterpret_cast(value) : nullptr; - } - OneHotParamT *AsOneHotParam() { - return type == OpParameter_OneHotParam ? - reinterpret_cast(value) : nullptr; - } - const OneHotParamT *AsOneHotParam() const { - return type == OpParameter_OneHotParam ? - reinterpret_cast(value) : nullptr; - } - PadParamT *AsPadParam() { - return type == OpParameter_PadParam ? - reinterpret_cast(value) : nullptr; - } - const PadParamT *AsPadParam() const { - return type == OpParameter_PadParam ? - reinterpret_cast(value) : nullptr; - } -}; - -bool VerifyOpParameter(flatbuffers::Verifier &verifier, const void *obj, OpParameter type); -bool VerifyOpParameterVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum ForwardType { - ForwardType_CPU = 0, - ForwardType_METAL = 1, - ForwardType_OPENCL = 2, - ForwardType_OPENGLES = 3, - ForwardType_VULKAN = 4, - ForwardType_MIN = ForwardType_CPU, - ForwardType_MAX = ForwardType_VULKAN -}; - -inline const ForwardType (&EnumValuesForwardType())[5] { - static const ForwardType values[] = { - ForwardType_CPU, - ForwardType_METAL, - ForwardType_OPENCL, - ForwardType_OPENGLES, - ForwardType_VULKAN - }; - return values; -} - -inline const char * const *EnumNamesForwardType() { - static const char * const names[] = { - "CPU", - "METAL", - "OPENCL", - "OPENGLES", - "VULKAN", - nullptr - }; - return names; -} - -inline const char *EnumNameForwardType(ForwardType e) { - if (e < ForwardType_CPU || e > ForwardType_VULKAN) return ""; - const size_t index = static_cast(e); - return EnumNamesForwardType()[index]; -} - -enum Usage { - Usage_INFERENCE = 0, - Usage_TRAIN = 1, - Usage_MIN = Usage_INFERENCE, - Usage_MAX = Usage_TRAIN -}; - -inline const Usage (&EnumValuesUsage())[2] { - static const Usage values[] = { - Usage_INFERENCE, - Usage_TRAIN - }; - return values; -} - -inline const char * const *EnumNamesUsage() { - static const char * const names[] = { - "INFERENCE", - "TRAIN", - nullptr - }; - return names; -} - -inline const char *EnumNameUsage(Usage e) { - if (e < Usage_INFERENCE || e > Usage_TRAIN) return ""; - const size_t index = static_cast(e); - return EnumNamesUsage()[index]; -} - -struct PluginT : public flatbuffers::NativeTable { - typedef Plugin TableType; - std::string type; - std::vector> buffer; - PluginT() { 
-  }
-};
-
-struct Plugin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef PluginT NativeTableType;
-  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
-    return PluginTypeTable();
-  }
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_TYPE = 4,
-    VT_BUFFER = 6
-  };
-  const flatbuffers::String *type() const {
-    return GetPointer<const flatbuffers::String *>(VT_TYPE);
-  }
-  const flatbuffers::Vector<flatbuffers::Offset<Blob>> *buffer() const {
-    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<Blob>> *>(VT_BUFFER);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyOffset(verifier, VT_TYPE) &&
-           verifier.VerifyString(type()) &&
-           VerifyOffset(verifier, VT_BUFFER) &&
-           verifier.VerifyVector(buffer()) &&
-           verifier.VerifyVectorOfTables(buffer()) &&
-           verifier.EndTable();
-  }
-  PluginT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PluginT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<Plugin> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PluginT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct PluginBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_type(flatbuffers::Offset<flatbuffers::String> type) {
-    fbb_.AddOffset(Plugin::VT_TYPE, type);
-  }
-  void add_buffer(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Blob>>> buffer) {
-    fbb_.AddOffset(Plugin::VT_BUFFER, buffer);
-  }
-  explicit PluginBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  PluginBuilder &operator=(const PluginBuilder &);
-  flatbuffers::Offset<Plugin> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<Plugin>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<Plugin> CreatePlugin(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    flatbuffers::Offset<flatbuffers::String> type = 0,
-    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Blob>>> buffer = 0) {
-  PluginBuilder builder_(_fbb);
-  builder_.add_buffer(buffer);
-  builder_.add_type(type);
-  return builder_.Finish();
-}
-
-inline flatbuffers::Offset<Plugin> CreatePluginDirect(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    const char *type = nullptr,
-    const std::vector<flatbuffers::Offset<Blob>> *buffer = nullptr) {
-  auto type__ = type ? _fbb.CreateString(type) : 0;
-  auto buffer__ = buffer ?
_fbb.CreateVector>(*buffer) : 0; - return MNN::CreatePlugin( - _fbb, - type__, - buffer__); -} - -flatbuffers::Offset CreatePlugin(flatbuffers::FlatBufferBuilder &_fbb, const PluginT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExtraT : public flatbuffers::NativeTable { - typedef Extra TableType; - std::string type; - std::string engine; - std::vector info; - std::vector> attr; - ExtraT() { - } -}; - -struct Extra FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExtraT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ExtraTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4, - VT_ENGINE = 6, - VT_INFO = 8, - VT_ATTR = 10 - }; - const flatbuffers::String *type() const { - return GetPointer(VT_TYPE); - } - const flatbuffers::String *engine() const { - return GetPointer(VT_ENGINE); - } - const flatbuffers::Vector *info() const { - return GetPointer *>(VT_INFO); - } - const flatbuffers::Vector> *attr() const { - return GetPointer> *>(VT_ATTR); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TYPE) && - verifier.VerifyString(type()) && - VerifyOffset(verifier, VT_ENGINE) && - verifier.VerifyString(engine()) && - VerifyOffset(verifier, VT_INFO) && - verifier.VerifyVector(info()) && - VerifyOffset(verifier, VT_ATTR) && - verifier.VerifyVector(attr()) && - verifier.VerifyVectorOfTables(attr()) && - verifier.EndTable(); - } - ExtraT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExtraT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExtraT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExtraBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(flatbuffers::Offset type) { - fbb_.AddOffset(Extra::VT_TYPE, type); - } - void add_engine(flatbuffers::Offset engine) { - fbb_.AddOffset(Extra::VT_ENGINE, engine); - } - void add_info(flatbuffers::Offset> info) { - fbb_.AddOffset(Extra::VT_INFO, info); - } - void add_attr(flatbuffers::Offset>> attr) { - fbb_.AddOffset(Extra::VT_ATTR, attr); - } - explicit ExtraBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ExtraBuilder &operator=(const ExtraBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateExtra( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset type = 0, - flatbuffers::Offset engine = 0, - flatbuffers::Offset> info = 0, - flatbuffers::Offset>> attr = 0) { - ExtraBuilder builder_(_fbb); - builder_.add_attr(attr); - builder_.add_info(info); - builder_.add_engine(engine); - builder_.add_type(type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateExtraDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *type = nullptr, - const char *engine = nullptr, - const std::vector *info = nullptr, - const std::vector> *attr = nullptr) { - auto type__ = type ? _fbb.CreateString(type) : 0; - auto engine__ = engine ? _fbb.CreateString(engine) : 0; - auto info__ = info ? _fbb.CreateVector(*info) : 0; - auto attr__ = attr ? 
_fbb.CreateVector>(*attr) : 0; - return MNN::CreateExtra( - _fbb, - type__, - engine__, - info__, - attr__); -} - -flatbuffers::Offset CreateExtra(flatbuffers::FlatBufferBuilder &_fbb, const ExtraT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OpT : public flatbuffers::NativeTable { - typedef Op TableType; - std::vector inputIndexes; - OpParameterUnion main; - std::string name; - std::vector outputIndexes; - OpType type; - MNN_DATA_FORMAT defaultDimentionFormat; - OpT() - : type(OpType_AbsVal), - defaultDimentionFormat(MNN_DATA_FORMAT_NHWC) { - } -}; - -struct Op FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OpT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return OpTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUTINDEXES = 4, - VT_MAIN_TYPE = 6, - VT_MAIN = 8, - VT_NAME = 10, - VT_OUTPUTINDEXES = 12, - VT_TYPE = 14, - VT_DEFAULTDIMENTIONFORMAT = 16 - }; - const flatbuffers::Vector *inputIndexes() const { - return GetPointer *>(VT_INPUTINDEXES); - } - OpParameter main_type() const { - return static_cast(GetField(VT_MAIN_TYPE, 0)); - } - const void *main() const { - return GetPointer(VT_MAIN); - } - template const T *main_as() const; - const QuantizedAdd *main_as_QuantizedAdd() const { - return main_type() == OpParameter_QuantizedAdd ? static_cast(main()) : nullptr; - } - const ArgMax *main_as_ArgMax() const { - return main_type() == OpParameter_ArgMax ? static_cast(main()) : nullptr; - } - const AsString *main_as_AsString() const { - return main_type() == OpParameter_AsString ? static_cast(main()) : nullptr; - } - const Axis *main_as_Axis() const { - return main_type() == OpParameter_Axis ? static_cast(main()) : nullptr; - } - const BatchNorm *main_as_BatchNorm() const { - return main_type() == OpParameter_BatchNorm ? static_cast(main()) : nullptr; - } - const BinaryOp *main_as_BinaryOp() const { - return main_type() == OpParameter_BinaryOp ? static_cast(main()) : nullptr; - } - const Blob *main_as_Blob() const { - return main_type() == OpParameter_Blob ? static_cast(main()) : nullptr; - } - const CastParam *main_as_CastParam() const { - return main_type() == OpParameter_CastParam ? static_cast(main()) : nullptr; - } - const Convolution2D *main_as_Convolution2D() const { - return main_type() == OpParameter_Convolution2D ? static_cast(main()) : nullptr; - } - const Crop *main_as_Crop() const { - return main_type() == OpParameter_Crop ? static_cast(main()) : nullptr; - } - const CropAndResize *main_as_CropAndResize() const { - return main_type() == OpParameter_CropAndResize ? static_cast(main()) : nullptr; - } - const Dequantize *main_as_Dequantize() const { - return main_type() == OpParameter_Dequantize ? static_cast(main()) : nullptr; - } - const DetectionOutput *main_as_DetectionOutput() const { - return main_type() == OpParameter_DetectionOutput ? static_cast(main()) : nullptr; - } - const Eltwise *main_as_Eltwise() const { - return main_type() == OpParameter_Eltwise ? static_cast(main()) : nullptr; - } - const ExpandDims *main_as_ExpandDims() const { - return main_type() == OpParameter_ExpandDims ? static_cast(main()) : nullptr; - } - const Fill *main_as_Fill() const { - return main_type() == OpParameter_Fill ? static_cast(main()) : nullptr; - } - const Flatten *main_as_Flatten() const { - return main_type() == OpParameter_Flatten ? static_cast(main()) : nullptr; - } - const Gather *main_as_Gather() const { - return main_type() == OpParameter_Gather ? 
static_cast(main()) : nullptr; - } - const GatherV2 *main_as_GatherV2() const { - return main_type() == OpParameter_GatherV2 ? static_cast(main()) : nullptr; - } - const InnerProduct *main_as_InnerProduct() const { - return main_type() == OpParameter_InnerProduct ? static_cast(main()) : nullptr; - } - const Input *main_as_Input() const { - return main_type() == OpParameter_Input ? static_cast(main()) : nullptr; - } - const Interp *main_as_Interp() const { - return main_type() == OpParameter_Interp ? static_cast(main()) : nullptr; - } - const LRN *main_as_LRN() const { - return main_type() == OpParameter_LRN ? static_cast(main()) : nullptr; - } - const LSTM *main_as_LSTM() const { - return main_type() == OpParameter_LSTM ? static_cast(main()) : nullptr; - } - const MatMul *main_as_MatMul() const { - return main_type() == OpParameter_MatMul ? static_cast(main()) : nullptr; - } - const NonMaxSuppressionV2 *main_as_NonMaxSuppressionV2() const { - return main_type() == OpParameter_NonMaxSuppressionV2 ? static_cast(main()) : nullptr; - } - const Normalize *main_as_Normalize() const { - return main_type() == OpParameter_Normalize ? static_cast(main()) : nullptr; - } - const PackParam *main_as_PackParam() const { - return main_type() == OpParameter_PackParam ? static_cast(main()) : nullptr; - } - const Permute *main_as_Permute() const { - return main_type() == OpParameter_Permute ? static_cast(main()) : nullptr; - } - const Plugin *main_as_Plugin() const { - return main_type() == OpParameter_Plugin ? static_cast(main()) : nullptr; - } - const Pool *main_as_Pool() const { - return main_type() == OpParameter_Pool ? static_cast(main()) : nullptr; - } - const PRelu *main_as_PRelu() const { - return main_type() == OpParameter_PRelu ? static_cast(main()) : nullptr; - } - const PriorBox *main_as_PriorBox() const { - return main_type() == OpParameter_PriorBox ? static_cast(main()) : nullptr; - } - const Proposal *main_as_Proposal() const { - return main_type() == OpParameter_Proposal ? static_cast(main()) : nullptr; - } - const QuantizedAvgPool *main_as_QuantizedAvgPool() const { - return main_type() == OpParameter_QuantizedAvgPool ? static_cast(main()) : nullptr; - } - const QuantizedBiasAdd *main_as_QuantizedBiasAdd() const { - return main_type() == OpParameter_QuantizedBiasAdd ? static_cast(main()) : nullptr; - } - const QuantizedConcat *main_as_QuantizedConcat() const { - return main_type() == OpParameter_QuantizedConcat ? static_cast(main()) : nullptr; - } - const QuantizedLogistic *main_as_QuantizedLogistic() const { - return main_type() == OpParameter_QuantizedLogistic ? static_cast(main()) : nullptr; - } - const QuantizedMatMul *main_as_QuantizedMatMul() const { - return main_type() == OpParameter_QuantizedMatMul ? static_cast(main()) : nullptr; - } - const QuantizedMaxPool *main_as_QuantizedMaxPool() const { - return main_type() == OpParameter_QuantizedMaxPool ? static_cast(main()) : nullptr; - } - const QuantizedRelu *main_as_QuantizedRelu() const { - return main_type() == OpParameter_QuantizedRelu ? static_cast(main()) : nullptr; - } - const QuantizedRelu6 *main_as_QuantizedRelu6() const { - return main_type() == OpParameter_QuantizedRelu6 ? static_cast(main()) : nullptr; - } - const QuantizedReshape *main_as_QuantizedReshape() const { - return main_type() == OpParameter_QuantizedReshape ? static_cast(main()) : nullptr; - } - const QuantizedSoftmax *main_as_QuantizedSoftmax() const { - return main_type() == OpParameter_QuantizedSoftmax ? 
static_cast(main()) : nullptr; - } - const QuantizeMaxMin *main_as_QuantizeMaxMin() const { - return main_type() == OpParameter_QuantizeMaxMin ? static_cast(main()) : nullptr; - } - const QuantizeV2 *main_as_QuantizeV2() const { - return main_type() == OpParameter_QuantizeV2 ? static_cast(main()) : nullptr; - } - const Range *main_as_Range() const { - return main_type() == OpParameter_Range ? static_cast(main()) : nullptr; - } - const Rank *main_as_Rank() const { - return main_type() == OpParameter_Rank ? static_cast(main()) : nullptr; - } - const ReduceJoin *main_as_ReduceJoin() const { - return main_type() == OpParameter_ReduceJoin ? static_cast(main()) : nullptr; - } - const ReductionParam *main_as_ReductionParam() const { - return main_type() == OpParameter_ReductionParam ? static_cast(main()) : nullptr; - } - const Relu *main_as_Relu() const { - return main_type() == OpParameter_Relu ? static_cast(main()) : nullptr; - } - const Relu6 *main_as_Relu6() const { - return main_type() == OpParameter_Relu6 ? static_cast(main()) : nullptr; - } - const RequantizationRange *main_as_RequantizationRange() const { - return main_type() == OpParameter_RequantizationRange ? static_cast(main()) : nullptr; - } - const Requantize *main_as_Requantize() const { - return main_type() == OpParameter_Requantize ? static_cast(main()) : nullptr; - } - const Reshape *main_as_Reshape() const { - return main_type() == OpParameter_Reshape ? static_cast(main()) : nullptr; - } - const Resize *main_as_Resize() const { - return main_type() == OpParameter_Resize ? static_cast(main()) : nullptr; - } - const RoiPooling *main_as_RoiPooling() const { - return main_type() == OpParameter_RoiPooling ? static_cast(main()) : nullptr; - } - const Scale *main_as_Scale() const { - return main_type() == OpParameter_Scale ? static_cast(main()) : nullptr; - } - const Selu *main_as_Selu() const { - return main_type() == OpParameter_Selu ? static_cast(main()) : nullptr; - } - const Size *main_as_Size() const { - return main_type() == OpParameter_Size ? static_cast(main()) : nullptr; - } - const Slice *main_as_Slice() const { - return main_type() == OpParameter_Slice ? static_cast(main()) : nullptr; - } - const SliceTf *main_as_SliceTf() const { - return main_type() == OpParameter_SliceTf ? static_cast(main()) : nullptr; - } - const SpaceBatch *main_as_SpaceBatch() const { - return main_type() == OpParameter_SpaceBatch ? static_cast(main()) : nullptr; - } - const SqueezeParam *main_as_SqueezeParam() const { - return main_type() == OpParameter_SqueezeParam ? static_cast(main()) : nullptr; - } - const StridedSliceParam *main_as_StridedSliceParam() const { - return main_type() == OpParameter_StridedSliceParam ? static_cast(main()) : nullptr; - } - const TensorConvertInfo *main_as_TensorConvertInfo() const { - return main_type() == OpParameter_TensorConvertInfo ? static_cast(main()) : nullptr; - } - const TfQuantizedConv2D *main_as_TfQuantizedConv2D() const { - return main_type() == OpParameter_TfQuantizedConv2D ? static_cast(main()) : nullptr; - } - const TopKV2 *main_as_TopKV2() const { - return main_type() == OpParameter_TopKV2 ? static_cast(main()) : nullptr; - } - const Transpose *main_as_Transpose() const { - return main_type() == OpParameter_Transpose ? static_cast(main()) : nullptr; - } - const UnaryOp *main_as_UnaryOp() const { - return main_type() == OpParameter_UnaryOp ? static_cast(main()) : nullptr; - } - const MomentsParam *main_as_MomentsParam() const { - return main_type() == OpParameter_MomentsParam ? 
static_cast<const MomentsParam *>(main()) : nullptr;
-  }
-  const RNNParam *main_as_RNNParam() const {
-    return main_type() == OpParameter_RNNParam ? static_cast<const RNNParam *>(main()) : nullptr;
-  }
-  const BatchMatMulParam *main_as_BatchMatMulParam() const {
-    return main_type() == OpParameter_BatchMatMulParam ? static_cast<const BatchMatMulParam *>(main()) : nullptr;
-  }
-  const QuantizedFloatParam *main_as_QuantizedFloatParam() const {
-    return main_type() == OpParameter_QuantizedFloatParam ? static_cast<const QuantizedFloatParam *>(main()) : nullptr;
-  }
-  const DepthSpaceParam *main_as_DepthSpaceParam() const {
-    return main_type() == OpParameter_DepthSpaceParam ? static_cast<const DepthSpaceParam *>(main()) : nullptr;
-  }
-  const EltwiseInt8 *main_as_EltwiseInt8() const {
-    return main_type() == OpParameter_EltwiseInt8 ? static_cast<const EltwiseInt8 *>(main()) : nullptr;
-  }
-  const ReverseSequenceParam *main_as_ReverseSequenceParam() const {
-    return main_type() == OpParameter_ReverseSequenceParam ? static_cast<const ReverseSequenceParam *>(main()) : nullptr;
-  }
-  const Extra *main_as_Extra() const {
-    return main_type() == OpParameter_Extra ? static_cast<const Extra *>(main()) : nullptr;
-  }
-  const Pool3D *main_as_Pool3D() const {
-    return main_type() == OpParameter_Pool3D ? static_cast<const Pool3D *>(main()) : nullptr;
-  }
-  const Convolution3D *main_as_Convolution3D() const {
-    return main_type() == OpParameter_Convolution3D ? static_cast<const Convolution3D *>(main()) : nullptr;
-  }
-  const ELU *main_as_ELU() const {
-    return main_type() == OpParameter_ELU ? static_cast<const ELU *>(main()) : nullptr;
-  }
-  const DetectionPostProcessParam *main_as_DetectionPostProcessParam() const {
-    return main_type() == OpParameter_DetectionPostProcessParam ? static_cast<const DetectionPostProcessParam *>(main()) : nullptr;
-  }
-  const OneHotParam *main_as_OneHotParam() const {
-    return main_type() == OpParameter_OneHotParam ? static_cast<const OneHotParam *>(main()) : nullptr;
-  }
-  const PadParam *main_as_PadParam() const {
-    return main_type() == OpParameter_PadParam ?
static_cast(main()) : nullptr; - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - const flatbuffers::Vector *outputIndexes() const { - return GetPointer *>(VT_OUTPUTINDEXES); - } - OpType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - MNN_DATA_FORMAT defaultDimentionFormat() const { - return static_cast(GetField(VT_DEFAULTDIMENTIONFORMAT, 1)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_INPUTINDEXES) && - verifier.VerifyVector(inputIndexes()) && - VerifyField(verifier, VT_MAIN_TYPE) && - VerifyOffset(verifier, VT_MAIN) && - VerifyOpParameter(verifier, main(), main_type()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyOffset(verifier, VT_OUTPUTINDEXES) && - verifier.VerifyVector(outputIndexes()) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_DEFAULTDIMENTIONFORMAT) && - verifier.EndTable(); - } - OpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -template<> inline const QuantizedAdd *Op::main_as() const { - return main_as_QuantizedAdd(); -} - -template<> inline const ArgMax *Op::main_as() const { - return main_as_ArgMax(); -} - -template<> inline const AsString *Op::main_as() const { - return main_as_AsString(); -} - -template<> inline const Axis *Op::main_as() const { - return main_as_Axis(); -} - -template<> inline const BatchNorm *Op::main_as() const { - return main_as_BatchNorm(); -} - -template<> inline const BinaryOp *Op::main_as() const { - return main_as_BinaryOp(); -} - -template<> inline const Blob *Op::main_as() const { - return main_as_Blob(); -} - -template<> inline const CastParam *Op::main_as() const { - return main_as_CastParam(); -} - -template<> inline const Convolution2D *Op::main_as() const { - return main_as_Convolution2D(); -} - -template<> inline const Crop *Op::main_as() const { - return main_as_Crop(); -} - -template<> inline const CropAndResize *Op::main_as() const { - return main_as_CropAndResize(); -} - -template<> inline const Dequantize *Op::main_as() const { - return main_as_Dequantize(); -} - -template<> inline const DetectionOutput *Op::main_as() const { - return main_as_DetectionOutput(); -} - -template<> inline const Eltwise *Op::main_as() const { - return main_as_Eltwise(); -} - -template<> inline const ExpandDims *Op::main_as() const { - return main_as_ExpandDims(); -} - -template<> inline const Fill *Op::main_as() const { - return main_as_Fill(); -} - -template<> inline const Flatten *Op::main_as() const { - return main_as_Flatten(); -} - -template<> inline const Gather *Op::main_as() const { - return main_as_Gather(); -} - -template<> inline const GatherV2 *Op::main_as() const { - return main_as_GatherV2(); -} - -template<> inline const InnerProduct *Op::main_as() const { - return main_as_InnerProduct(); -} - -template<> inline const Input *Op::main_as() const { - return main_as_Input(); -} - -template<> inline const Interp *Op::main_as() const { - return main_as_Interp(); -} - -template<> inline const LRN *Op::main_as() const { - return main_as_LRN(); -} - -template<> inline const LSTM *Op::main_as() const { - return main_as_LSTM(); -} - -template<> inline const MatMul *Op::main_as() const { - 
return main_as_MatMul(); -} - -template<> inline const NonMaxSuppressionV2 *Op::main_as() const { - return main_as_NonMaxSuppressionV2(); -} - -template<> inline const Normalize *Op::main_as() const { - return main_as_Normalize(); -} - -template<> inline const PackParam *Op::main_as() const { - return main_as_PackParam(); -} - -template<> inline const Permute *Op::main_as() const { - return main_as_Permute(); -} - -template<> inline const Plugin *Op::main_as() const { - return main_as_Plugin(); -} - -template<> inline const Pool *Op::main_as() const { - return main_as_Pool(); -} - -template<> inline const PRelu *Op::main_as() const { - return main_as_PRelu(); -} - -template<> inline const PriorBox *Op::main_as() const { - return main_as_PriorBox(); -} - -template<> inline const Proposal *Op::main_as() const { - return main_as_Proposal(); -} - -template<> inline const QuantizedAvgPool *Op::main_as() const { - return main_as_QuantizedAvgPool(); -} - -template<> inline const QuantizedBiasAdd *Op::main_as() const { - return main_as_QuantizedBiasAdd(); -} - -template<> inline const QuantizedConcat *Op::main_as() const { - return main_as_QuantizedConcat(); -} - -template<> inline const QuantizedLogistic *Op::main_as() const { - return main_as_QuantizedLogistic(); -} - -template<> inline const QuantizedMatMul *Op::main_as() const { - return main_as_QuantizedMatMul(); -} - -template<> inline const QuantizedMaxPool *Op::main_as() const { - return main_as_QuantizedMaxPool(); -} - -template<> inline const QuantizedRelu *Op::main_as() const { - return main_as_QuantizedRelu(); -} - -template<> inline const QuantizedRelu6 *Op::main_as() const { - return main_as_QuantizedRelu6(); -} - -template<> inline const QuantizedReshape *Op::main_as() const { - return main_as_QuantizedReshape(); -} - -template<> inline const QuantizedSoftmax *Op::main_as() const { - return main_as_QuantizedSoftmax(); -} - -template<> inline const QuantizeMaxMin *Op::main_as() const { - return main_as_QuantizeMaxMin(); -} - -template<> inline const QuantizeV2 *Op::main_as() const { - return main_as_QuantizeV2(); -} - -template<> inline const Range *Op::main_as() const { - return main_as_Range(); -} - -template<> inline const Rank *Op::main_as() const { - return main_as_Rank(); -} - -template<> inline const ReduceJoin *Op::main_as() const { - return main_as_ReduceJoin(); -} - -template<> inline const ReductionParam *Op::main_as() const { - return main_as_ReductionParam(); -} - -template<> inline const Relu *Op::main_as() const { - return main_as_Relu(); -} - -template<> inline const Relu6 *Op::main_as() const { - return main_as_Relu6(); -} - -template<> inline const RequantizationRange *Op::main_as() const { - return main_as_RequantizationRange(); -} - -template<> inline const Requantize *Op::main_as() const { - return main_as_Requantize(); -} - -template<> inline const Reshape *Op::main_as() const { - return main_as_Reshape(); -} - -template<> inline const Resize *Op::main_as() const { - return main_as_Resize(); -} - -template<> inline const RoiPooling *Op::main_as() const { - return main_as_RoiPooling(); -} - -template<> inline const Scale *Op::main_as() const { - return main_as_Scale(); -} - -template<> inline const Selu *Op::main_as() const { - return main_as_Selu(); -} - -template<> inline const Size *Op::main_as() const { - return main_as_Size(); -} - -template<> inline const Slice *Op::main_as() const { - return main_as_Slice(); -} - -template<> inline const SliceTf *Op::main_as() const { - return main_as_SliceTf(); -} - 
-template<> inline const SpaceBatch *Op::main_as() const { - return main_as_SpaceBatch(); -} - -template<> inline const SqueezeParam *Op::main_as() const { - return main_as_SqueezeParam(); -} - -template<> inline const StridedSliceParam *Op::main_as() const { - return main_as_StridedSliceParam(); -} - -template<> inline const TensorConvertInfo *Op::main_as() const { - return main_as_TensorConvertInfo(); -} - -template<> inline const TfQuantizedConv2D *Op::main_as() const { - return main_as_TfQuantizedConv2D(); -} - -template<> inline const TopKV2 *Op::main_as() const { - return main_as_TopKV2(); -} - -template<> inline const Transpose *Op::main_as() const { - return main_as_Transpose(); -} - -template<> inline const UnaryOp *Op::main_as() const { - return main_as_UnaryOp(); -} - -template<> inline const MomentsParam *Op::main_as() const { - return main_as_MomentsParam(); -} - -template<> inline const RNNParam *Op::main_as() const { - return main_as_RNNParam(); -} - -template<> inline const BatchMatMulParam *Op::main_as() const { - return main_as_BatchMatMulParam(); -} - -template<> inline const QuantizedFloatParam *Op::main_as() const { - return main_as_QuantizedFloatParam(); -} - -template<> inline const DepthSpaceParam *Op::main_as() const { - return main_as_DepthSpaceParam(); -} - -template<> inline const EltwiseInt8 *Op::main_as() const { - return main_as_EltwiseInt8(); -} - -template<> inline const ReverseSequenceParam *Op::main_as() const { - return main_as_ReverseSequenceParam(); -} - -template<> inline const Extra *Op::main_as() const { - return main_as_Extra(); -} - -template<> inline const Pool3D *Op::main_as() const { - return main_as_Pool3D(); -} - -template<> inline const Convolution3D *Op::main_as() const { - return main_as_Convolution3D(); -} - -template<> inline const ELU *Op::main_as() const { - return main_as_ELU(); -} - -template<> inline const DetectionPostProcessParam *Op::main_as() const { - return main_as_DetectionPostProcessParam(); -} - -template<> inline const OneHotParam *Op::main_as() const { - return main_as_OneHotParam(); -} - -template<> inline const PadParam *Op::main_as() const { - return main_as_PadParam(); -} - -struct OpBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_inputIndexes(flatbuffers::Offset> inputIndexes) { - fbb_.AddOffset(Op::VT_INPUTINDEXES, inputIndexes); - } - void add_main_type(OpParameter main_type) { - fbb_.AddElement(Op::VT_MAIN_TYPE, static_cast(main_type), 0); - } - void add_main(flatbuffers::Offset main) { - fbb_.AddOffset(Op::VT_MAIN, main); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(Op::VT_NAME, name); - } - void add_outputIndexes(flatbuffers::Offset> outputIndexes) { - fbb_.AddOffset(Op::VT_OUTPUTINDEXES, outputIndexes); - } - void add_type(OpType type) { - fbb_.AddElement(Op::VT_TYPE, static_cast(type), 0); - } - void add_defaultDimentionFormat(MNN_DATA_FORMAT defaultDimentionFormat) { - fbb_.AddElement(Op::VT_DEFAULTDIMENTIONFORMAT, static_cast(defaultDimentionFormat), 1); - } - explicit OpBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OpBuilder &operator=(const OpBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOp( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> inputIndexes = 0, - OpParameter main_type = OpParameter_NONE, - flatbuffers::Offset main = 0, - 
flatbuffers::Offset name = 0, - flatbuffers::Offset> outputIndexes = 0, - OpType type = OpType_AbsVal, - MNN_DATA_FORMAT defaultDimentionFormat = MNN_DATA_FORMAT_NHWC) { - OpBuilder builder_(_fbb); - builder_.add_type(type); - builder_.add_outputIndexes(outputIndexes); - builder_.add_name(name); - builder_.add_main(main); - builder_.add_inputIndexes(inputIndexes); - builder_.add_defaultDimentionFormat(defaultDimentionFormat); - builder_.add_main_type(main_type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateOpDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *inputIndexes = nullptr, - OpParameter main_type = OpParameter_NONE, - flatbuffers::Offset main = 0, - const char *name = nullptr, - const std::vector *outputIndexes = nullptr, - OpType type = OpType_AbsVal, - MNN_DATA_FORMAT defaultDimentionFormat = MNN_DATA_FORMAT_NHWC) { - auto inputIndexes__ = inputIndexes ? _fbb.CreateVector(*inputIndexes) : 0; - auto name__ = name ? _fbb.CreateString(name) : 0; - auto outputIndexes__ = outputIndexes ? _fbb.CreateVector(*outputIndexes) : 0; - return MNN::CreateOp( - _fbb, - inputIndexes__, - main_type, - main, - name__, - outputIndexes__, - type, - defaultDimentionFormat); -} - -flatbuffers::Offset CreateOp(flatbuffers::FlatBufferBuilder &_fbb, const OpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TensorDescribeT : public flatbuffers::NativeTable { - typedef TensorDescribe TableType; - std::unique_ptr blob; - int32_t index; - std::string name; - TensorDescribeT() - : index(0) { - } -}; - -struct TensorDescribe FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TensorDescribeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TensorDescribeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOB = 4, - VT_INDEX = 6, - VT_NAME = 8 - }; - const Blob *blob() const { - return GetPointer(VT_BLOB); - } - int32_t index() const { - return GetField(VT_INDEX, 0); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_BLOB) && - verifier.VerifyTable(blob()) && - VerifyField(verifier, VT_INDEX) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); - } - TensorDescribeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorDescribeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorDescribeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TensorDescribeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_blob(flatbuffers::Offset blob) { - fbb_.AddOffset(TensorDescribe::VT_BLOB, blob); - } - void add_index(int32_t index) { - fbb_.AddElement(TensorDescribe::VT_INDEX, index, 0); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(TensorDescribe::VT_NAME, name); - } - explicit TensorDescribeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TensorDescribeBuilder &operator=(const TensorDescribeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTensorDescribe( 
[... the remainder of this hunk deletes the rest of the checked-in generated FlatBuffers header (MNN namespace):
the TensorDescribe and Net tables with their NativeTable counterparts (NetT fields: bizCode, extraTensorDescribe,
gpulibrary, oplists, outputName, preferForwardType, sourceType, tensorName, tensorNumber, usage), the
TensorDescribeBuilder/NetBuilder structs and the CreateTensorDescribe/CreateNet helpers plus their *Direct variants,
the object-API UnPack/UnPackTo/Pack implementations for Plugin, Extra, Op, TensorDescribe and Net, the
VerifyOpParameter and VerifyOpParameterVector switches, the OpParameterUnion UnPack/Pack/copy-constructor/Reset
switches over every OpParameter case, and the OpType TypeTable reflection data ...]
0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - OpTypeTypeTable - }; - static const int64_t values[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 128, 129, 130, 131, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 512, 513, 514, 515, 516, 517, 518 }; - static const char * const names[] = { - "AbsVal", - "QuantizedAdd", - "ArgMax", - "AsString", - "InstanceNorm", - "BatchToSpaceND", - "Bias", - "BinaryOp", - "Bnll", - "Cast", - "Concat", - "Const", - "Convolution", - "ConvolutionDepthwise", - "Crop", - "CropAndResize", - "Cubic", - "Deconvolution", - "DeconvolutionDepthwise", - "Dequantize", - "DetectionOutput", - "Dropout", - "Eltwise", - "ELU", - "Embed", - "Exp", - "ExpandDims", - "Fill", - "Flatten", - "FloorMod", - "Gather", - "GatherV2", - "Im2Seq", - "InnerProduct", - "Input", - "Interp", - "Log", - "LRN", - "LSTM", - "MatMul", - "MVN", - "NonMaxSuppression", - "NonMaxSuppressionV2", - "Normalize", - "Pack", - "Padding", - "Permute", - "Pooling", - "Power", - "PReLU", - "PriorBox", - "Proposal", - "QuantizedAvgPool", - "QuantizedBiasAdd", - "QuantizedConcat", - "QuantizedDepthwiseConv2D", - "QuantizedLogistic", - "QuantizedMatMul", - "QuantizedMaxPool", - "QuantizedRelu", - "QuantizedRelu6", - "QuantizedReshape", - "QuantizedSoftmax", - "QuantizeMaxMin", - "QuantizeV2", - "Range", - "Rank", - "ReduceJoin", - "Reduction", - "ReLU", - "ReLU6", - "RequantizationRange", - "Requantize", - "Reshape", - "Resize", - "RNN", - "ROIPooling", - "Scale", - "Selu", - "Seq2Out", - "Shape", - "Sigmoid", - "Size", - "Slice", - "SliceTf", - "Softmax", - "SpaceToBatchND", - "SpatialProduct", - "Split", - "SPP", - "Squeeze", - "StridedSlice", - "StringJoin", - "StringSplit", - "StringToNumber", - "TanH", - "TfQuantizedConv2D", - "Threshold", - "Tile", - "TopKV2", - "Transpose", - "UnaryOp", - "Unpack", - "Where", - "Moments", - "RNNSequenceGRU", - "BatchMatMul", - "Unsqueeze", - "CosineSimilarity", - "DepthToSpace", - "SpaceToDepth", - "ReverseSequence", - "Pooling3D", - "Convolution3D", - "MatrixBandPart", - "GatherND", - "DetectionPostProcess", - "UnravelIndex", - "ScatterNd", - "OneHot", - "BroadcastTo", - "Dilation2D", - "MaxLayerCount", - "ConvertTensor", - "ArgMin", - "LinSpace", - "PLUGIN", - "Select", - "ZerosLike", - "Broastcast", - "SetDiff1D", - "ReluGrad", - "Relu6Grad", - "PoolGrad", - "SoftmaxGrad", - "Conv2DBackPropFilter", - "TrainableParam", - "BatchNorm", - "Extra", - "ConvInt8", - "Int8ToFloat", - "DepthwiseConvInt8", - "PoolInt8", - "FloatToInt8", - "EltwiseInt8" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 145, type_codes, type_refs, values, names - }; - return &tt; -} - -inline const 
flatbuffers::TypeTable *OpParameterTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 }, - { flatbuffers::ET_SEQUENCE, 0, 3 }, - { flatbuffers::ET_SEQUENCE, 0, 4 }, - { flatbuffers::ET_SEQUENCE, 0, 5 }, - { flatbuffers::ET_SEQUENCE, 0, 6 }, - { flatbuffers::ET_SEQUENCE, 0, 7 }, - { flatbuffers::ET_SEQUENCE, 0, 8 }, - { flatbuffers::ET_SEQUENCE, 0, 9 }, - { flatbuffers::ET_SEQUENCE, 0, 10 }, - { flatbuffers::ET_SEQUENCE, 0, 11 }, - { flatbuffers::ET_SEQUENCE, 0, 12 }, - { flatbuffers::ET_SEQUENCE, 0, 13 }, - { flatbuffers::ET_SEQUENCE, 0, 14 }, - { flatbuffers::ET_SEQUENCE, 0, 15 }, - { flatbuffers::ET_SEQUENCE, 0, 16 }, - { flatbuffers::ET_SEQUENCE, 0, 17 }, - { flatbuffers::ET_SEQUENCE, 0, 18 }, - { flatbuffers::ET_SEQUENCE, 0, 19 }, - { flatbuffers::ET_SEQUENCE, 0, 20 }, - { flatbuffers::ET_SEQUENCE, 0, 21 }, - { flatbuffers::ET_SEQUENCE, 0, 22 }, - { flatbuffers::ET_SEQUENCE, 0, 23 }, - { flatbuffers::ET_SEQUENCE, 0, 24 }, - { flatbuffers::ET_SEQUENCE, 0, 25 }, - { flatbuffers::ET_SEQUENCE, 0, 26 }, - { flatbuffers::ET_SEQUENCE, 0, 27 }, - { flatbuffers::ET_SEQUENCE, 0, 28 }, - { flatbuffers::ET_SEQUENCE, 0, 29 }, - { flatbuffers::ET_SEQUENCE, 0, 30 }, - { flatbuffers::ET_SEQUENCE, 0, 31 }, - { flatbuffers::ET_SEQUENCE, 0, 32 }, - { flatbuffers::ET_SEQUENCE, 0, 33 }, - { flatbuffers::ET_SEQUENCE, 0, 34 }, - { flatbuffers::ET_SEQUENCE, 0, 35 }, - { flatbuffers::ET_SEQUENCE, 0, 36 }, - { flatbuffers::ET_SEQUENCE, 0, 37 }, - { flatbuffers::ET_SEQUENCE, 0, 38 }, - { flatbuffers::ET_SEQUENCE, 0, 39 }, - { flatbuffers::ET_SEQUENCE, 0, 40 }, - { flatbuffers::ET_SEQUENCE, 0, 41 }, - { flatbuffers::ET_SEQUENCE, 0, 42 }, - { flatbuffers::ET_SEQUENCE, 0, 43 }, - { flatbuffers::ET_SEQUENCE, 0, 44 }, - { flatbuffers::ET_SEQUENCE, 0, 45 }, - { flatbuffers::ET_SEQUENCE, 0, 46 }, - { flatbuffers::ET_SEQUENCE, 0, 47 }, - { flatbuffers::ET_SEQUENCE, 0, 48 }, - { flatbuffers::ET_SEQUENCE, 0, 49 }, - { flatbuffers::ET_SEQUENCE, 0, 50 }, - { flatbuffers::ET_SEQUENCE, 0, 51 }, - { flatbuffers::ET_SEQUENCE, 0, 52 }, - { flatbuffers::ET_SEQUENCE, 0, 53 }, - { flatbuffers::ET_SEQUENCE, 0, 54 }, - { flatbuffers::ET_SEQUENCE, 0, 55 }, - { flatbuffers::ET_SEQUENCE, 0, 56 }, - { flatbuffers::ET_SEQUENCE, 0, 57 }, - { flatbuffers::ET_SEQUENCE, 0, 58 }, - { flatbuffers::ET_SEQUENCE, 0, 59 }, - { flatbuffers::ET_SEQUENCE, 0, 60 }, - { flatbuffers::ET_SEQUENCE, 0, 61 }, - { flatbuffers::ET_SEQUENCE, 0, 62 }, - { flatbuffers::ET_SEQUENCE, 0, 63 }, - { flatbuffers::ET_SEQUENCE, 0, 64 }, - { flatbuffers::ET_SEQUENCE, 0, 65 }, - { flatbuffers::ET_SEQUENCE, 0, 66 }, - { flatbuffers::ET_SEQUENCE, 0, 67 }, - { flatbuffers::ET_SEQUENCE, 0, 68 }, - { flatbuffers::ET_SEQUENCE, 0, 69 }, - { flatbuffers::ET_SEQUENCE, 0, 70 }, - { flatbuffers::ET_SEQUENCE, 0, 71 }, - { flatbuffers::ET_SEQUENCE, 0, 72 }, - { flatbuffers::ET_SEQUENCE, 0, 73 }, - { flatbuffers::ET_SEQUENCE, 0, 74 }, - { flatbuffers::ET_SEQUENCE, 0, 75 }, - { flatbuffers::ET_SEQUENCE, 0, 76 }, - { flatbuffers::ET_SEQUENCE, 0, 77 }, - { flatbuffers::ET_SEQUENCE, 0, 78 }, - { flatbuffers::ET_SEQUENCE, 0, 79 }, - { flatbuffers::ET_SEQUENCE, 0, 80 }, - { flatbuffers::ET_SEQUENCE, 0, 81 }, - { flatbuffers::ET_SEQUENCE, 0, 82 }, - { flatbuffers::ET_SEQUENCE, 0, 83 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - QuantizedAddTypeTable, - ArgMaxTypeTable, - AsStringTypeTable, - AxisTypeTable, - 
BatchNormTypeTable, - BinaryOpTypeTable, - BlobTypeTable, - CastParamTypeTable, - Convolution2DTypeTable, - CropTypeTable, - CropAndResizeTypeTable, - DequantizeTypeTable, - DetectionOutputTypeTable, - EltwiseTypeTable, - ExpandDimsTypeTable, - FillTypeTable, - FlattenTypeTable, - GatherTypeTable, - GatherV2TypeTable, - InnerProductTypeTable, - InputTypeTable, - InterpTypeTable, - LRNTypeTable, - LSTMTypeTable, - MatMulTypeTable, - NonMaxSuppressionV2TypeTable, - NormalizeTypeTable, - PackParamTypeTable, - PermuteTypeTable, - PluginTypeTable, - PoolTypeTable, - PReluTypeTable, - PriorBoxTypeTable, - ProposalTypeTable, - QuantizedAvgPoolTypeTable, - QuantizedBiasAddTypeTable, - QuantizedConcatTypeTable, - QuantizedLogisticTypeTable, - QuantizedMatMulTypeTable, - QuantizedMaxPoolTypeTable, - QuantizedReluTypeTable, - QuantizedRelu6TypeTable, - QuantizedReshapeTypeTable, - QuantizedSoftmaxTypeTable, - QuantizeMaxMinTypeTable, - QuantizeV2TypeTable, - RangeTypeTable, - RankTypeTable, - ReduceJoinTypeTable, - ReductionParamTypeTable, - ReluTypeTable, - Relu6TypeTable, - RequantizationRangeTypeTable, - RequantizeTypeTable, - ReshapeTypeTable, - ResizeTypeTable, - RoiPoolingTypeTable, - ScaleTypeTable, - SeluTypeTable, - SizeTypeTable, - SliceTypeTable, - SliceTfTypeTable, - SpaceBatchTypeTable, - SqueezeParamTypeTable, - StridedSliceParamTypeTable, - TensorConvertInfoTypeTable, - TfQuantizedConv2DTypeTable, - TopKV2TypeTable, - TransposeTypeTable, - UnaryOpTypeTable, - MomentsParamTypeTable, - RNNParamTypeTable, - BatchMatMulParamTypeTable, - QuantizedFloatParamTypeTable, - DepthSpaceParamTypeTable, - EltwiseInt8TypeTable, - ReverseSequenceParamTypeTable, - ExtraTypeTable, - Pool3DTypeTable, - Convolution3DTypeTable, - ELUTypeTable, - DetectionPostProcessParamTypeTable, - OneHotParamTypeTable, - PadParamTypeTable - }; - static const char * const names[] = { - "NONE", - "QuantizedAdd", - "ArgMax", - "AsString", - "Axis", - "BatchNorm", - "BinaryOp", - "Blob", - "CastParam", - "Convolution2D", - "Crop", - "CropAndResize", - "Dequantize", - "DetectionOutput", - "Eltwise", - "ExpandDims", - "Fill", - "Flatten", - "Gather", - "GatherV2", - "InnerProduct", - "Input", - "Interp", - "LRN", - "LSTM", - "MatMul", - "NonMaxSuppressionV2", - "Normalize", - "PackParam", - "Permute", - "Plugin", - "Pool", - "PRelu", - "PriorBox", - "Proposal", - "QuantizedAvgPool", - "QuantizedBiasAdd", - "QuantizedConcat", - "QuantizedLogistic", - "QuantizedMatMul", - "QuantizedMaxPool", - "QuantizedRelu", - "QuantizedRelu6", - "QuantizedReshape", - "QuantizedSoftmax", - "QuantizeMaxMin", - "QuantizeV2", - "Range", - "Rank", - "ReduceJoin", - "ReductionParam", - "Relu", - "Relu6", - "RequantizationRange", - "Requantize", - "Reshape", - "Resize", - "RoiPooling", - "Scale", - "Selu", - "Size", - "Slice", - "SliceTf", - "SpaceBatch", - "SqueezeParam", - "StridedSliceParam", - "TensorConvertInfo", - "TfQuantizedConv2D", - "TopKV2", - "Transpose", - "UnaryOp", - "MomentsParam", - "RNNParam", - "BatchMatMulParam", - "QuantizedFloatParam", - "DepthSpaceParam", - "EltwiseInt8", - "ReverseSequenceParam", - "Extra", - "Pool3D", - "Convolution3D", - "ELU", - "DetectionPostProcessParam", - "OneHotParam", - "PadParam" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_UNION, 85, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ForwardTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, 
-    { flatbuffers::ET_CHAR, 0, 0 },
-    { flatbuffers::ET_CHAR, 0, 0 },
-    { flatbuffers::ET_CHAR, 0, 0 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    ForwardTypeTypeTable
-  };
-  static const char * const names[] = {
-    "CPU",
-    "METAL",
-    "OPENCL",
-    "OPENGLES",
-    "VULKAN"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_ENUM, 5, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const flatbuffers::TypeTable *UsageTypeTable() {
-  static const flatbuffers::TypeCode type_codes[] = {
-    { flatbuffers::ET_CHAR, 0, 0 },
-    { flatbuffers::ET_CHAR, 0, 0 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    UsageTypeTable
-  };
-  static const char * const names[] = {
-    "INFERENCE",
-    "TRAIN"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const flatbuffers::TypeTable *PluginTypeTable() {
-  static const flatbuffers::TypeCode type_codes[] = {
-    { flatbuffers::ET_STRING, 0, -1 },
-    { flatbuffers::ET_SEQUENCE, 1, 0 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    BlobTypeTable
-  };
-  static const char * const names[] = {
-    "type",
-    "buffer"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const flatbuffers::TypeTable *ExtraTypeTable() {
-  static const flatbuffers::TypeCode type_codes[] = {
-    { flatbuffers::ET_STRING, 0, -1 },
-    { flatbuffers::ET_STRING, 0, -1 },
-    { flatbuffers::ET_CHAR, 1, -1 },
-    { flatbuffers::ET_SEQUENCE, 1, 0 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    AttributeTypeTable
-  };
-  static const char * const names[] = {
-    "type",
-    "engine",
-    "info",
-    "attr"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const flatbuffers::TypeTable *OpTypeTable() {
-  static const flatbuffers::TypeCode type_codes[] = {
-    { flatbuffers::ET_INT, 1, -1 },
-    { flatbuffers::ET_UTYPE, 0, 0 },
-    { flatbuffers::ET_SEQUENCE, 0, 0 },
-    { flatbuffers::ET_STRING, 0, -1 },
-    { flatbuffers::ET_INT, 1, -1 },
-    { flatbuffers::ET_INT, 0, 1 },
-    { flatbuffers::ET_CHAR, 0, 2 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    OpParameterTypeTable,
-    OpTypeTypeTable,
-    MNN_DATA_FORMATTypeTable
-  };
-  static const char * const names[] = {
-    "inputIndexes",
-    "main_type",
-    "main",
-    "name",
-    "outputIndexes",
-    "type",
-    "defaultDimentionFormat"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_TABLE, 7, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const flatbuffers::TypeTable *TensorDescribeTypeTable() {
-  static const flatbuffers::TypeCode type_codes[] = {
-    { flatbuffers::ET_SEQUENCE, 0, 0 },
-    { flatbuffers::ET_INT, 0, -1 },
-    { flatbuffers::ET_STRING, 0, -1 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    BlobTypeTable
-  };
-  static const char * const names[] = {
-    "blob",
-    "index",
-    "name"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const flatbuffers::TypeTable *NetTypeTable() {
-  static const flatbuffers::TypeCode type_codes[] = {
-    { flatbuffers::ET_STRING, 0, -1 },
-    { flatbuffers::ET_SEQUENCE, 1, 0 },
-    { flatbuffers::ET_SEQUENCE, 0, 1 },
-    { flatbuffers::ET_SEQUENCE, 1, 2 },
-    { flatbuffers::ET_STRING, 1, -1 },
-    { flatbuffers::ET_CHAR, 0, 3 },
-    { flatbuffers::ET_CHAR, 0, 4 },
-    { flatbuffers::ET_STRING, 1, -1 },
-    { flatbuffers::ET_INT, 0, -1 },
-    { flatbuffers::ET_CHAR, 0, 5 }
-  };
-  static const flatbuffers::TypeFunction type_refs[] = {
-    TensorDescribeTypeTable,
-    GpuLibraryTypeTable,
-    OpTypeTable,
-    ForwardTypeTypeTable,
-    NetSourceTypeTable,
-    UsageTypeTable
-  };
-  static const char * const names[] = {
-    "bizCode",
-    "extraTensorDescribe",
-    "gpulibrary",
-    "oplists",
-    "outputName",
-    "preferForwardType",
-    "sourceType",
-    "tensorName",
-    "tensorNumber",
-    "usage"
-  };
-  static const flatbuffers::TypeTable tt = {
-    flatbuffers::ST_TABLE, 10, type_codes, type_refs, nullptr, names
-  };
-  return &tt;
-}
-
-inline const MNN::Net *GetNet(const void *buf) {
-  return flatbuffers::GetRoot<MNN::Net>(buf);
-}
-
-inline const MNN::Net *GetSizePrefixedNet(const void *buf) {
-  return flatbuffers::GetSizePrefixedRoot<MNN::Net>(buf);
-}
-
-inline bool VerifyNetBuffer(
-    flatbuffers::Verifier &verifier) {
-  return verifier.VerifyBuffer<MNN::Net>(nullptr);
-}
-
-inline bool VerifySizePrefixedNetBuffer(
-    flatbuffers::Verifier &verifier) {
-  return verifier.VerifySizePrefixedBuffer<MNN::Net>(nullptr);
-}
-
-inline void FinishNetBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<MNN::Net> root) {
-  fbb.Finish(root);
-}
-
-inline void FinishSizePrefixedNetBuffer(
-    flatbuffers::FlatBufferBuilder &fbb,
-    flatbuffers::Offset<MNN::Net> root) {
-  fbb.FinishSizePrefixed(root);
-}
-
-inline std::unique_ptr<MNN::NetT> UnPackNet(
-    const void *buf,
-    const flatbuffers::resolver_function_t *res = nullptr) {
-  return std::unique_ptr<MNN::NetT>(GetNet(buf)->UnPack(res));
-}
-
-}  // namespace MNN
-
-#endif  // FLATBUFFERS_GENERATED_MNN_MNN_H_
diff --git a/schema/current/TFQuantizeOp_generated.h b/schema/current/TFQuantizeOp_generated.h
deleted file mode 100644
index 18360b9c..00000000
--- a/schema/current/TFQuantizeOp_generated.h
+++ /dev/null
@@ -1,2978 +0,0 @@
-// automatically generated by the FlatBuffers compiler, do not modify
-
-
-#ifndef FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_
-#define FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_
-
-
-#include "CaffeOp_generated.h"
-#include "Tensor_generated.h"
-#include "Type_generated.h"
-
-namespace MNN {
-
-struct QuantizedParam;
-struct QuantizedParamT;
-
-struct QuantizedAdd;
-struct QuantizedAddT;
-
-struct Dequantize;
-struct DequantizeT;
-
-struct QuantizedAvgPool;
-struct QuantizedAvgPoolT;
-
-struct QuantizedBiasAdd;
-struct QuantizedBiasAddT;
-
-struct QuantizedConcat;
-struct QuantizedConcatT;
-
-struct QuantizedLogistic;
-struct QuantizedLogisticT;
-
-struct QuantizedMatMul;
-struct QuantizedMatMulT;
-
-struct QuantizedMaxPool;
-struct QuantizedMaxPoolT;
-
-struct QuantizedRelu;
-struct QuantizedReluT;
-
-struct QuantizedRelu6;
-struct QuantizedRelu6T;
-
-struct QuantizedReshape;
-struct QuantizedReshapeT;
-
-struct QuantizedSoftmax;
-struct QuantizedSoftmaxT;
-
-struct QuantizeV2;
-struct QuantizeV2T;
-
-struct RequantizationRange;
-struct RequantizationRangeT;
-
-struct Requantize;
-struct RequantizeT;
-
-struct TfQuantizedConv2D;
-struct TfQuantizedConv2DT;
-
-inline const flatbuffers::TypeTable *QuantizedParamTypeTable();
-
-inline const flatbuffers::TypeTable *QuantizedAddTypeTable();
-
-inline const flatbuffers::TypeTable *DequantizeTypeTable();
-
-inline const flatbuffers::TypeTable *QuantizedAvgPoolTypeTable();
-
-inline const flatbuffers::TypeTable *QuantizedBiasAddTypeTable();
-
-inline const flatbuffers::TypeTable *QuantizedConcatTypeTable();
-
-inline const flatbuffers::TypeTable *QuantizedLogisticTypeTable();
-
-inline const 
flatbuffers::TypeTable *QuantizedMatMulTypeTable(); - -inline const flatbuffers::TypeTable *QuantizedMaxPoolTypeTable(); - -inline const flatbuffers::TypeTable *QuantizedReluTypeTable(); - -inline const flatbuffers::TypeTable *QuantizedRelu6TypeTable(); - -inline const flatbuffers::TypeTable *QuantizedReshapeTypeTable(); - -inline const flatbuffers::TypeTable *QuantizedSoftmaxTypeTable(); - -inline const flatbuffers::TypeTable *QuantizeV2TypeTable(); - -inline const flatbuffers::TypeTable *RequantizationRangeTypeTable(); - -inline const flatbuffers::TypeTable *RequantizeTypeTable(); - -inline const flatbuffers::TypeTable *TfQuantizedConv2DTypeTable(); - -enum FusedActivation { - FusedActivation_kTfLiteActNone = 0, - FusedActivation_kTfLiteActRelu = 1, - FusedActivation_kTfLiteActRelu1 = 2, - FusedActivation_kTfLiteActRelu6 = 3, - FusedActivation_kTfLiteActTanh = 4, - FusedActivation_kTfLiteActSignBit = 5, - FusedActivation_kTfLiteActSigmoid = 6, - FusedActivation_MIN = FusedActivation_kTfLiteActNone, - FusedActivation_MAX = FusedActivation_kTfLiteActSigmoid -}; - -inline const FusedActivation (&EnumValuesFusedActivation())[7] { - static const FusedActivation values[] = { - FusedActivation_kTfLiteActNone, - FusedActivation_kTfLiteActRelu, - FusedActivation_kTfLiteActRelu1, - FusedActivation_kTfLiteActRelu6, - FusedActivation_kTfLiteActTanh, - FusedActivation_kTfLiteActSignBit, - FusedActivation_kTfLiteActSigmoid - }; - return values; -} - -inline const char * const *EnumNamesFusedActivation() { - static const char * const names[] = { - "kTfLiteActNone", - "kTfLiteActRelu", - "kTfLiteActRelu1", - "kTfLiteActRelu6", - "kTfLiteActTanh", - "kTfLiteActSignBit", - "kTfLiteActSigmoid", - nullptr - }; - return names; -} - -inline const char *EnumNameFusedActivation(FusedActivation e) { - if (e < FusedActivation_kTfLiteActNone || e > FusedActivation_kTfLiteActSigmoid) return ""; - const size_t index = static_cast(e); - return EnumNamesFusedActivation()[index]; -} - -enum ModeFormat { - ModeFormat_TENSORFLOW = 0, - ModeFormat_TFLITE = 1, - ModeFormat_MIN = ModeFormat_TENSORFLOW, - ModeFormat_MAX = ModeFormat_TFLITE -}; - -inline const ModeFormat (&EnumValuesModeFormat())[2] { - static const ModeFormat values[] = { - ModeFormat_TENSORFLOW, - ModeFormat_TFLITE - }; - return values; -} - -inline const char * const *EnumNamesModeFormat() { - static const char * const names[] = { - "TENSORFLOW", - "TFLITE", - nullptr - }; - return names; -} - -inline const char *EnumNameModeFormat(ModeFormat e) { - if (e < ModeFormat_TENSORFLOW || e > ModeFormat_TFLITE) return ""; - const size_t index = static_cast(e); - return EnumNamesModeFormat()[index]; -} - -enum QuantizeMode { - QuantizeMode_MIN_COMBINED = 0, - QuantizeMode_MIN_FIRST = 1, - QuantizeMode_SCALED = 2, - QuantizeMode_MIN = QuantizeMode_MIN_COMBINED, - QuantizeMode_MAX = QuantizeMode_SCALED -}; - -inline const QuantizeMode (&EnumValuesQuantizeMode())[3] { - static const QuantizeMode values[] = { - QuantizeMode_MIN_COMBINED, - QuantizeMode_MIN_FIRST, - QuantizeMode_SCALED - }; - return values; -} - -inline const char * const *EnumNamesQuantizeMode() { - static const char * const names[] = { - "MIN_COMBINED", - "MIN_FIRST", - "SCALED", - nullptr - }; - return names; -} - -inline const char *EnumNameQuantizeMode(QuantizeMode e) { - if (e < QuantizeMode_MIN_COMBINED || e > QuantizeMode_SCALED) return ""; - const size_t index = static_cast(e); - return EnumNamesQuantizeMode()[index]; -} - -enum QuantizeRoundMode { - QuantizeRoundMode_HALF_AWAY_FROM_ZERO = 0, 
- QuantizeRoundMode_HALF_TO_EVEN = 1, - QuantizeRoundMode_MIN = QuantizeRoundMode_HALF_AWAY_FROM_ZERO, - QuantizeRoundMode_MAX = QuantizeRoundMode_HALF_TO_EVEN -}; - -inline const QuantizeRoundMode (&EnumValuesQuantizeRoundMode())[2] { - static const QuantizeRoundMode values[] = { - QuantizeRoundMode_HALF_AWAY_FROM_ZERO, - QuantizeRoundMode_HALF_TO_EVEN - }; - return values; -} - -inline const char * const *EnumNamesQuantizeRoundMode() { - static const char * const names[] = { - "HALF_AWAY_FROM_ZERO", - "HALF_TO_EVEN", - nullptr - }; - return names; -} - -inline const char *EnumNameQuantizeRoundMode(QuantizeRoundMode e) { - if (e < QuantizeRoundMode_HALF_AWAY_FROM_ZERO || e > QuantizeRoundMode_HALF_TO_EVEN) return ""; - const size_t index = static_cast(e); - return EnumNamesQuantizeRoundMode()[index]; -} - -struct QuantizedParamT : public flatbuffers::NativeTable { - typedef QuantizedParam TableType; - int32_t zeroPoint; - float scale; - QuantizedParamT() - : zeroPoint(0), - scale(0.0f) { - } -}; - -struct QuantizedParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ZEROPOINT = 4, - VT_SCALE = 6 - }; - int32_t zeroPoint() const { - return GetField(VT_ZEROPOINT, 0); - } - float scale() const { - return GetField(VT_SCALE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ZEROPOINT) && - VerifyField(verifier, VT_SCALE) && - verifier.EndTable(); - } - QuantizedParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_zeroPoint(int32_t zeroPoint) { - fbb_.AddElement(QuantizedParam::VT_ZEROPOINT, zeroPoint, 0); - } - void add_scale(float scale) { - fbb_.AddElement(QuantizedParam::VT_SCALE, scale, 0.0f); - } - explicit QuantizedParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedParamBuilder &operator=(const QuantizedParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedParam( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t zeroPoint = 0, - float scale = 0.0f) { - QuantizedParamBuilder builder_(_fbb); - builder_.add_scale(scale); - builder_.add_zeroPoint(zeroPoint); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedAddT : public flatbuffers::NativeTable { - typedef QuantizedAdd TableType; - FusedActivation activationType; - std::unique_ptr input1QuantizedParam; - std::unique_ptr input2QuantizedParam; - std::unique_ptr outputQuantizedParam; - QuantizedAddT() - : activationType(FusedActivation_kTfLiteActNone) { - } -}; - -struct QuantizedAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedAddT 
NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedAddTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ACTIVATIONTYPE = 4, - VT_INPUT1QUANTIZEDPARAM = 6, - VT_INPUT2QUANTIZEDPARAM = 8, - VT_OUTPUTQUANTIZEDPARAM = 10 - }; - FusedActivation activationType() const { - return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); - } - const QuantizedParam *input1QuantizedParam() const { - return GetPointer(VT_INPUT1QUANTIZEDPARAM); - } - const QuantizedParam *input2QuantizedParam() const { - return GetPointer(VT_INPUT2QUANTIZEDPARAM); - } - const QuantizedParam *outputQuantizedParam() const { - return GetPointer(VT_OUTPUTQUANTIZEDPARAM); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ACTIVATIONTYPE) && - VerifyOffset(verifier, VT_INPUT1QUANTIZEDPARAM) && - verifier.VerifyTable(input1QuantizedParam()) && - VerifyOffset(verifier, VT_INPUT2QUANTIZEDPARAM) && - verifier.VerifyTable(input2QuantizedParam()) && - VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && - verifier.VerifyTable(outputQuantizedParam()) && - verifier.EndTable(); - } - QuantizedAddT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedAddT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedAddBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_activationType(FusedActivation activationType) { - fbb_.AddElement(QuantizedAdd::VT_ACTIVATIONTYPE, static_cast(activationType), 0); - } - void add_input1QuantizedParam(flatbuffers::Offset input1QuantizedParam) { - fbb_.AddOffset(QuantizedAdd::VT_INPUT1QUANTIZEDPARAM, input1QuantizedParam); - } - void add_input2QuantizedParam(flatbuffers::Offset input2QuantizedParam) { - fbb_.AddOffset(QuantizedAdd::VT_INPUT2QUANTIZEDPARAM, input2QuantizedParam); - } - void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { - fbb_.AddOffset(QuantizedAdd::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); - } - explicit QuantizedAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedAddBuilder &operator=(const QuantizedAddBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedAdd( - flatbuffers::FlatBufferBuilder &_fbb, - FusedActivation activationType = FusedActivation_kTfLiteActNone, - flatbuffers::Offset input1QuantizedParam = 0, - flatbuffers::Offset input2QuantizedParam = 0, - flatbuffers::Offset outputQuantizedParam = 0) { - QuantizedAddBuilder builder_(_fbb); - builder_.add_outputQuantizedParam(outputQuantizedParam); - builder_.add_input2QuantizedParam(input2QuantizedParam); - builder_.add_input1QuantizedParam(input1QuantizedParam); - builder_.add_activationType(activationType); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DequantizeT : public flatbuffers::NativeTable { - typedef Dequantize TableType; - std::unique_ptr inputQuantizedParam; - QuantizeMode mode; - ModeFormat modelFormat; - DataType 
type; - DequantizeT() - : mode(QuantizeMode_MIN_COMBINED), - modelFormat(ModeFormat_TENSORFLOW), - type(DataType_DT_INVALID) { - } -}; - -struct Dequantize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DequantizeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return DequantizeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUTQUANTIZEDPARAM = 4, - VT_MODE = 6, - VT_MODELFORMAT = 8, - VT_TYPE = 10 - }; - const QuantizedParam *inputQuantizedParam() const { - return GetPointer(VT_INPUTQUANTIZEDPARAM); - } - QuantizeMode mode() const { - return static_cast(GetField(VT_MODE, 0)); - } - ModeFormat modelFormat() const { - return static_cast(GetField(VT_MODELFORMAT, 0)); - } - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) && - verifier.VerifyTable(inputQuantizedParam()) && - VerifyField(verifier, VT_MODE) && - VerifyField(verifier, VT_MODELFORMAT) && - VerifyField(verifier, VT_TYPE) && - verifier.EndTable(); - } - DequantizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DequantizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DequantizeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_inputQuantizedParam(flatbuffers::Offset inputQuantizedParam) { - fbb_.AddOffset(Dequantize::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam); - } - void add_mode(QuantizeMode mode) { - fbb_.AddElement(Dequantize::VT_MODE, static_cast(mode), 0); - } - void add_modelFormat(ModeFormat modelFormat) { - fbb_.AddElement(Dequantize::VT_MODELFORMAT, static_cast(modelFormat), 0); - } - void add_type(DataType type) { - fbb_.AddElement(Dequantize::VT_TYPE, static_cast(type), 0); - } - explicit DequantizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DequantizeBuilder &operator=(const DequantizeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDequantize( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset inputQuantizedParam = 0, - QuantizeMode mode = QuantizeMode_MIN_COMBINED, - ModeFormat modelFormat = ModeFormat_TENSORFLOW, - DataType type = DataType_DT_INVALID) { - DequantizeBuilder builder_(_fbb); - builder_.add_type(type); - builder_.add_inputQuantizedParam(inputQuantizedParam); - builder_.add_modelFormat(modelFormat); - builder_.add_mode(mode); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDequantize(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedAvgPoolT : public flatbuffers::NativeTable { - typedef QuantizedAvgPool TableType; - int32_t kernelX; - int32_t kernelY; - ModeFormat modelFormat; - int32_t outputActivationMax; - int32_t outputActivationMin; - PoolPadType padType; - int32_t padX; - int32_t padY; - int32_t strideX; - int32_t strideY; - DataType type; - QuantizedAvgPoolT() - : kernelX(0), - kernelY(0), - modelFormat(ModeFormat_TENSORFLOW), - outputActivationMax(0), - 
outputActivationMin(0), - padType(PoolPadType_CAFFE), - padX(0), - padY(0), - strideX(0), - strideY(0), - type(DataType_DT_INVALID) { - } -}; - -struct QuantizedAvgPool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedAvgPoolT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedAvgPoolTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KERNELX = 4, - VT_KERNELY = 6, - VT_MODELFORMAT = 8, - VT_OUTPUTACTIVATIONMAX = 10, - VT_OUTPUTACTIVATIONMIN = 12, - VT_PADTYPE = 14, - VT_PADX = 16, - VT_PADY = 18, - VT_STRIDEX = 20, - VT_STRIDEY = 22, - VT_TYPE = 24 - }; - int32_t kernelX() const { - return GetField(VT_KERNELX, 0); - } - int32_t kernelY() const { - return GetField(VT_KERNELY, 0); - } - ModeFormat modelFormat() const { - return static_cast(GetField(VT_MODELFORMAT, 0)); - } - int32_t outputActivationMax() const { - return GetField(VT_OUTPUTACTIVATIONMAX, 0); - } - int32_t outputActivationMin() const { - return GetField(VT_OUTPUTACTIVATIONMIN, 0); - } - PoolPadType padType() const { - return static_cast(GetField(VT_PADTYPE, 0)); - } - int32_t padX() const { - return GetField(VT_PADX, 0); - } - int32_t padY() const { - return GetField(VT_PADY, 0); - } - int32_t strideX() const { - return GetField(VT_STRIDEX, 0); - } - int32_t strideY() const { - return GetField(VT_STRIDEY, 0); - } - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KERNELX) && - VerifyField(verifier, VT_KERNELY) && - VerifyField(verifier, VT_MODELFORMAT) && - VerifyField(verifier, VT_OUTPUTACTIVATIONMAX) && - VerifyField(verifier, VT_OUTPUTACTIVATIONMIN) && - VerifyField(verifier, VT_PADTYPE) && - VerifyField(verifier, VT_PADX) && - VerifyField(verifier, VT_PADY) && - VerifyField(verifier, VT_STRIDEX) && - VerifyField(verifier, VT_STRIDEY) && - VerifyField(verifier, VT_TYPE) && - verifier.EndTable(); - } - QuantizedAvgPoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedAvgPoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedAvgPoolBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_kernelX(int32_t kernelX) { - fbb_.AddElement(QuantizedAvgPool::VT_KERNELX, kernelX, 0); - } - void add_kernelY(int32_t kernelY) { - fbb_.AddElement(QuantizedAvgPool::VT_KERNELY, kernelY, 0); - } - void add_modelFormat(ModeFormat modelFormat) { - fbb_.AddElement(QuantizedAvgPool::VT_MODELFORMAT, static_cast(modelFormat), 0); - } - void add_outputActivationMax(int32_t outputActivationMax) { - fbb_.AddElement(QuantizedAvgPool::VT_OUTPUTACTIVATIONMAX, outputActivationMax, 0); - } - void add_outputActivationMin(int32_t outputActivationMin) { - fbb_.AddElement(QuantizedAvgPool::VT_OUTPUTACTIVATIONMIN, outputActivationMin, 0); - } - void add_padType(PoolPadType padType) { - fbb_.AddElement(QuantizedAvgPool::VT_PADTYPE, static_cast(padType), 0); - } - void add_padX(int32_t padX) { - fbb_.AddElement(QuantizedAvgPool::VT_PADX, padX, 0); - } - void add_padY(int32_t padY) { - fbb_.AddElement(QuantizedAvgPool::VT_PADY, padY, 0); - } - void add_strideX(int32_t strideX) { - 
fbb_.AddElement(QuantizedAvgPool::VT_STRIDEX, strideX, 0); - } - void add_strideY(int32_t strideY) { - fbb_.AddElement(QuantizedAvgPool::VT_STRIDEY, strideY, 0); - } - void add_type(DataType type) { - fbb_.AddElement(QuantizedAvgPool::VT_TYPE, static_cast(type), 0); - } - explicit QuantizedAvgPoolBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedAvgPoolBuilder &operator=(const QuantizedAvgPoolBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedAvgPool( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t kernelX = 0, - int32_t kernelY = 0, - ModeFormat modelFormat = ModeFormat_TENSORFLOW, - int32_t outputActivationMax = 0, - int32_t outputActivationMin = 0, - PoolPadType padType = PoolPadType_CAFFE, - int32_t padX = 0, - int32_t padY = 0, - int32_t strideX = 0, - int32_t strideY = 0, - DataType type = DataType_DT_INVALID) { - QuantizedAvgPoolBuilder builder_(_fbb); - builder_.add_type(type); - builder_.add_strideY(strideY); - builder_.add_strideX(strideX); - builder_.add_padY(padY); - builder_.add_padX(padX); - builder_.add_outputActivationMin(outputActivationMin); - builder_.add_outputActivationMax(outputActivationMax); - builder_.add_kernelY(kernelY); - builder_.add_kernelX(kernelX); - builder_.add_padType(padType); - builder_.add_modelFormat(modelFormat); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedAvgPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedBiasAddT : public flatbuffers::NativeTable { - typedef QuantizedBiasAdd TableType; - std::vector bias; - DataType inputType; - int32_t max; - int32_t min; - DataType outputType; - QuantizedBiasAddT() - : inputType(DataType_DT_INVALID), - max(0), - min(0), - outputType(DataType_DT_INVALID) { - } -}; - -struct QuantizedBiasAdd FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedBiasAddT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedBiasAddTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BIAS = 4, - VT_INPUTTYPE = 6, - VT_MAX = 8, - VT_MIN = 10, - VT_OUTPUTTYPE = 12 - }; - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - DataType inputType() const { - return static_cast(GetField(VT_INPUTTYPE, 0)); - } - int32_t max() const { - return GetField(VT_MAX, 0); - } - int32_t min() const { - return GetField(VT_MIN, 0); - } - DataType outputType() const { - return static_cast(GetField(VT_OUTPUTTYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - VerifyField(verifier, VT_INPUTTYPE) && - VerifyField(verifier, VT_MAX) && - VerifyField(verifier, VT_MIN) && - VerifyField(verifier, VT_OUTPUTTYPE) && - verifier.EndTable(); - } - QuantizedBiasAddT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedBiasAddT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedBiasAddBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - 
flatbuffers::uoffset_t start_; - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(QuantizedBiasAdd::VT_BIAS, bias); - } - void add_inputType(DataType inputType) { - fbb_.AddElement(QuantizedBiasAdd::VT_INPUTTYPE, static_cast(inputType), 0); - } - void add_max(int32_t max) { - fbb_.AddElement(QuantizedBiasAdd::VT_MAX, max, 0); - } - void add_min(int32_t min) { - fbb_.AddElement(QuantizedBiasAdd::VT_MIN, min, 0); - } - void add_outputType(DataType outputType) { - fbb_.AddElement(QuantizedBiasAdd::VT_OUTPUTTYPE, static_cast(outputType), 0); - } - explicit QuantizedBiasAddBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedBiasAddBuilder &operator=(const QuantizedBiasAddBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedBiasAdd( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> bias = 0, - DataType inputType = DataType_DT_INVALID, - int32_t max = 0, - int32_t min = 0, - DataType outputType = DataType_DT_INVALID) { - QuantizedBiasAddBuilder builder_(_fbb); - builder_.add_outputType(outputType); - builder_.add_min(min); - builder_.add_max(max); - builder_.add_inputType(inputType); - builder_.add_bias(bias); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateQuantizedBiasAddDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *bias = nullptr, - DataType inputType = DataType_DT_INVALID, - int32_t max = 0, - int32_t min = 0, - DataType outputType = DataType_DT_INVALID) { - auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; - return MNN::CreateQuantizedBiasAdd( - _fbb, - bias__, - inputType, - max, - min, - outputType); -} - -flatbuffers::Offset CreateQuantizedBiasAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedConcatT : public flatbuffers::NativeTable { - typedef QuantizedConcat TableType; - FusedActivation activationType; - int32_t axis; - std::vector inputScale; - std::vector inputZeroPoint; - std::unique_ptr outputQuantizedParam; - QuantizedConcatT() - : activationType(FusedActivation_kTfLiteActNone), - axis(0) { - } -}; - -struct QuantizedConcat FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedConcatT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedConcatTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ACTIVATIONTYPE = 4, - VT_AXIS = 6, - VT_INPUTSCALE = 8, - VT_INPUTZEROPOINT = 10, - VT_OUTPUTQUANTIZEDPARAM = 12 - }; - FusedActivation activationType() const { - return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - const flatbuffers::Vector *inputScale() const { - return GetPointer *>(VT_INPUTSCALE); - } - const flatbuffers::Vector *inputZeroPoint() const { - return GetPointer *>(VT_INPUTZEROPOINT); - } - const QuantizedParam *outputQuantizedParam() const { - return GetPointer(VT_OUTPUTQUANTIZEDPARAM); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ACTIVATIONTYPE) && - VerifyField(verifier, VT_AXIS) && - VerifyOffset(verifier, VT_INPUTSCALE) && - verifier.VerifyVector(inputScale()) && - VerifyOffset(verifier, VT_INPUTZEROPOINT) && - verifier.VerifyVector(inputZeroPoint()) && - 
VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && - verifier.VerifyTable(outputQuantizedParam()) && - verifier.EndTable(); - } - QuantizedConcatT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedConcatT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedConcatBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_activationType(FusedActivation activationType) { - fbb_.AddElement(QuantizedConcat::VT_ACTIVATIONTYPE, static_cast(activationType), 0); - } - void add_axis(int32_t axis) { - fbb_.AddElement(QuantizedConcat::VT_AXIS, axis, 0); - } - void add_inputScale(flatbuffers::Offset> inputScale) { - fbb_.AddOffset(QuantizedConcat::VT_INPUTSCALE, inputScale); - } - void add_inputZeroPoint(flatbuffers::Offset> inputZeroPoint) { - fbb_.AddOffset(QuantizedConcat::VT_INPUTZEROPOINT, inputZeroPoint); - } - void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { - fbb_.AddOffset(QuantizedConcat::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); - } - explicit QuantizedConcatBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedConcatBuilder &operator=(const QuantizedConcatBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedConcat( - flatbuffers::FlatBufferBuilder &_fbb, - FusedActivation activationType = FusedActivation_kTfLiteActNone, - int32_t axis = 0, - flatbuffers::Offset> inputScale = 0, - flatbuffers::Offset> inputZeroPoint = 0, - flatbuffers::Offset outputQuantizedParam = 0) { - QuantizedConcatBuilder builder_(_fbb); - builder_.add_outputQuantizedParam(outputQuantizedParam); - builder_.add_inputZeroPoint(inputZeroPoint); - builder_.add_inputScale(inputScale); - builder_.add_axis(axis); - builder_.add_activationType(activationType); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateQuantizedConcatDirect( - flatbuffers::FlatBufferBuilder &_fbb, - FusedActivation activationType = FusedActivation_kTfLiteActNone, - int32_t axis = 0, - const std::vector *inputScale = nullptr, - const std::vector *inputZeroPoint = nullptr, - flatbuffers::Offset outputQuantizedParam = 0) { - auto inputScale__ = inputScale ? _fbb.CreateVector(*inputScale) : 0; - auto inputZeroPoint__ = inputZeroPoint ? 
_fbb.CreateVector(*inputZeroPoint) : 0; - return MNN::CreateQuantizedConcat( - _fbb, - activationType, - axis, - inputScale__, - inputZeroPoint__, - outputQuantizedParam); -} - -flatbuffers::Offset CreateQuantizedConcat(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedLogisticT : public flatbuffers::NativeTable { - typedef QuantizedLogistic TableType; - std::unique_ptr inputQuantizedParam; - std::unique_ptr outputQuantizedParam; - QuantizedLogisticT() { - } -}; - -struct QuantizedLogistic FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedLogisticT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedLogisticTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUTQUANTIZEDPARAM = 4, - VT_OUTPUTQUANTIZEDPARAM = 6 - }; - const QuantizedParam *inputQuantizedParam() const { - return GetPointer(VT_INPUTQUANTIZEDPARAM); - } - const QuantizedParam *outputQuantizedParam() const { - return GetPointer(VT_OUTPUTQUANTIZEDPARAM); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) && - verifier.VerifyTable(inputQuantizedParam()) && - VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && - verifier.VerifyTable(outputQuantizedParam()) && - verifier.EndTable(); - } - QuantizedLogisticT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedLogisticT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedLogisticBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_inputQuantizedParam(flatbuffers::Offset inputQuantizedParam) { - fbb_.AddOffset(QuantizedLogistic::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam); - } - void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { - fbb_.AddOffset(QuantizedLogistic::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); - } - explicit QuantizedLogisticBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedLogisticBuilder &operator=(const QuantizedLogisticBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedLogistic( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset inputQuantizedParam = 0, - flatbuffers::Offset outputQuantizedParam = 0) { - QuantizedLogisticBuilder builder_(_fbb); - builder_.add_outputQuantizedParam(outputQuantizedParam); - builder_.add_inputQuantizedParam(inputQuantizedParam); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedLogistic(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedMatMulT : public flatbuffers::NativeTable { - typedef QuantizedMatMul TableType; - bool transposeA; - bool transposeB; - QuantizedMatMulT() - : transposeA(false), - transposeB(false) { - } -}; - -struct QuantizedMatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedMatMulT NativeTableType; - static const flatbuffers::TypeTable 
*MiniReflectTypeTable() { - return QuantizedMatMulTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TRANSPOSEA = 4, - VT_TRANSPOSEB = 6 - }; - bool transposeA() const { - return GetField(VT_TRANSPOSEA, 0) != 0; - } - bool transposeB() const { - return GetField(VT_TRANSPOSEB, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TRANSPOSEA) && - VerifyField(verifier, VT_TRANSPOSEB) && - verifier.EndTable(); - } - QuantizedMatMulT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedMatMulT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedMatMulBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_transposeA(bool transposeA) { - fbb_.AddElement(QuantizedMatMul::VT_TRANSPOSEA, static_cast(transposeA), 0); - } - void add_transposeB(bool transposeB) { - fbb_.AddElement(QuantizedMatMul::VT_TRANSPOSEB, static_cast(transposeB), 0); - } - explicit QuantizedMatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedMatMulBuilder &operator=(const QuantizedMatMulBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedMatMul( - flatbuffers::FlatBufferBuilder &_fbb, - bool transposeA = false, - bool transposeB = false) { - QuantizedMatMulBuilder builder_(_fbb); - builder_.add_transposeB(transposeB); - builder_.add_transposeA(transposeA); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedMatMul(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedMaxPoolT : public flatbuffers::NativeTable { - typedef QuantizedMaxPool TableType; - int32_t kernelX; - int32_t kernelY; - ModeFormat modelFormat; - int32_t outputActivationMax; - int32_t outputActivationMin; - PoolPadType padType; - int32_t padX; - int32_t padY; - int32_t strideX; - int32_t strideY; - DataType type; - QuantizedMaxPoolT() - : kernelX(0), - kernelY(0), - modelFormat(ModeFormat_TENSORFLOW), - outputActivationMax(0), - outputActivationMin(0), - padType(PoolPadType_CAFFE), - padX(0), - padY(0), - strideX(0), - strideY(0), - type(DataType_DT_INVALID) { - } -}; - -struct QuantizedMaxPool FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedMaxPoolT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedMaxPoolTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KERNELX = 4, - VT_KERNELY = 6, - VT_MODELFORMAT = 8, - VT_OUTPUTACTIVATIONMAX = 10, - VT_OUTPUTACTIVATIONMIN = 12, - VT_PADTYPE = 14, - VT_PADX = 16, - VT_PADY = 18, - VT_STRIDEX = 20, - VT_STRIDEY = 22, - VT_TYPE = 24 - }; - int32_t kernelX() const { - return GetField(VT_KERNELX, 0); - } - int32_t kernelY() const { - return GetField(VT_KERNELY, 0); - } - ModeFormat modelFormat() const { - return static_cast(GetField(VT_MODELFORMAT, 0)); - } - int32_t outputActivationMax() const { - return GetField(VT_OUTPUTACTIVATIONMAX, 0); - } - int32_t outputActivationMin() const { - return 
GetField(VT_OUTPUTACTIVATIONMIN, 0); - } - PoolPadType padType() const { - return static_cast(GetField(VT_PADTYPE, 0)); - } - int32_t padX() const { - return GetField(VT_PADX, 0); - } - int32_t padY() const { - return GetField(VT_PADY, 0); - } - int32_t strideX() const { - return GetField(VT_STRIDEX, 0); - } - int32_t strideY() const { - return GetField(VT_STRIDEY, 0); - } - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KERNELX) && - VerifyField(verifier, VT_KERNELY) && - VerifyField(verifier, VT_MODELFORMAT) && - VerifyField(verifier, VT_OUTPUTACTIVATIONMAX) && - VerifyField(verifier, VT_OUTPUTACTIVATIONMIN) && - VerifyField(verifier, VT_PADTYPE) && - VerifyField(verifier, VT_PADX) && - VerifyField(verifier, VT_PADY) && - VerifyField(verifier, VT_STRIDEX) && - VerifyField(verifier, VT_STRIDEY) && - VerifyField(verifier, VT_TYPE) && - verifier.EndTable(); - } - QuantizedMaxPoolT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedMaxPoolT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedMaxPoolBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_kernelX(int32_t kernelX) { - fbb_.AddElement(QuantizedMaxPool::VT_KERNELX, kernelX, 0); - } - void add_kernelY(int32_t kernelY) { - fbb_.AddElement(QuantizedMaxPool::VT_KERNELY, kernelY, 0); - } - void add_modelFormat(ModeFormat modelFormat) { - fbb_.AddElement(QuantizedMaxPool::VT_MODELFORMAT, static_cast(modelFormat), 0); - } - void add_outputActivationMax(int32_t outputActivationMax) { - fbb_.AddElement(QuantizedMaxPool::VT_OUTPUTACTIVATIONMAX, outputActivationMax, 0); - } - void add_outputActivationMin(int32_t outputActivationMin) { - fbb_.AddElement(QuantizedMaxPool::VT_OUTPUTACTIVATIONMIN, outputActivationMin, 0); - } - void add_padType(PoolPadType padType) { - fbb_.AddElement(QuantizedMaxPool::VT_PADTYPE, static_cast(padType), 0); - } - void add_padX(int32_t padX) { - fbb_.AddElement(QuantizedMaxPool::VT_PADX, padX, 0); - } - void add_padY(int32_t padY) { - fbb_.AddElement(QuantizedMaxPool::VT_PADY, padY, 0); - } - void add_strideX(int32_t strideX) { - fbb_.AddElement(QuantizedMaxPool::VT_STRIDEX, strideX, 0); - } - void add_strideY(int32_t strideY) { - fbb_.AddElement(QuantizedMaxPool::VT_STRIDEY, strideY, 0); - } - void add_type(DataType type) { - fbb_.AddElement(QuantizedMaxPool::VT_TYPE, static_cast(type), 0); - } - explicit QuantizedMaxPoolBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedMaxPoolBuilder &operator=(const QuantizedMaxPoolBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedMaxPool( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t kernelX = 0, - int32_t kernelY = 0, - ModeFormat modelFormat = ModeFormat_TENSORFLOW, - int32_t outputActivationMax = 0, - int32_t outputActivationMin = 0, - PoolPadType padType = PoolPadType_CAFFE, - int32_t padX = 0, - int32_t padY = 0, - int32_t strideX = 0, - int32_t strideY = 0, - DataType type = DataType_DT_INVALID) { - QuantizedMaxPoolBuilder builder_(_fbb); - 
builder_.add_type(type); - builder_.add_strideY(strideY); - builder_.add_strideX(strideX); - builder_.add_padY(padY); - builder_.add_padX(padX); - builder_.add_outputActivationMin(outputActivationMin); - builder_.add_outputActivationMax(outputActivationMax); - builder_.add_kernelY(kernelY); - builder_.add_kernelX(kernelX); - builder_.add_padType(padType); - builder_.add_modelFormat(modelFormat); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedMaxPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedReluT : public flatbuffers::NativeTable { - typedef QuantizedRelu TableType; - DataType type; - QuantizedReluT() - : type(DataType_DT_INVALID) { - } -}; - -struct QuantizedRelu FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedReluT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedReluTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4 - }; - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && - verifier.EndTable(); - } - QuantizedReluT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedReluT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedReluBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(DataType type) { - fbb_.AddElement(QuantizedRelu::VT_TYPE, static_cast(type), 0); - } - explicit QuantizedReluBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedReluBuilder &operator=(const QuantizedReluBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedRelu( - flatbuffers::FlatBufferBuilder &_fbb, - DataType type = DataType_DT_INVALID) { - QuantizedReluBuilder builder_(_fbb); - builder_.add_type(type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedRelu(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedRelu6T : public flatbuffers::NativeTable { - typedef QuantizedRelu6 TableType; - DataType type; - QuantizedRelu6T() - : type(DataType_DT_INVALID) { - } -}; - -struct QuantizedRelu6 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedRelu6T NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedRelu6TypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4 - }; - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && - verifier.EndTable(); - } - QuantizedRelu6T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedRelu6T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedRelu6Builder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(DataType type) { - fbb_.AddElement(QuantizedRelu6::VT_TYPE, static_cast(type), 0); - } - explicit QuantizedRelu6Builder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedRelu6Builder &operator=(const QuantizedRelu6Builder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedRelu6( - flatbuffers::FlatBufferBuilder &_fbb, - DataType type = DataType_DT_INVALID) { - QuantizedRelu6Builder builder_(_fbb); - builder_.add_type(type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedRelu6(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedReshapeT : public flatbuffers::NativeTable { - typedef QuantizedReshape TableType; - std::vector dims; - ModeFormat modelFormat; - QuantizedReshapeT() - : modelFormat(ModeFormat_TENSORFLOW) { - } -}; - -struct QuantizedReshape FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedReshapeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedReshapeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DIMS = 4, - VT_MODELFORMAT = 6 - }; - const flatbuffers::Vector *dims() const { - return GetPointer *>(VT_DIMS); - } - ModeFormat modelFormat() const { - return static_cast(GetField(VT_MODELFORMAT, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DIMS) && - verifier.VerifyVector(dims()) && - VerifyField(verifier, VT_MODELFORMAT) && - verifier.EndTable(); - } - QuantizedReshapeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedReshapeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedReshapeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dims(flatbuffers::Offset> dims) { - fbb_.AddOffset(QuantizedReshape::VT_DIMS, dims); - } - void add_modelFormat(ModeFormat modelFormat) { - fbb_.AddElement(QuantizedReshape::VT_MODELFORMAT, static_cast(modelFormat), 0); - } - explicit QuantizedReshapeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedReshapeBuilder &operator=(const QuantizedReshapeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedReshape( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> dims = 0, - ModeFormat modelFormat = ModeFormat_TENSORFLOW) { - QuantizedReshapeBuilder builder_(_fbb); - builder_.add_dims(dims); - builder_.add_modelFormat(modelFormat); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateQuantizedReshapeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *dims = nullptr, - ModeFormat 
modelFormat = ModeFormat_TENSORFLOW) { - auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; - return MNN::CreateQuantizedReshape( - _fbb, - dims__, - modelFormat); -} - -flatbuffers::Offset CreateQuantizedReshape(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizedSoftmaxT : public flatbuffers::NativeTable { - typedef QuantizedSoftmax TableType; - float beta; - float inputScale; - QuantizedSoftmaxT() - : beta(0.0f), - inputScale(0.0f) { - } -}; - -struct QuantizedSoftmax FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizedSoftmaxT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizedSoftmaxTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BETA = 4, - VT_INPUTSCALE = 6 - }; - float beta() const { - return GetField(VT_BETA, 0.0f); - } - float inputScale() const { - return GetField(VT_INPUTSCALE, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BETA) && - VerifyField(verifier, VT_INPUTSCALE) && - verifier.EndTable(); - } - QuantizedSoftmaxT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizedSoftmaxT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizedSoftmaxBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_beta(float beta) { - fbb_.AddElement(QuantizedSoftmax::VT_BETA, beta, 0.0f); - } - void add_inputScale(float inputScale) { - fbb_.AddElement(QuantizedSoftmax::VT_INPUTSCALE, inputScale, 0.0f); - } - explicit QuantizedSoftmaxBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizedSoftmaxBuilder &operator=(const QuantizedSoftmaxBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizedSoftmax( - flatbuffers::FlatBufferBuilder &_fbb, - float beta = 0.0f, - float inputScale = 0.0f) { - QuantizedSoftmaxBuilder builder_(_fbb); - builder_.add_inputScale(inputScale); - builder_.add_beta(beta); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizedSoftmax(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizeV2T : public flatbuffers::NativeTable { - typedef QuantizeV2 TableType; - DataType type; - QuantizeMode mode; - QuantizeRoundMode roundMode; - QuantizeV2T() - : type(DataType_DT_INVALID), - mode(QuantizeMode_MIN_COMBINED), - roundMode(QuantizeRoundMode_HALF_AWAY_FROM_ZERO) { - } -}; - -struct QuantizeV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizeV2T NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizeV2TypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4, - VT_MODE = 6, - VT_ROUNDMODE = 8 - }; - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - QuantizeMode mode() const { - return static_cast(GetField(VT_MODE, 0)); - } - QuantizeRoundMode roundMode() const { - return 
static_cast(GetField(VT_ROUNDMODE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_MODE) && - VerifyField(verifier, VT_ROUNDMODE) && - verifier.EndTable(); - } - QuantizeV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizeV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizeV2Builder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(DataType type) { - fbb_.AddElement(QuantizeV2::VT_TYPE, static_cast(type), 0); - } - void add_mode(QuantizeMode mode) { - fbb_.AddElement(QuantizeV2::VT_MODE, static_cast(mode), 0); - } - void add_roundMode(QuantizeRoundMode roundMode) { - fbb_.AddElement(QuantizeV2::VT_ROUNDMODE, static_cast(roundMode), 0); - } - explicit QuantizeV2Builder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizeV2Builder &operator=(const QuantizeV2Builder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizeV2( - flatbuffers::FlatBufferBuilder &_fbb, - DataType type = DataType_DT_INVALID, - QuantizeMode mode = QuantizeMode_MIN_COMBINED, - QuantizeRoundMode roundMode = QuantizeRoundMode_HALF_AWAY_FROM_ZERO) { - QuantizeV2Builder builder_(_fbb); - builder_.add_type(type); - builder_.add_roundMode(roundMode); - builder_.add_mode(mode); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizeV2(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RequantizationRangeT : public flatbuffers::NativeTable { - typedef RequantizationRange TableType; - RequantizationRangeT() { - } -}; - -struct RequantizationRange FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RequantizationRangeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RequantizationRangeTypeTable(); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RequantizationRangeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RequantizationRangeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RequantizationRangeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RequantizationRangeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RequantizationRangeBuilder &operator=(const RequantizationRangeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRequantizationRange( - flatbuffers::FlatBufferBuilder &_fbb) { - RequantizationRangeBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRequantizationRange(flatbuffers::FlatBufferBuilder &_fbb, const 
RequantizationRangeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RequantizeT : public flatbuffers::NativeTable { - typedef Requantize TableType; - RequantizeT() { - } -}; - -struct Requantize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RequantizeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RequantizeTypeTable(); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RequantizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RequantizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RequantizeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RequantizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RequantizeBuilder &operator=(const RequantizeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRequantize( - flatbuffers::FlatBufferBuilder &_fbb) { - RequantizeBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRequantize(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TfQuantizedConv2DT : public flatbuffers::NativeTable { - typedef TfQuantizedConv2D TableType; - std::vector bias; - bool biasflag; - std::unique_ptr common; - std::vector weight; - FusedActivation activationType; - int32_t multiplier; - int32_t outMax; - int32_t outMin; - int32_t shift; - std::unique_ptr biasQuantizedParam; - int32_t depthMultiplier; - std::unique_ptr filterQuantizedParam; - std::unique_ptr inputQuantizedParam; - ModeFormat modelFormat; - std::unique_ptr outputQuantizedParam; - TfQuantizedConv2DT() - : biasflag(false), - activationType(FusedActivation_kTfLiteActNone), - multiplier(0), - outMax(0), - outMin(0), - shift(0), - depthMultiplier(0), - modelFormat(ModeFormat_TENSORFLOW) { - } -}; - -struct TfQuantizedConv2D FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TfQuantizedConv2DT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TfQuantizedConv2DTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BIAS = 4, - VT_BIASFLAG = 6, - VT_COMMON = 8, - VT_WEIGHT = 10, - VT_ACTIVATIONTYPE = 12, - VT_MULTIPLIER = 14, - VT_OUTMAX = 16, - VT_OUTMIN = 18, - VT_SHIFT = 20, - VT_BIASQUANTIZEDPARAM = 22, - VT_DEPTHMULTIPLIER = 24, - VT_FILTERQUANTIZEDPARAM = 26, - VT_INPUTQUANTIZEDPARAM = 28, - VT_MODELFORMAT = 30, - VT_OUTPUTQUANTIZEDPARAM = 32 - }; - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - bool biasflag() const { - return GetField(VT_BIASFLAG, 0) != 0; - } - const Convolution2DCommon *common() const { - return GetPointer(VT_COMMON); - } - const flatbuffers::Vector *weight() const { - return GetPointer *>(VT_WEIGHT); - } - FusedActivation activationType() const { - return static_cast(GetField(VT_ACTIVATIONTYPE, 0)); - } - int32_t multiplier() const { - return GetField(VT_MULTIPLIER, 0); - } - int32_t outMax() const { - return GetField(VT_OUTMAX, 0); 
- } - int32_t outMin() const { - return GetField(VT_OUTMIN, 0); - } - int32_t shift() const { - return GetField(VT_SHIFT, 0); - } - const QuantizedParam *biasQuantizedParam() const { - return GetPointer(VT_BIASQUANTIZEDPARAM); - } - int32_t depthMultiplier() const { - return GetField(VT_DEPTHMULTIPLIER, 0); - } - const QuantizedParam *filterQuantizedParam() const { - return GetPointer(VT_FILTERQUANTIZEDPARAM); - } - const QuantizedParam *inputQuantizedParam() const { - return GetPointer(VT_INPUTQUANTIZEDPARAM); - } - ModeFormat modelFormat() const { - return static_cast(GetField(VT_MODELFORMAT, 0)); - } - const QuantizedParam *outputQuantizedParam() const { - return GetPointer(VT_OUTPUTQUANTIZEDPARAM); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - VerifyField(verifier, VT_BIASFLAG) && - VerifyOffset(verifier, VT_COMMON) && - verifier.VerifyTable(common()) && - VerifyOffset(verifier, VT_WEIGHT) && - verifier.VerifyVector(weight()) && - VerifyField(verifier, VT_ACTIVATIONTYPE) && - VerifyField(verifier, VT_MULTIPLIER) && - VerifyField(verifier, VT_OUTMAX) && - VerifyField(verifier, VT_OUTMIN) && - VerifyField(verifier, VT_SHIFT) && - VerifyOffset(verifier, VT_BIASQUANTIZEDPARAM) && - verifier.VerifyTable(biasQuantizedParam()) && - VerifyField(verifier, VT_DEPTHMULTIPLIER) && - VerifyOffset(verifier, VT_FILTERQUANTIZEDPARAM) && - verifier.VerifyTable(filterQuantizedParam()) && - VerifyOffset(verifier, VT_INPUTQUANTIZEDPARAM) && - verifier.VerifyTable(inputQuantizedParam()) && - VerifyField(verifier, VT_MODELFORMAT) && - VerifyOffset(verifier, VT_OUTPUTQUANTIZEDPARAM) && - verifier.VerifyTable(outputQuantizedParam()) && - verifier.EndTable(); - } - TfQuantizedConv2DT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TfQuantizedConv2DT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TfQuantizedConv2DBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(TfQuantizedConv2D::VT_BIAS, bias); - } - void add_biasflag(bool biasflag) { - fbb_.AddElement(TfQuantizedConv2D::VT_BIASFLAG, static_cast(biasflag), 0); - } - void add_common(flatbuffers::Offset common) { - fbb_.AddOffset(TfQuantizedConv2D::VT_COMMON, common); - } - void add_weight(flatbuffers::Offset> weight) { - fbb_.AddOffset(TfQuantizedConv2D::VT_WEIGHT, weight); - } - void add_activationType(FusedActivation activationType) { - fbb_.AddElement(TfQuantizedConv2D::VT_ACTIVATIONTYPE, static_cast(activationType), 0); - } - void add_multiplier(int32_t multiplier) { - fbb_.AddElement(TfQuantizedConv2D::VT_MULTIPLIER, multiplier, 0); - } - void add_outMax(int32_t outMax) { - fbb_.AddElement(TfQuantizedConv2D::VT_OUTMAX, outMax, 0); - } - void add_outMin(int32_t outMin) { - fbb_.AddElement(TfQuantizedConv2D::VT_OUTMIN, outMin, 0); - } - void add_shift(int32_t shift) { - fbb_.AddElement(TfQuantizedConv2D::VT_SHIFT, shift, 0); - } - void add_biasQuantizedParam(flatbuffers::Offset biasQuantizedParam) { - fbb_.AddOffset(TfQuantizedConv2D::VT_BIASQUANTIZEDPARAM, biasQuantizedParam); - } - void add_depthMultiplier(int32_t depthMultiplier) { - fbb_.AddElement(TfQuantizedConv2D::VT_DEPTHMULTIPLIER, 
depthMultiplier, 0); - } - void add_filterQuantizedParam(flatbuffers::Offset filterQuantizedParam) { - fbb_.AddOffset(TfQuantizedConv2D::VT_FILTERQUANTIZEDPARAM, filterQuantizedParam); - } - void add_inputQuantizedParam(flatbuffers::Offset inputQuantizedParam) { - fbb_.AddOffset(TfQuantizedConv2D::VT_INPUTQUANTIZEDPARAM, inputQuantizedParam); - } - void add_modelFormat(ModeFormat modelFormat) { - fbb_.AddElement(TfQuantizedConv2D::VT_MODELFORMAT, static_cast(modelFormat), 0); - } - void add_outputQuantizedParam(flatbuffers::Offset outputQuantizedParam) { - fbb_.AddOffset(TfQuantizedConv2D::VT_OUTPUTQUANTIZEDPARAM, outputQuantizedParam); - } - explicit TfQuantizedConv2DBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TfQuantizedConv2DBuilder &operator=(const TfQuantizedConv2DBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTfQuantizedConv2D( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> bias = 0, - bool biasflag = false, - flatbuffers::Offset common = 0, - flatbuffers::Offset> weight = 0, - FusedActivation activationType = FusedActivation_kTfLiteActNone, - int32_t multiplier = 0, - int32_t outMax = 0, - int32_t outMin = 0, - int32_t shift = 0, - flatbuffers::Offset biasQuantizedParam = 0, - int32_t depthMultiplier = 0, - flatbuffers::Offset filterQuantizedParam = 0, - flatbuffers::Offset inputQuantizedParam = 0, - ModeFormat modelFormat = ModeFormat_TENSORFLOW, - flatbuffers::Offset outputQuantizedParam = 0) { - TfQuantizedConv2DBuilder builder_(_fbb); - builder_.add_outputQuantizedParam(outputQuantizedParam); - builder_.add_inputQuantizedParam(inputQuantizedParam); - builder_.add_filterQuantizedParam(filterQuantizedParam); - builder_.add_depthMultiplier(depthMultiplier); - builder_.add_biasQuantizedParam(biasQuantizedParam); - builder_.add_shift(shift); - builder_.add_outMin(outMin); - builder_.add_outMax(outMax); - builder_.add_multiplier(multiplier); - builder_.add_weight(weight); - builder_.add_common(common); - builder_.add_bias(bias); - builder_.add_modelFormat(modelFormat); - builder_.add_activationType(activationType); - builder_.add_biasflag(biasflag); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateTfQuantizedConv2DDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *bias = nullptr, - bool biasflag = false, - flatbuffers::Offset common = 0, - const std::vector *weight = nullptr, - FusedActivation activationType = FusedActivation_kTfLiteActNone, - int32_t multiplier = 0, - int32_t outMax = 0, - int32_t outMin = 0, - int32_t shift = 0, - flatbuffers::Offset biasQuantizedParam = 0, - int32_t depthMultiplier = 0, - flatbuffers::Offset filterQuantizedParam = 0, - flatbuffers::Offset inputQuantizedParam = 0, - ModeFormat modelFormat = ModeFormat_TENSORFLOW, - flatbuffers::Offset outputQuantizedParam = 0) { - auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; - auto weight__ = weight ? 
_fbb.CreateVector(*weight) : 0; - return MNN::CreateTfQuantizedConv2D( - _fbb, - bias__, - biasflag, - common, - weight__, - activationType, - multiplier, - outMax, - outMin, - shift, - biasQuantizedParam, - depthMultiplier, - filterQuantizedParam, - inputQuantizedParam, - modelFormat, - outputQuantizedParam); -} - -flatbuffers::Offset CreateTfQuantizedConv2D(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline QuantizedParamT *QuantizedParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedParam::UnPackTo(QuantizedParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = zeroPoint(); _o->zeroPoint = _e; }; - { auto _e = scale(); _o->scale = _e; }; -} - -inline flatbuffers::Offset QuantizedParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedParam(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _zeroPoint = _o->zeroPoint; - auto _scale = _o->scale; - return MNN::CreateQuantizedParam( - _fbb, - _zeroPoint, - _scale); -} - -inline QuantizedAddT *QuantizedAdd::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedAddT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedAdd::UnPackTo(QuantizedAddT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = activationType(); _o->activationType = _e; }; - { auto _e = input1QuantizedParam(); if (_e) _o->input1QuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = input2QuantizedParam(); if (_e) _o->input2QuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset QuantizedAdd::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedAdd(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAddT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedAddT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _activationType = _o->activationType; - auto _input1QuantizedParam = _o->input1QuantizedParam ? CreateQuantizedParam(_fbb, _o->input1QuantizedParam.get(), _rehasher) : 0; - auto _input2QuantizedParam = _o->input2QuantizedParam ? CreateQuantizedParam(_fbb, _o->input2QuantizedParam.get(), _rehasher) : 0; - auto _outputQuantizedParam = _o->outputQuantizedParam ? 
CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; - return MNN::CreateQuantizedAdd( - _fbb, - _activationType, - _input1QuantizedParam, - _input2QuantizedParam, - _outputQuantizedParam); -} - -inline DequantizeT *Dequantize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DequantizeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Dequantize::UnPackTo(DequantizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = mode(); _o->mode = _e; }; - { auto _e = modelFormat(); _o->modelFormat = _e; }; - { auto _e = type(); _o->type = _e; }; -} - -inline flatbuffers::Offset Dequantize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDequantize(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDequantize(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0; - auto _mode = _o->mode; - auto _modelFormat = _o->modelFormat; - auto _type = _o->type; - return MNN::CreateDequantize( - _fbb, - _inputQuantizedParam, - _mode, - _modelFormat, - _type); -} - -inline QuantizedAvgPoolT *QuantizedAvgPool::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedAvgPoolT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedAvgPool::UnPackTo(QuantizedAvgPoolT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = kernelX(); _o->kernelX = _e; }; - { auto _e = kernelY(); _o->kernelY = _e; }; - { auto _e = modelFormat(); _o->modelFormat = _e; }; - { auto _e = outputActivationMax(); _o->outputActivationMax = _e; }; - { auto _e = outputActivationMin(); _o->outputActivationMin = _e; }; - { auto _e = padType(); _o->padType = _e; }; - { auto _e = padX(); _o->padX = _e; }; - { auto _e = padY(); _o->padY = _e; }; - { auto _e = strideX(); _o->strideX = _e; }; - { auto _e = strideY(); _o->strideY = _e; }; - { auto _e = type(); _o->type = _e; }; -} - -inline flatbuffers::Offset QuantizedAvgPool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedAvgPool(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedAvgPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedAvgPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedAvgPoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _kernelX = _o->kernelX; - auto _kernelY = _o->kernelY; - auto _modelFormat = _o->modelFormat; - auto _outputActivationMax = _o->outputActivationMax; - auto _outputActivationMin = _o->outputActivationMin; - auto _padType = _o->padType; - auto _padX = _o->padX; - auto _padY = _o->padY; - auto _strideX = _o->strideX; - auto _strideY = 
_o->strideY; - auto _type = _o->type; - return MNN::CreateQuantizedAvgPool( - _fbb, - _kernelX, - _kernelY, - _modelFormat, - _outputActivationMax, - _outputActivationMin, - _padType, - _padX, - _padY, - _strideX, - _strideY, - _type); -} - -inline QuantizedBiasAddT *QuantizedBiasAdd::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedBiasAddT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedBiasAdd::UnPackTo(QuantizedBiasAddT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; - { auto _e = inputType(); _o->inputType = _e; }; - { auto _e = max(); _o->max = _e; }; - { auto _e = min(); _o->min = _e; }; - { auto _e = outputType(); _o->outputType = _e; }; -} - -inline flatbuffers::Offset QuantizedBiasAdd::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedBiasAdd(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedBiasAdd(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedBiasAddT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedBiasAddT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; - auto _inputType = _o->inputType; - auto _max = _o->max; - auto _min = _o->min; - auto _outputType = _o->outputType; - return MNN::CreateQuantizedBiasAdd( - _fbb, - _bias, - _inputType, - _max, - _min, - _outputType); -} - -inline QuantizedConcatT *QuantizedConcat::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedConcatT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedConcat::UnPackTo(QuantizedConcatT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = activationType(); _o->activationType = _e; }; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = inputScale(); if (_e) { _o->inputScale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputScale[_i] = _e->Get(_i); } } }; - { auto _e = inputZeroPoint(); if (_e) { _o->inputZeroPoint.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputZeroPoint[_i] = _e->Get(_i); } } }; - { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset QuantizedConcat::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedConcat(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedConcat(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedConcatT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedConcatT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _activationType = _o->activationType; - auto _axis = _o->axis; - auto _inputScale = _o->inputScale.size() ? 
_fbb.CreateVector(_o->inputScale) : 0; - auto _inputZeroPoint = _o->inputZeroPoint.size() ? _fbb.CreateVector(_o->inputZeroPoint) : 0; - auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; - return MNN::CreateQuantizedConcat( - _fbb, - _activationType, - _axis, - _inputScale, - _inputZeroPoint, - _outputQuantizedParam); -} - -inline QuantizedLogisticT *QuantizedLogistic::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedLogisticT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedLogistic::UnPackTo(QuantizedLogisticT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset QuantizedLogistic::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedLogistic(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedLogistic(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedLogisticT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedLogisticT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0; - auto _outputQuantizedParam = _o->outputQuantizedParam ? 
CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; - return MNN::CreateQuantizedLogistic( - _fbb, - _inputQuantizedParam, - _outputQuantizedParam); -} - -inline QuantizedMatMulT *QuantizedMatMul::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedMatMulT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedMatMul::UnPackTo(QuantizedMatMulT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = transposeA(); _o->transposeA = _e; }; - { auto _e = transposeB(); _o->transposeB = _e; }; -} - -inline flatbuffers::Offset QuantizedMatMul::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedMatMul(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedMatMul(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedMatMulT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _transposeA = _o->transposeA; - auto _transposeB = _o->transposeB; - return MNN::CreateQuantizedMatMul( - _fbb, - _transposeA, - _transposeB); -} - -inline QuantizedMaxPoolT *QuantizedMaxPool::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedMaxPoolT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedMaxPool::UnPackTo(QuantizedMaxPoolT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = kernelX(); _o->kernelX = _e; }; - { auto _e = kernelY(); _o->kernelY = _e; }; - { auto _e = modelFormat(); _o->modelFormat = _e; }; - { auto _e = outputActivationMax(); _o->outputActivationMax = _e; }; - { auto _e = outputActivationMin(); _o->outputActivationMin = _e; }; - { auto _e = padType(); _o->padType = _e; }; - { auto _e = padX(); _o->padX = _e; }; - { auto _e = padY(); _o->padY = _e; }; - { auto _e = strideX(); _o->strideX = _e; }; - { auto _e = strideY(); _o->strideY = _e; }; - { auto _e = type(); _o->type = _e; }; -} - -inline flatbuffers::Offset QuantizedMaxPool::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedMaxPool(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedMaxPool(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedMaxPoolT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedMaxPoolT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _kernelX = _o->kernelX; - auto _kernelY = _o->kernelY; - auto _modelFormat = _o->modelFormat; - auto _outputActivationMax = _o->outputActivationMax; - auto _outputActivationMin = _o->outputActivationMin; - auto _padType = _o->padType; - auto _padX = _o->padX; - auto _padY = _o->padY; - auto _strideX = _o->strideX; - auto _strideY = _o->strideY; - auto _type = _o->type; - return MNN::CreateQuantizedMaxPool( - _fbb, - _kernelX, - _kernelY, - _modelFormat, - _outputActivationMax, - _outputActivationMin, - _padType, - _padX, - _padY, - _strideX, - _strideY, - _type); -} - -inline QuantizedReluT *QuantizedRelu::UnPack(const 
flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedReluT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedRelu::UnPackTo(QuantizedReluT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; }; -} - -inline flatbuffers::Offset QuantizedRelu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedRelu(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedRelu(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedReluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - return MNN::CreateQuantizedRelu( - _fbb, - _type); -} - -inline QuantizedRelu6T *QuantizedRelu6::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedRelu6T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedRelu6::UnPackTo(QuantizedRelu6T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; }; -} - -inline flatbuffers::Offset QuantizedRelu6::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedRelu6(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedRelu6(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedRelu6T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedRelu6T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - return MNN::CreateQuantizedRelu6( - _fbb, - _type); -} - -inline QuantizedReshapeT *QuantizedReshape::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedReshapeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedReshape::UnPackTo(QuantizedReshapeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; - { auto _e = modelFormat(); _o->modelFormat = _e; }; -} - -inline flatbuffers::Offset QuantizedReshape::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedReshape(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedReshape(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedReshapeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedReshapeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dims = _o->dims.size() ? 
_fbb.CreateVector(_o->dims) : 0; - auto _modelFormat = _o->modelFormat; - return MNN::CreateQuantizedReshape( - _fbb, - _dims, - _modelFormat); -} - -inline QuantizedSoftmaxT *QuantizedSoftmax::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizedSoftmaxT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizedSoftmax::UnPackTo(QuantizedSoftmaxT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = beta(); _o->beta = _e; }; - { auto _e = inputScale(); _o->inputScale = _e; }; -} - -inline flatbuffers::Offset QuantizedSoftmax::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizedSoftmax(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizedSoftmax(flatbuffers::FlatBufferBuilder &_fbb, const QuantizedSoftmaxT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizedSoftmaxT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _beta = _o->beta; - auto _inputScale = _o->inputScale; - return MNN::CreateQuantizedSoftmax( - _fbb, - _beta, - _inputScale); -} - -inline QuantizeV2T *QuantizeV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizeV2T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizeV2::UnPackTo(QuantizeV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; }; - { auto _e = mode(); _o->mode = _e; }; - { auto _e = roundMode(); _o->roundMode = _e; }; -} - -inline flatbuffers::Offset QuantizeV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizeV2(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizeV2(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - auto _mode = _o->mode; - auto _roundMode = _o->roundMode; - return MNN::CreateQuantizeV2( - _fbb, - _type, - _mode, - _roundMode); -} - -inline RequantizationRangeT *RequantizationRange::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RequantizationRangeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RequantizationRange::UnPackTo(RequantizationRangeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset RequantizationRange::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRequantizationRange(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRequantizationRange(flatbuffers::FlatBufferBuilder &_fbb, const RequantizationRangeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RequantizationRangeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - 
return MNN::CreateRequantizationRange( - _fbb); -} - -inline RequantizeT *Requantize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RequantizeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Requantize::UnPackTo(RequantizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset Requantize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRequantize(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRequantize(flatbuffers::FlatBufferBuilder &_fbb, const RequantizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RequantizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return MNN::CreateRequantize( - _fbb); -} - -inline TfQuantizedConv2DT *TfQuantizedConv2D::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TfQuantizedConv2DT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TfQuantizedConv2D::UnPackTo(TfQuantizedConv2DT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; - { auto _e = biasflag(); _o->biasflag = _e; }; - { auto _e = common(); if (_e) _o->common = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; - { auto _e = activationType(); _o->activationType = _e; }; - { auto _e = multiplier(); _o->multiplier = _e; }; - { auto _e = outMax(); _o->outMax = _e; }; - { auto _e = outMin(); _o->outMin = _e; }; - { auto _e = shift(); _o->shift = _e; }; - { auto _e = biasQuantizedParam(); if (_e) _o->biasQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = depthMultiplier(); _o->depthMultiplier = _e; }; - { auto _e = filterQuantizedParam(); if (_e) _o->filterQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = inputQuantizedParam(); if (_e) _o->inputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = modelFormat(); _o->modelFormat = _e; }; - { auto _e = outputQuantizedParam(); if (_e) _o->outputQuantizedParam = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset TfQuantizedConv2D::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTfQuantizedConv2D(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTfQuantizedConv2D(flatbuffers::FlatBufferBuilder &_fbb, const TfQuantizedConv2DT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TfQuantizedConv2DT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _bias = _o->bias.size() ? _fbb.CreateVector(_o->bias) : 0; - auto _biasflag = _o->biasflag; - auto _common = _o->common ? CreateConvolution2DCommon(_fbb, _o->common.get(), _rehasher) : 0; - auto _weight = _o->weight.size() ? 
_fbb.CreateVector(_o->weight) : 0; - auto _activationType = _o->activationType; - auto _multiplier = _o->multiplier; - auto _outMax = _o->outMax; - auto _outMin = _o->outMin; - auto _shift = _o->shift; - auto _biasQuantizedParam = _o->biasQuantizedParam ? CreateQuantizedParam(_fbb, _o->biasQuantizedParam.get(), _rehasher) : 0; - auto _depthMultiplier = _o->depthMultiplier; - auto _filterQuantizedParam = _o->filterQuantizedParam ? CreateQuantizedParam(_fbb, _o->filterQuantizedParam.get(), _rehasher) : 0; - auto _inputQuantizedParam = _o->inputQuantizedParam ? CreateQuantizedParam(_fbb, _o->inputQuantizedParam.get(), _rehasher) : 0; - auto _modelFormat = _o->modelFormat; - auto _outputQuantizedParam = _o->outputQuantizedParam ? CreateQuantizedParam(_fbb, _o->outputQuantizedParam.get(), _rehasher) : 0; - return MNN::CreateTfQuantizedConv2D( - _fbb, - _bias, - _biasflag, - _common, - _weight, - _activationType, - _multiplier, - _outMax, - _outMin, - _shift, - _biasQuantizedParam, - _depthMultiplier, - _filterQuantizedParam, - _inputQuantizedParam, - _modelFormat, - _outputQuantizedParam); -} - -inline const flatbuffers::TypeTable *FusedActivationTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - FusedActivationTypeTable - }; - static const char * const names[] = { - "kTfLiteActNone", - "kTfLiteActRelu", - "kTfLiteActRelu1", - "kTfLiteActRelu6", - "kTfLiteActTanh", - "kTfLiteActSignBit", - "kTfLiteActSigmoid" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 7, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ModeFormatTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ModeFormatTypeTable - }; - static const char * const names[] = { - "TENSORFLOW", - "TFLITE" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizeModeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - QuantizeModeTypeTable - }; - static const char * const names[] = { - "MIN_COMBINED", - "MIN_FIRST", - "SCALED" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizeRoundModeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - QuantizeRoundModeTypeTable - }; - static const char * const names[] = { - "HALF_AWAY_FROM_ZERO", - "HALF_TO_EVEN" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 
}, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "zeroPoint", - "scale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedAddTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - FusedActivationTypeTable, - QuantizedParamTypeTable - }; - static const char * const names[] = { - "activationType", - "input1QuantizedParam", - "input2QuantizedParam", - "outputQuantizedParam" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *DequantizeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_CHAR, 0, 2 }, - { flatbuffers::ET_INT, 0, 3 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - QuantizedParamTypeTable, - QuantizeModeTypeTable, - ModeFormatTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "inputQuantizedParam", - "mode", - "modelFormat", - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedAvgPoolTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ModeFormatTypeTable, - PoolPadTypeTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "kernelX", - "kernelY", - "modelFormat", - "outputActivationMax", - "outputActivationMin", - "padType", - "padX", - "padY", - "strideX", - "strideY", - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedBiasAddTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "bias", - "inputType", - "max", - "min", - "outputType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedConcatTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - FusedActivationTypeTable, - 
QuantizedParamTypeTable - }; - static const char * const names[] = { - "activationType", - "axis", - "inputScale", - "inputZeroPoint", - "outputQuantizedParam" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedLogisticTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - QuantizedParamTypeTable - }; - static const char * const names[] = { - "inputQuantizedParam", - "outputQuantizedParam" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedMatMulTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "transposeA", - "transposeB" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedMaxPoolTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ModeFormatTypeTable, - PoolPadTypeTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "kernelX", - "kernelY", - "modelFormat", - "outputActivationMax", - "outputActivationMin", - "padType", - "padX", - "padY", - "strideX", - "strideY", - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedReluTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedRelu6TypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizedReshapeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ModeFormatTypeTable - }; - static const char * const names[] = { - "dims", - "modelFormat" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const 
flatbuffers::TypeTable *QuantizedSoftmaxTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "beta", - "inputScale" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizeV2TypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_CHAR, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable, - QuantizeModeTypeTable, - QuantizeRoundModeTypeTable - }; - static const char * const names[] = { - "type", - "mode", - "roundMode" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *RequantizationRangeTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; -} - -inline const flatbuffers::TypeTable *RequantizeTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; -} - -inline const flatbuffers::TypeTable *TfQuantizedConv2DTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_UCHAR, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 }, - { flatbuffers::ET_SEQUENCE, 0, 2 }, - { flatbuffers::ET_CHAR, 0, 3 }, - { flatbuffers::ET_SEQUENCE, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - Convolution2DCommonTypeTable, - FusedActivationTypeTable, - QuantizedParamTypeTable, - ModeFormatTypeTable - }; - static const char * const names[] = { - "bias", - "biasflag", - "common", - "weight", - "activationType", - "multiplier", - "outMax", - "outMin", - "shift", - "biasQuantizedParam", - "depthMultiplier", - "filterQuantizedParam", - "inputQuantizedParam", - "modelFormat", - "outputQuantizedParam" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 15, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_TFQUANTIZEOP_MNN_H_ diff --git a/schema/current/Tensor_generated.h b/schema/current/Tensor_generated.h deleted file mode 100644 index f12b7b9f..00000000 --- a/schema/current/Tensor_generated.h +++ /dev/null @@ -1,793 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_TENSOR_MNN_H_ -#define FLATBUFFERS_GENERATED_TENSOR_MNN_H_ - - -#include "Type_generated.h" - -namespace MNN { - -struct Blob; -struct BlobT; - -struct ListValue; -struct ListValueT; - -struct Attribute; -struct AttributeT; - -inline const flatbuffers::TypeTable *BlobTypeTable(); - -inline const flatbuffers::TypeTable *ListValueTypeTable(); - -inline const flatbuffers::TypeTable *AttributeTypeTable(); - -enum MNN_DATA_FORMAT { - MNN_DATA_FORMAT_NCHW = 0, - MNN_DATA_FORMAT_NHWC = 1, - MNN_DATA_FORMAT_NC4HW4 = 2, - MNN_DATA_FORMAT_NHWC4 = 
3, - MNN_DATA_FORMAT_UNKNOWN = 4, - MNN_DATA_FORMAT_MIN = MNN_DATA_FORMAT_NCHW, - MNN_DATA_FORMAT_MAX = MNN_DATA_FORMAT_UNKNOWN -}; - -inline const MNN_DATA_FORMAT (&EnumValuesMNN_DATA_FORMAT())[5] { - static const MNN_DATA_FORMAT values[] = { - MNN_DATA_FORMAT_NCHW, - MNN_DATA_FORMAT_NHWC, - MNN_DATA_FORMAT_NC4HW4, - MNN_DATA_FORMAT_NHWC4, - MNN_DATA_FORMAT_UNKNOWN - }; - return values; -} - -inline const char * const *EnumNamesMNN_DATA_FORMAT() { - static const char * const names[] = { - "NCHW", - "NHWC", - "NC4HW4", - "NHWC4", - "UNKNOWN", - nullptr - }; - return names; -} - -inline const char *EnumNameMNN_DATA_FORMAT(MNN_DATA_FORMAT e) { - if (e < MNN_DATA_FORMAT_NCHW || e > MNN_DATA_FORMAT_UNKNOWN) return ""; - const size_t index = static_cast<size_t>(e); - return EnumNamesMNN_DATA_FORMAT()[index]; -} - -struct BlobT : public flatbuffers::NativeTable { - typedef Blob TableType; - std::vector<int32_t> dims; - MNN_DATA_FORMAT dataFormat; - DataType dataType; - std::vector<uint8_t> uint8s; - std::vector<int8_t> int8s; - std::vector<int32_t> int32s; - std::vector<int64_t> int64s; - std::vector<float> float32s; - std::vector<std::string> strings; - BlobT() - : dataFormat(MNN_DATA_FORMAT_NCHW), - dataType(DataType_DT_FLOAT) { - } -}; - -struct Blob FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BlobT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BlobTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DIMS = 4, - VT_DATAFORMAT = 6, - VT_DATATYPE = 8, - VT_UINT8S = 10, - VT_INT8S = 12, - VT_INT32S = 14, - VT_INT64S = 16, - VT_FLOAT32S = 18, - VT_STRINGS = 20 - }; - const flatbuffers::Vector<int32_t> *dims() const { - return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_DIMS); - } - MNN_DATA_FORMAT dataFormat() const { - return static_cast<MNN_DATA_FORMAT>(GetField<int8_t>(VT_DATAFORMAT, 0)); - } - DataType dataType() const { - return static_cast<DataType>(GetField<int32_t>(VT_DATATYPE, 1)); - } - const flatbuffers::Vector<uint8_t> *uint8s() const { - return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_UINT8S); - } - const flatbuffers::Vector<int8_t> *int8s() const { - return GetPointer<const flatbuffers::Vector<int8_t> *>(VT_INT8S); - } - const flatbuffers::Vector<int32_t> *int32s() const { - return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INT32S); - } - const flatbuffers::Vector<int64_t> *int64s() const { - return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INT64S); - } - const flatbuffers::Vector<float> *float32s() const { - return GetPointer<const flatbuffers::Vector<float> *>(VT_FLOAT32S); - } - const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *strings() const { - return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_STRINGS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DIMS) && - verifier.VerifyVector(dims()) && - VerifyField<int8_t>(verifier, VT_DATAFORMAT) && - VerifyField<int32_t>(verifier, VT_DATATYPE) && - VerifyOffset(verifier, VT_UINT8S) && - verifier.VerifyVector(uint8s()) && - VerifyOffset(verifier, VT_INT8S) && - verifier.VerifyVector(int8s()) && - VerifyOffset(verifier, VT_INT32S) && - verifier.VerifyVector(int32s()) && - VerifyOffset(verifier, VT_INT64S) && - verifier.VerifyVector(int64s()) && - VerifyOffset(verifier, VT_FLOAT32S) && - verifier.VerifyVector(float32s()) && - VerifyOffset(verifier, VT_STRINGS) && - verifier.VerifyVector(strings()) && - verifier.VerifyVectorOfStrings(strings()) && - verifier.EndTable(); - } - BlobT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BlobT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset<Blob> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BlobT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BlobBuilder { - 
flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dims(flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims) { - fbb_.AddOffset(Blob::VT_DIMS, dims); - } - void add_dataFormat(MNN_DATA_FORMAT dataFormat) { - fbb_.AddElement<int8_t>(Blob::VT_DATAFORMAT, static_cast<int8_t>(dataFormat), 0); - } - void add_dataType(DataType dataType) { - fbb_.AddElement<int32_t>(Blob::VT_DATATYPE, static_cast<int32_t>(dataType), 1); - } - void add_uint8s(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> uint8s) { - fbb_.AddOffset(Blob::VT_UINT8S, uint8s); - } - void add_int8s(flatbuffers::Offset<flatbuffers::Vector<int8_t>> int8s) { - fbb_.AddOffset(Blob::VT_INT8S, int8s); - } - void add_int32s(flatbuffers::Offset<flatbuffers::Vector<int32_t>> int32s) { - fbb_.AddOffset(Blob::VT_INT32S, int32s); - } - void add_int64s(flatbuffers::Offset<flatbuffers::Vector<int64_t>> int64s) { - fbb_.AddOffset(Blob::VT_INT64S, int64s); - } - void add_float32s(flatbuffers::Offset<flatbuffers::Vector<float>> float32s) { - fbb_.AddOffset(Blob::VT_FLOAT32S, float32s); - } - void add_strings(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> strings) { - fbb_.AddOffset(Blob::VT_STRINGS, strings); - } - explicit BlobBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BlobBuilder &operator=(const BlobBuilder &); - flatbuffers::Offset<Blob> Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset<Blob>(end); - return o; - } -}; - -inline flatbuffers::Offset<Blob> CreateBlob( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims = 0, - MNN_DATA_FORMAT dataFormat = MNN_DATA_FORMAT_NCHW, - DataType dataType = DataType_DT_FLOAT, - flatbuffers::Offset<flatbuffers::Vector<uint8_t>> uint8s = 0, - flatbuffers::Offset<flatbuffers::Vector<int8_t>> int8s = 0, - flatbuffers::Offset<flatbuffers::Vector<int32_t>> int32s = 0, - flatbuffers::Offset<flatbuffers::Vector<int64_t>> int64s = 0, - flatbuffers::Offset<flatbuffers::Vector<float>> float32s = 0, - flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> strings = 0) { - BlobBuilder builder_(_fbb); - builder_.add_strings(strings); - builder_.add_float32s(float32s); - builder_.add_int64s(int64s); - builder_.add_int32s(int32s); - builder_.add_int8s(int8s); - builder_.add_uint8s(uint8s); - builder_.add_dataType(dataType); - builder_.add_dims(dims); - builder_.add_dataFormat(dataFormat); - return builder_.Finish(); -} - -inline flatbuffers::Offset<Blob> CreateBlobDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector<int32_t> *dims = nullptr, - MNN_DATA_FORMAT dataFormat = MNN_DATA_FORMAT_NCHW, - DataType dataType = DataType_DT_FLOAT, - const std::vector<uint8_t> *uint8s = nullptr, - const std::vector<int8_t> *int8s = nullptr, - const std::vector<int32_t> *int32s = nullptr, - const std::vector<int64_t> *int64s = nullptr, - const std::vector<float> *float32s = nullptr, - const std::vector<flatbuffers::Offset<flatbuffers::String>> *strings = nullptr) { - auto dims__ = dims ? _fbb.CreateVector(*dims) : 0; - auto uint8s__ = uint8s ? _fbb.CreateVector(*uint8s) : 0; - auto int8s__ = int8s ? _fbb.CreateVector(*int8s) : 0; - auto int32s__ = int32s ? _fbb.CreateVector(*int32s) : 0; - auto int64s__ = int64s ? _fbb.CreateVector(*int64s) : 0; - auto float32s__ = float32s ? _fbb.CreateVector(*float32s) : 0; - auto strings__ = strings ? 
_fbb.CreateVector>(*strings) : 0; - return MNN::CreateBlob( - _fbb, - dims__, - dataFormat, - dataType, - uint8s__, - int8s__, - int32s__, - int64s__, - float32s__, - strings__); -} - -flatbuffers::Offset CreateBlob(flatbuffers::FlatBufferBuilder &_fbb, const BlobT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ListValueT : public flatbuffers::NativeTable { - typedef ListValue TableType; - std::vector s; - std::vector i; - std::vector f; - std::vector b; - std::vector type; - ListValueT() { - } -}; - -struct ListValue FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ListValueT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ListValueTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_S = 4, - VT_I = 6, - VT_F = 8, - VT_B = 10, - VT_TYPE = 12 - }; - const flatbuffers::Vector> *s() const { - return GetPointer> *>(VT_S); - } - const flatbuffers::Vector *i() const { - return GetPointer *>(VT_I); - } - const flatbuffers::Vector *f() const { - return GetPointer *>(VT_F); - } - const flatbuffers::Vector *b() const { - return GetPointer *>(VT_B); - } - const flatbuffers::Vector *type() const { - return GetPointer *>(VT_TYPE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_S) && - verifier.VerifyVector(s()) && - verifier.VerifyVectorOfStrings(s()) && - VerifyOffset(verifier, VT_I) && - verifier.VerifyVector(i()) && - VerifyOffset(verifier, VT_F) && - verifier.VerifyVector(f()) && - VerifyOffset(verifier, VT_B) && - verifier.VerifyVector(b()) && - VerifyOffset(verifier, VT_TYPE) && - verifier.VerifyVector(type()) && - verifier.EndTable(); - } - ListValueT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ListValueT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ListValueBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_s(flatbuffers::Offset>> s) { - fbb_.AddOffset(ListValue::VT_S, s); - } - void add_i(flatbuffers::Offset> i) { - fbb_.AddOffset(ListValue::VT_I, i); - } - void add_f(flatbuffers::Offset> f) { - fbb_.AddOffset(ListValue::VT_F, f); - } - void add_b(flatbuffers::Offset> b) { - fbb_.AddOffset(ListValue::VT_B, b); - } - void add_type(flatbuffers::Offset> type) { - fbb_.AddOffset(ListValue::VT_TYPE, type); - } - explicit ListValueBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ListValueBuilder &operator=(const ListValueBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateListValue( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> s = 0, - flatbuffers::Offset> i = 0, - flatbuffers::Offset> f = 0, - flatbuffers::Offset> b = 0, - flatbuffers::Offset> type = 0) { - ListValueBuilder builder_(_fbb); - builder_.add_type(type); - builder_.add_b(b); - builder_.add_f(f); - builder_.add_i(i); - builder_.add_s(s); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateListValueDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *s = nullptr, - const std::vector *i = nullptr, - const std::vector *f = nullptr, 
- const std::vector *b = nullptr, - const std::vector *type = nullptr) { - auto s__ = s ? _fbb.CreateVector>(*s) : 0; - auto i__ = i ? _fbb.CreateVector(*i) : 0; - auto f__ = f ? _fbb.CreateVector(*f) : 0; - auto b__ = b ? _fbb.CreateVector(*b) : 0; - auto type__ = type ? _fbb.CreateVector(*type) : 0; - return MNN::CreateListValue( - _fbb, - s__, - i__, - f__, - b__, - type__); -} - -flatbuffers::Offset CreateListValue(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AttributeT : public flatbuffers::NativeTable { - typedef Attribute TableType; - std::string s; - int32_t i; - bool b; - std::string key; - DataType type; - float f; - std::unique_ptr tensor; - std::unique_ptr list; - AttributeT() - : i(0), - b(false), - type(DataType_DT_INVALID), - f(0.0f) { - } -}; - -struct Attribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AttributeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return AttributeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_S = 4, - VT_I = 6, - VT_B = 8, - VT_KEY = 10, - VT_TYPE = 12, - VT_F = 14, - VT_TENSOR = 16, - VT_LIST = 18 - }; - const flatbuffers::String *s() const { - return GetPointer(VT_S); - } - int32_t i() const { - return GetField(VT_I, 0); - } - bool b() const { - return GetField(VT_B, 0) != 0; - } - const flatbuffers::String *key() const { - return GetPointer(VT_KEY); - } - DataType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - float f() const { - return GetField(VT_F, 0.0f); - } - const Blob *tensor() const { - return GetPointer(VT_TENSOR); - } - const ListValue *list() const { - return GetPointer(VT_LIST); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_S) && - verifier.VerifyString(s()) && - VerifyField(verifier, VT_I) && - VerifyField(verifier, VT_B) && - VerifyOffset(verifier, VT_KEY) && - verifier.VerifyString(key()) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_F) && - VerifyOffset(verifier, VT_TENSOR) && - verifier.VerifyTable(tensor()) && - VerifyOffset(verifier, VT_LIST) && - verifier.VerifyTable(list()) && - verifier.EndTable(); - } - AttributeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AttributeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AttributeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_s(flatbuffers::Offset s) { - fbb_.AddOffset(Attribute::VT_S, s); - } - void add_i(int32_t i) { - fbb_.AddElement(Attribute::VT_I, i, 0); - } - void add_b(bool b) { - fbb_.AddElement(Attribute::VT_B, static_cast(b), 0); - } - void add_key(flatbuffers::Offset key) { - fbb_.AddOffset(Attribute::VT_KEY, key); - } - void add_type(DataType type) { - fbb_.AddElement(Attribute::VT_TYPE, static_cast(type), 0); - } - void add_f(float f) { - fbb_.AddElement(Attribute::VT_F, f, 0.0f); - } - void add_tensor(flatbuffers::Offset tensor) { - fbb_.AddOffset(Attribute::VT_TENSOR, tensor); - } - void add_list(flatbuffers::Offset list) { - fbb_.AddOffset(Attribute::VT_LIST, list); - } - explicit AttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = 
fbb_.StartTable(); - } - AttributeBuilder &operator=(const AttributeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateAttribute( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset s = 0, - int32_t i = 0, - bool b = false, - flatbuffers::Offset key = 0, - DataType type = DataType_DT_INVALID, - float f = 0.0f, - flatbuffers::Offset tensor = 0, - flatbuffers::Offset list = 0) { - AttributeBuilder builder_(_fbb); - builder_.add_list(list); - builder_.add_tensor(tensor); - builder_.add_f(f); - builder_.add_type(type); - builder_.add_key(key); - builder_.add_i(i); - builder_.add_s(s); - builder_.add_b(b); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateAttributeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *s = nullptr, - int32_t i = 0, - bool b = false, - const char *key = nullptr, - DataType type = DataType_DT_INVALID, - float f = 0.0f, - flatbuffers::Offset tensor = 0, - flatbuffers::Offset list = 0) { - auto s__ = s ? _fbb.CreateString(s) : 0; - auto key__ = key ? _fbb.CreateString(key) : 0; - return MNN::CreateAttribute( - _fbb, - s__, - i, - b, - key__, - type, - f, - tensor, - list); -} - -flatbuffers::Offset CreateAttribute(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline BlobT *Blob::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BlobT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Blob::UnPackTo(BlobT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dims(); if (_e) { _o->dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dims[_i] = _e->Get(_i); } } }; - { auto _e = dataFormat(); _o->dataFormat = _e; }; - { auto _e = dataType(); _o->dataType = _e; }; - { auto _e = uint8s(); if (_e) { _o->uint8s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->uint8s[_i] = _e->Get(_i); } } }; - { auto _e = int8s(); if (_e) { _o->int8s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->int8s[_i] = _e->Get(_i); } } }; - { auto _e = int32s(); if (_e) { _o->int32s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->int32s[_i] = _e->Get(_i); } } }; - { auto _e = int64s(); if (_e) { _o->int64s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->int64s[_i] = _e->Get(_i); } } }; - { auto _e = float32s(); if (_e) { _o->float32s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->float32s[_i] = _e->Get(_i); } } }; - { auto _e = strings(); if (_e) { _o->strings.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->strings[_i] = _e->Get(_i)->str(); } } }; -} - -inline flatbuffers::Offset Blob::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BlobT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBlob(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBlob(flatbuffers::FlatBufferBuilder &_fbb, const BlobT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BlobT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dims = 
_o->dims.size() ? _fbb.CreateVector(_o->dims) : 0; - auto _dataFormat = _o->dataFormat; - auto _dataType = _o->dataType; - auto _uint8s = _o->uint8s.size() ? _fbb.CreateVector(_o->uint8s) : 0; - auto _int8s = _o->int8s.size() ? _fbb.CreateVector(_o->int8s) : 0; - auto _int32s = _o->int32s.size() ? _fbb.CreateVector(_o->int32s) : 0; - auto _int64s = _o->int64s.size() ? _fbb.CreateVector(_o->int64s) : 0; - auto _float32s = _o->float32s.size() ? _fbb.CreateVector(_o->float32s) : 0; - auto _strings = _o->strings.size() ? _fbb.CreateVectorOfStrings(_o->strings) : 0; - return MNN::CreateBlob( - _fbb, - _dims, - _dataFormat, - _dataType, - _uint8s, - _int8s, - _int32s, - _int64s, - _float32s, - _strings); -} - -inline ListValueT *ListValue::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ListValueT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ListValue::UnPackTo(ListValueT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = s(); if (_e) { _o->s.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->s[_i] = _e->Get(_i)->str(); } } }; - { auto _e = i(); if (_e) { _o->i.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->i[_i] = _e->Get(_i); } } }; - { auto _e = f(); if (_e) { _o->f.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->f[_i] = _e->Get(_i); } } }; - { auto _e = b(); if (_e) { _o->b.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->b[_i] = _e->Get(_i) != 0; } } }; - { auto _e = type(); if (_e) { _o->type.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->type[_i] = static_cast(_e->Get(_i)); } } }; -} - -inline flatbuffers::Offset ListValue::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateListValue(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateListValue(flatbuffers::FlatBufferBuilder &_fbb, const ListValueT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ListValueT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _s = _o->s.size() ? _fbb.CreateVectorOfStrings(_o->s) : 0; - auto _i = _o->i.size() ? _fbb.CreateVector(_o->i) : 0; - auto _f = _o->f.size() ? _fbb.CreateVector(_o->f) : 0; - auto _b = _o->b.size() ? _fbb.CreateVector(_o->b) : 0; - auto _type = _o->type.size() ? 
_fbb.CreateVectorScalarCast(flatbuffers::data(_o->type), _o->type.size()) : 0; - return MNN::CreateListValue( - _fbb, - _s, - _i, - _f, - _b, - _type); -} - -inline AttributeT *Attribute::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AttributeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Attribute::UnPackTo(AttributeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = s(); if (_e) _o->s = _e->str(); }; - { auto _e = i(); _o->i = _e; }; - { auto _e = b(); _o->b = _e; }; - { auto _e = key(); if (_e) _o->key = _e->str(); }; - { auto _e = type(); _o->type = _e; }; - { auto _e = f(); _o->f = _e; }; - { auto _e = tensor(); if (_e) _o->tensor = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = list(); if (_e) _o->list = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset Attribute::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAttribute(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAttribute(flatbuffers::FlatBufferBuilder &_fbb, const AttributeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AttributeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _s = _o->s.empty() ? 0 : _fbb.CreateString(_o->s); - auto _i = _o->i; - auto _b = _o->b; - auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key); - auto _type = _o->type; - auto _f = _o->f; - auto _tensor = _o->tensor ? CreateBlob(_fbb, _o->tensor.get(), _rehasher) : 0; - auto _list = _o->list ? CreateListValue(_fbb, _o->list.get(), _rehasher) : 0; - return MNN::CreateAttribute( - _fbb, - _s, - _i, - _b, - _key, - _type, - _f, - _tensor, - _list); -} - -inline const flatbuffers::TypeTable *MNN_DATA_FORMATTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - MNN_DATA_FORMATTypeTable - }; - static const char * const names[] = { - "NCHW", - "NHWC", - "NC4HW4", - "NHWC4", - "UNKNOWN" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *BlobTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 0, 1 }, - { flatbuffers::ET_UCHAR, 1, -1 }, - { flatbuffers::ET_CHAR, 1, -1 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_LONG, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_STRING, 1, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - MNN_DATA_FORMATTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "dims", - "dataFormat", - "dataType", - "uint8s", - "int8s", - "int32s", - "int64s", - "float32s", - "strings" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ListValueTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_STRING, 1, -1 }, - { 
flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_BOOL, 1, -1 }, - { flatbuffers::ET_INT, 1, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "s", - "i", - "f", - "b", - "type" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *AttributeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_STRING, 0, -1 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 1 }, - { flatbuffers::ET_SEQUENCE, 0, 2 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable, - BlobTypeTable, - ListValueTypeTable - }; - static const char * const names[] = { - "s", - "i", - "b", - "key", - "type", - "f", - "tensor", - "list" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 8, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_TENSOR_MNN_H_ diff --git a/schema/current/TensorflowOp_generated.h b/schema/current/TensorflowOp_generated.h deleted file mode 100644 index 8183e4b8..00000000 --- a/schema/current/TensorflowOp_generated.h +++ /dev/null @@ -1,5075 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_TENSORFLOWOP_MNN_H_ -#define FLATBUFFERS_GENERATED_TENSORFLOWOP_MNN_H_ - - -#include "Tensor_generated.h" -#include "Type_generated.h" - -namespace MNN { - -struct BinaryOp; -struct BinaryOpT; - -struct PackParam; -struct PackParamT; - -struct StridedSliceParam; -struct StridedSliceParamT; - -struct SqueezeParam; -struct SqueezeParamT; - -struct CastParam; -struct CastParamT; - -struct ReductionParam; -struct ReductionParamT; - -struct Gather; -struct GatherT; - -struct ExpandDims; -struct ExpandDimsT; - -struct Selu; -struct SeluT; - -struct AsString; -struct AsStringT; - -struct ReduceJoin; -struct ReduceJoinT; - -struct UnaryOp; -struct UnaryOpT; - -struct TopKV2; -struct TopKV2T; - -struct CropAndResize; -struct CropAndResizeT; - -struct Fill; -struct FillT; - -struct GatherV2; -struct GatherV2T; - -struct NonMaxSuppressionV2; -struct NonMaxSuppressionV2T; - -struct Range; -struct RangeT; - -struct Rank; -struct RankT; - -struct Size; -struct SizeT; - -struct Transpose; -struct TransposeT; - -struct SliceTf; -struct SliceTfT; - -struct QuantizeMaxMin; -struct QuantizeMaxMinT; - -struct Crop; -struct CropT; - -struct SpaceBatch; -struct SpaceBatchT; - -struct MatMul; -struct MatMulT; - -struct MomentsParam; -struct MomentsParamT; - -struct RNNParam; -struct RNNParamT; - -struct BatchMatMulParam; -struct BatchMatMulParamT; - -struct DepthSpaceParam; -struct DepthSpaceParamT; - -struct ReverseSequenceParam; -struct ReverseSequenceParamT; - -struct DetectionPostProcessParam; -struct DetectionPostProcessParamT; - -struct OneHotParam; -struct OneHotParamT; - -struct PadParam; -struct PadParamT; - -inline const flatbuffers::TypeTable *BinaryOpTypeTable(); - -inline const flatbuffers::TypeTable *PackParamTypeTable(); - -inline const flatbuffers::TypeTable *StridedSliceParamTypeTable(); - -inline const flatbuffers::TypeTable *SqueezeParamTypeTable(); - -inline const flatbuffers::TypeTable *CastParamTypeTable(); - 
-inline const flatbuffers::TypeTable *ReductionParamTypeTable(); - -inline const flatbuffers::TypeTable *GatherTypeTable(); - -inline const flatbuffers::TypeTable *ExpandDimsTypeTable(); - -inline const flatbuffers::TypeTable *SeluTypeTable(); - -inline const flatbuffers::TypeTable *AsStringTypeTable(); - -inline const flatbuffers::TypeTable *ReduceJoinTypeTable(); - -inline const flatbuffers::TypeTable *UnaryOpTypeTable(); - -inline const flatbuffers::TypeTable *TopKV2TypeTable(); - -inline const flatbuffers::TypeTable *CropAndResizeTypeTable(); - -inline const flatbuffers::TypeTable *FillTypeTable(); - -inline const flatbuffers::TypeTable *GatherV2TypeTable(); - -inline const flatbuffers::TypeTable *NonMaxSuppressionV2TypeTable(); - -inline const flatbuffers::TypeTable *RangeTypeTable(); - -inline const flatbuffers::TypeTable *RankTypeTable(); - -inline const flatbuffers::TypeTable *SizeTypeTable(); - -inline const flatbuffers::TypeTable *TransposeTypeTable(); - -inline const flatbuffers::TypeTable *SliceTfTypeTable(); - -inline const flatbuffers::TypeTable *QuantizeMaxMinTypeTable(); - -inline const flatbuffers::TypeTable *CropTypeTable(); - -inline const flatbuffers::TypeTable *SpaceBatchTypeTable(); - -inline const flatbuffers::TypeTable *MatMulTypeTable(); - -inline const flatbuffers::TypeTable *MomentsParamTypeTable(); - -inline const flatbuffers::TypeTable *RNNParamTypeTable(); - -inline const flatbuffers::TypeTable *BatchMatMulParamTypeTable(); - -inline const flatbuffers::TypeTable *DepthSpaceParamTypeTable(); - -inline const flatbuffers::TypeTable *ReverseSequenceParamTypeTable(); - -inline const flatbuffers::TypeTable *DetectionPostProcessParamTypeTable(); - -inline const flatbuffers::TypeTable *OneHotParamTypeTable(); - -inline const flatbuffers::TypeTable *PadParamTypeTable(); - -enum BinaryOpOperation { - BinaryOpOperation_ADD = 0, - BinaryOpOperation_SUB = 1, - BinaryOpOperation_MUL = 2, - BinaryOpOperation_DIV = 3, - BinaryOpOperation_MAX_TEMP = 4, - BinaryOpOperation_MIN_TEMP = 5, - BinaryOpOperation_POW = 6, - BinaryOpOperation_REALDIV = 7, - BinaryOpOperation_MINIMUM = 8, - BinaryOpOperation_MAXIMUM = 9, - BinaryOpOperation_GREATER = 10, - BinaryOpOperation_GREATER_EQUAL = 11, - BinaryOpOperation_LESS = 12, - BinaryOpOperation_FLOORDIV = 13, - BinaryOpOperation_SquaredDifference = 14, - BinaryOpOperation_EQUAL = 15, - BinaryOpOperation_LESS_EQUAL = 16, - BinaryOpOperation_FLOORMOD = 17, - BinaryOpOperation_MOD = 19, - BinaryOpOperation_ATAN2 = 20, - BinaryOpOperation_LOGICALOR = 21, - BinaryOpOperation_NOTEQUAL = 22, - BinaryOpOperation_MIN = BinaryOpOperation_ADD, - BinaryOpOperation_MAX = BinaryOpOperation_NOTEQUAL -}; - -inline const BinaryOpOperation (&EnumValuesBinaryOpOperation())[22] { - static const BinaryOpOperation values[] = { - BinaryOpOperation_ADD, - BinaryOpOperation_SUB, - BinaryOpOperation_MUL, - BinaryOpOperation_DIV, - BinaryOpOperation_MAX_TEMP, - BinaryOpOperation_MIN_TEMP, - BinaryOpOperation_POW, - BinaryOpOperation_REALDIV, - BinaryOpOperation_MINIMUM, - BinaryOpOperation_MAXIMUM, - BinaryOpOperation_GREATER, - BinaryOpOperation_GREATER_EQUAL, - BinaryOpOperation_LESS, - BinaryOpOperation_FLOORDIV, - BinaryOpOperation_SquaredDifference, - BinaryOpOperation_EQUAL, - BinaryOpOperation_LESS_EQUAL, - BinaryOpOperation_FLOORMOD, - BinaryOpOperation_MOD, - BinaryOpOperation_ATAN2, - BinaryOpOperation_LOGICALOR, - BinaryOpOperation_NOTEQUAL - }; - return values; -} - -inline const char * const *EnumNamesBinaryOpOperation() { - static const char * const 
names[] = { - "ADD", - "SUB", - "MUL", - "DIV", - "MAX_TEMP", - "MIN_TEMP", - "POW", - "REALDIV", - "MINIMUM", - "MAXIMUM", - "GREATER", - "GREATER_EQUAL", - "LESS", - "FLOORDIV", - "SquaredDifference", - "EQUAL", - "LESS_EQUAL", - "FLOORMOD", - "", - "MOD", - "ATAN2", - "LOGICALOR", - "NOTEQUAL", - nullptr - }; - return names; -} - -inline const char *EnumNameBinaryOpOperation(BinaryOpOperation e) { - if (e < BinaryOpOperation_ADD || e > BinaryOpOperation_NOTEQUAL) return ""; - const size_t index = static_cast(e); - return EnumNamesBinaryOpOperation()[index]; -} - -enum ReductionType { - ReductionType_SUM = 0, - ReductionType_ASUM = 1, - ReductionType_SUMSQ = 2, - ReductionType_MEAN = 3, - ReductionType_MAXIMUM = 4, - ReductionType_MINIMUM = 5, - ReductionType_PROD = 6, - ReductionType_ANY = 7, - ReductionType_ALL = 8, - ReductionType_MIN = ReductionType_SUM, - ReductionType_MAX = ReductionType_ALL -}; - -inline const ReductionType (&EnumValuesReductionType())[9] { - static const ReductionType values[] = { - ReductionType_SUM, - ReductionType_ASUM, - ReductionType_SUMSQ, - ReductionType_MEAN, - ReductionType_MAXIMUM, - ReductionType_MINIMUM, - ReductionType_PROD, - ReductionType_ANY, - ReductionType_ALL - }; - return values; -} - -inline const char * const *EnumNamesReductionType() { - static const char * const names[] = { - "SUM", - "ASUM", - "SUMSQ", - "MEAN", - "MAXIMUM", - "MINIMUM", - "PROD", - "ANY", - "ALL", - nullptr - }; - return names; -} - -inline const char *EnumNameReductionType(ReductionType e) { - if (e < ReductionType_SUM || e > ReductionType_ALL) return ""; - const size_t index = static_cast(e); - return EnumNamesReductionType()[index]; -} - -enum UnaryOpOperation { - UnaryOpOperation_ABS = 0, - UnaryOpOperation_NEG = 1, - UnaryOpOperation_FLOOR = 2, - UnaryOpOperation_CEIL = 3, - UnaryOpOperation_SQUARE = 4, - UnaryOpOperation_SQRT = 5, - UnaryOpOperation_RSQRT = 6, - UnaryOpOperation_EXP = 7, - UnaryOpOperation_LOG = 8, - UnaryOpOperation_SIN = 9, - UnaryOpOperation_COS = 10, - UnaryOpOperation_TAN = 11, - UnaryOpOperation_ASIN = 12, - UnaryOpOperation_ACOS = 13, - UnaryOpOperation_ATAN = 14, - UnaryOpOperation_RECIPROCAL = 15, - UnaryOpOperation_LOG1P = 16, - UnaryOpOperation_BNLL = 17, - UnaryOpOperation_ACOSH = 18, - UnaryOpOperation_SINH = 19, - UnaryOpOperation_ASINH = 20, - UnaryOpOperation_ATANH = 21, - UnaryOpOperation_SIGN = 22, - UnaryOpOperation_ROUND = 23, - UnaryOpOperation_COSH = 24, - UnaryOpOperation_ERF = 25, - UnaryOpOperation_ERFC = 26, - UnaryOpOperation_ERFINV = 27, - UnaryOpOperation_EXPM1 = 28, - UnaryOpOperation_MIN = UnaryOpOperation_ABS, - UnaryOpOperation_MAX = UnaryOpOperation_EXPM1 -}; - -inline const UnaryOpOperation (&EnumValuesUnaryOpOperation())[29] { - static const UnaryOpOperation values[] = { - UnaryOpOperation_ABS, - UnaryOpOperation_NEG, - UnaryOpOperation_FLOOR, - UnaryOpOperation_CEIL, - UnaryOpOperation_SQUARE, - UnaryOpOperation_SQRT, - UnaryOpOperation_RSQRT, - UnaryOpOperation_EXP, - UnaryOpOperation_LOG, - UnaryOpOperation_SIN, - UnaryOpOperation_COS, - UnaryOpOperation_TAN, - UnaryOpOperation_ASIN, - UnaryOpOperation_ACOS, - UnaryOpOperation_ATAN, - UnaryOpOperation_RECIPROCAL, - UnaryOpOperation_LOG1P, - UnaryOpOperation_BNLL, - UnaryOpOperation_ACOSH, - UnaryOpOperation_SINH, - UnaryOpOperation_ASINH, - UnaryOpOperation_ATANH, - UnaryOpOperation_SIGN, - UnaryOpOperation_ROUND, - UnaryOpOperation_COSH, - UnaryOpOperation_ERF, - UnaryOpOperation_ERFC, - UnaryOpOperation_ERFINV, - UnaryOpOperation_EXPM1 - }; - return values; 
-} - -inline const char * const *EnumNamesUnaryOpOperation() { - static const char * const names[] = { - "ABS", - "NEG", - "FLOOR", - "CEIL", - "SQUARE", - "SQRT", - "RSQRT", - "EXP", - "LOG", - "SIN", - "COS", - "TAN", - "ASIN", - "ACOS", - "ATAN", - "RECIPROCAL", - "LOG1P", - "BNLL", - "ACOSH", - "SINH", - "ASINH", - "ATANH", - "SIGN", - "ROUND", - "COSH", - "ERF", - "ERFC", - "ERFINV", - "EXPM1", - nullptr - }; - return names; -} - -inline const char *EnumNameUnaryOpOperation(UnaryOpOperation e) { - if (e < UnaryOpOperation_ABS || e > UnaryOpOperation_EXPM1) return ""; - const size_t index = static_cast(e); - return EnumNamesUnaryOpOperation()[index]; -} - -enum CropAndResizeMethod { - CropAndResizeMethod_BILINEAR = 0, - CropAndResizeMethod_NEAREST = 1, - CropAndResizeMethod_MIN = CropAndResizeMethod_BILINEAR, - CropAndResizeMethod_MAX = CropAndResizeMethod_NEAREST -}; - -inline const CropAndResizeMethod (&EnumValuesCropAndResizeMethod())[2] { - static const CropAndResizeMethod values[] = { - CropAndResizeMethod_BILINEAR, - CropAndResizeMethod_NEAREST - }; - return values; -} - -inline const char * const *EnumNamesCropAndResizeMethod() { - static const char * const names[] = { - "BILINEAR", - "NEAREST", - nullptr - }; - return names; -} - -inline const char *EnumNameCropAndResizeMethod(CropAndResizeMethod e) { - if (e < CropAndResizeMethod_BILINEAR || e > CropAndResizeMethod_NEAREST) return ""; - const size_t index = static_cast(e); - return EnumNamesCropAndResizeMethod()[index]; -} - -enum PadValueMode { - PadValueMode_CONSTANT = 0, - PadValueMode_REFLECT = 1, - PadValueMode_SYMMETRIC = 2, - PadValueMode_MIN = PadValueMode_CONSTANT, - PadValueMode_MAX = PadValueMode_SYMMETRIC -}; - -inline const PadValueMode (&EnumValuesPadValueMode())[3] { - static const PadValueMode values[] = { - PadValueMode_CONSTANT, - PadValueMode_REFLECT, - PadValueMode_SYMMETRIC - }; - return values; -} - -inline const char * const *EnumNamesPadValueMode() { - static const char * const names[] = { - "CONSTANT", - "REFLECT", - "SYMMETRIC", - nullptr - }; - return names; -} - -inline const char *EnumNamePadValueMode(PadValueMode e) { - if (e < PadValueMode_CONSTANT || e > PadValueMode_SYMMETRIC) return ""; - const size_t index = static_cast(e); - return EnumNamesPadValueMode()[index]; -} - -struct BinaryOpT : public flatbuffers::NativeTable { - typedef BinaryOp TableType; - int32_t opType; - DataType T; - BinaryOpT() - : opType(0), - T(DataType_DT_FLOAT) { - } -}; - -struct BinaryOp FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BinaryOpT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BinaryOpTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OPTYPE = 4, - VT_T = 6 - }; - int32_t opType() const { - return GetField(VT_OPTYPE, 0); - } - DataType T() const { - return static_cast(GetField(VT_T, 1)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OPTYPE) && - VerifyField(verifier, VT_T) && - verifier.EndTable(); - } - BinaryOpT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BinaryOpT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BinaryOpBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - 
flatbuffers::uoffset_t start_; - void add_opType(int32_t opType) { - fbb_.AddElement(BinaryOp::VT_OPTYPE, opType, 0); - } - void add_T(DataType T) { - fbb_.AddElement(BinaryOp::VT_T, static_cast(T), 1); - } - explicit BinaryOpBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BinaryOpBuilder &operator=(const BinaryOpBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBinaryOp( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t opType = 0, - DataType T = DataType_DT_FLOAT) { - BinaryOpBuilder builder_(_fbb); - builder_.add_T(T); - builder_.add_opType(opType); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBinaryOp(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PackParamT : public flatbuffers::NativeTable { - typedef PackParam TableType; - DataType dataType; - int32_t axis; - PackParamT() - : dataType(DataType_DT_INVALID), - axis(0) { - } -}; - -struct PackParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PackParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return PackParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DATATYPE = 4, - VT_AXIS = 6 - }; - DataType dataType() const { - return static_cast(GetField(VT_DATATYPE, 0)); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_DATATYPE) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - PackParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PackParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PackParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dataType(DataType dataType) { - fbb_.AddElement(PackParam::VT_DATATYPE, static_cast(dataType), 0); - } - void add_axis(int32_t axis) { - fbb_.AddElement(PackParam::VT_AXIS, axis, 0); - } - explicit PackParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PackParamBuilder &operator=(const PackParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePackParam( - flatbuffers::FlatBufferBuilder &_fbb, - DataType dataType = DataType_DT_INVALID, - int32_t axis = 0) { - PackParamBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_dataType(dataType); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePackParam(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct StridedSliceParamT : public flatbuffers::NativeTable { - typedef StridedSliceParam TableType; - DataType Index; - DataType T; - int32_t beginMask; - int32_t endMask; - int32_t ellipsisMask; - int32_t newAxisMask; - int32_t shrinkAxisMask; - StridedSliceParamT() - : Index(DataType_DT_INVALID), - T(DataType_DT_INVALID), - beginMask(0), - endMask(0), - 
ellipsisMask(0), - newAxisMask(0), - shrinkAxisMask(0) { - } -}; - -struct StridedSliceParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef StridedSliceParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return StridedSliceParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INDEX = 4, - VT_T = 6, - VT_BEGINMASK = 8, - VT_ENDMASK = 10, - VT_ELLIPSISMASK = 12, - VT_NEWAXISMASK = 14, - VT_SHRINKAXISMASK = 16 - }; - DataType Index() const { - return static_cast(GetField(VT_INDEX, 0)); - } - DataType T() const { - return static_cast(GetField(VT_T, 0)); - } - int32_t beginMask() const { - return GetField(VT_BEGINMASK, 0); - } - int32_t endMask() const { - return GetField(VT_ENDMASK, 0); - } - int32_t ellipsisMask() const { - return GetField(VT_ELLIPSISMASK, 0); - } - int32_t newAxisMask() const { - return GetField(VT_NEWAXISMASK, 0); - } - int32_t shrinkAxisMask() const { - return GetField(VT_SHRINKAXISMASK, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_INDEX) && - VerifyField(verifier, VT_T) && - VerifyField(verifier, VT_BEGINMASK) && - VerifyField(verifier, VT_ENDMASK) && - VerifyField(verifier, VT_ELLIPSISMASK) && - VerifyField(verifier, VT_NEWAXISMASK) && - VerifyField(verifier, VT_SHRINKAXISMASK) && - verifier.EndTable(); - } - StridedSliceParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(StridedSliceParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct StridedSliceParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_Index(DataType Index) { - fbb_.AddElement(StridedSliceParam::VT_INDEX, static_cast(Index), 0); - } - void add_T(DataType T) { - fbb_.AddElement(StridedSliceParam::VT_T, static_cast(T), 0); - } - void add_beginMask(int32_t beginMask) { - fbb_.AddElement(StridedSliceParam::VT_BEGINMASK, beginMask, 0); - } - void add_endMask(int32_t endMask) { - fbb_.AddElement(StridedSliceParam::VT_ENDMASK, endMask, 0); - } - void add_ellipsisMask(int32_t ellipsisMask) { - fbb_.AddElement(StridedSliceParam::VT_ELLIPSISMASK, ellipsisMask, 0); - } - void add_newAxisMask(int32_t newAxisMask) { - fbb_.AddElement(StridedSliceParam::VT_NEWAXISMASK, newAxisMask, 0); - } - void add_shrinkAxisMask(int32_t shrinkAxisMask) { - fbb_.AddElement(StridedSliceParam::VT_SHRINKAXISMASK, shrinkAxisMask, 0); - } - explicit StridedSliceParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - StridedSliceParamBuilder &operator=(const StridedSliceParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateStridedSliceParam( - flatbuffers::FlatBufferBuilder &_fbb, - DataType Index = DataType_DT_INVALID, - DataType T = DataType_DT_INVALID, - int32_t beginMask = 0, - int32_t endMask = 0, - int32_t ellipsisMask = 0, - int32_t newAxisMask = 0, - int32_t shrinkAxisMask = 0) { - StridedSliceParamBuilder builder_(_fbb); - builder_.add_shrinkAxisMask(shrinkAxisMask); - builder_.add_newAxisMask(newAxisMask); - builder_.add_ellipsisMask(ellipsisMask); - builder_.add_endMask(endMask); - 
builder_.add_beginMask(beginMask); - builder_.add_T(T); - builder_.add_Index(Index); - return builder_.Finish(); -} - -flatbuffers::Offset CreateStridedSliceParam(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SqueezeParamT : public flatbuffers::NativeTable { - typedef SqueezeParam TableType; - std::vector squeezeDims; - SqueezeParamT() { - } -}; - -struct SqueezeParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SqueezeParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SqueezeParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SQUEEZEDIMS = 4 - }; - const flatbuffers::Vector *squeezeDims() const { - return GetPointer *>(VT_SQUEEZEDIMS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SQUEEZEDIMS) && - verifier.VerifyVector(squeezeDims()) && - verifier.EndTable(); - } - SqueezeParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SqueezeParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SqueezeParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_squeezeDims(flatbuffers::Offset> squeezeDims) { - fbb_.AddOffset(SqueezeParam::VT_SQUEEZEDIMS, squeezeDims); - } - explicit SqueezeParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SqueezeParamBuilder &operator=(const SqueezeParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSqueezeParam( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> squeezeDims = 0) { - SqueezeParamBuilder builder_(_fbb); - builder_.add_squeezeDims(squeezeDims); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSqueezeParamDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *squeezeDims = nullptr) { - auto squeezeDims__ = squeezeDims ? 
_fbb.CreateVector(*squeezeDims) : 0;
-  return MNN::CreateSqueezeParam(
-      _fbb,
-      squeezeDims__);
-}
-
-flatbuffers::Offset<SqueezeParam> CreateSqueezeParam(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);

[This hunk continues by deleting the generated NativeTable (XxxT), Table, Builder, and CreateXxx/CreateXxxDirect definitions for CastParam, ReductionParam, Gather, ExpandDims, Selu, AsString, ReduceJoin, UnaryOp, and TopKV2; each follows the same generated pattern as the CropAndResize group shown next.]

-struct CropAndResizeT : public flatbuffers::NativeTable {
-  typedef CropAndResize TableType;
-  float extrapolationValue;
-  CropAndResizeMethod method;
-  CropAndResizeT()
-      : extrapolationValue(0.0f),
-        method(CropAndResizeMethod_BILINEAR) {
-  }
-};
-
-struct CropAndResize FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef CropAndResizeT NativeTableType;
-  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
-    return CropAndResizeTypeTable();
-  }
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_EXTRAPOLATIONVALUE = 4,
-    VT_METHOD = 6
-  };
-  float extrapolationValue() const {
-    return GetField<float>(VT_EXTRAPOLATIONVALUE, 0.0f);
-  }
-  CropAndResizeMethod method() const {
-    return static_cast<CropAndResizeMethod>(GetField<int8_t>(VT_METHOD, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<float>(verifier, VT_EXTRAPOLATIONVALUE) &&
-           VerifyField<int8_t>(verifier, VT_METHOD) &&
-           verifier.EndTable();
-  }
-  CropAndResizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CropAndResizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CropAndResize> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct CropAndResizeBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_extrapolationValue(float extrapolationValue) {
-    fbb_.AddElement<float>(CropAndResize::VT_EXTRAPOLATIONVALUE, extrapolationValue, 0.0f);
-  }
-  void add_method(CropAndResizeMethod method) {
-    fbb_.AddElement<int8_t>(CropAndResize::VT_METHOD, static_cast<int8_t>(method), 0);
-  }
-  explicit CropAndResizeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  CropAndResizeBuilder &operator=(const CropAndResizeBuilder &);
-  flatbuffers::Offset<CropAndResize> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CropAndResize>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<CropAndResize> CreateCropAndResize(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    float extrapolationValue = 0.0f,
-    CropAndResizeMethod method = CropAndResizeMethod_BILINEAR) {
-  CropAndResizeBuilder builder_(_fbb);
-  builder_.add_extrapolationValue(extrapolationValue);
-  builder_.add_method(method);
-  return builder_.Finish();
-}
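For context on the API being deleted: each generated table ships a CreateXxx helper that writes the packed table through a FlatBufferBuilder, plus UnPack/Pack conversions to and from the XxxT native object. Below is a minimal, illustrative usage sketch (not part of this change); the include of the generated header is assumed, and the table is used as a buffer root only for demonstration, whereas in MNN these parameter tables normally sit inside an Op.

// Illustrative only: shows how the generated CropAndResize helpers are typically used.
#include <memory>
#include "flatbuffers/flatbuffers.h"
// Also include the generated schema header that declares MNN::CropAndResize
// (the exact path/name depends on how the schema was generated).

void BuildAndReadCropAndResize() {
  flatbuffers::FlatBufferBuilder fbb;
  // Serialize: CreateCropAndResize drives CropAndResizeBuilder and returns an offset.
  auto param = MNN::CreateCropAndResize(fbb, /*extrapolationValue=*/0.5f,
                                        MNN::CropAndResizeMethod_BILINEAR);
  fbb.Finish(param);  // Root the buffer at this table purely for the demo.

  // Deserialize: read fields in place from the packed table...
  auto packed = flatbuffers::GetRoot<MNN::CropAndResize>(fbb.GetBufferPointer());
  float extrapolation = packed->extrapolationValue();
  // ...or unpack into the mutable native object (CropAndResizeT) when convenient.
  std::unique_ptr<MNN::CropAndResizeT> native(packed->UnPack());
  (void)extrapolation;
  (void)native;
}

The object (XxxT) form costs an extra copy but is mutable, which is why the generator emits both representations.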
- -flatbuffers::Offset CreateCropAndResize(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FillT : public flatbuffers::NativeTable { - typedef Fill TableType; - FillT() { - } -}; - -struct Fill FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FillT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return FillTypeTable(); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - FillT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FillT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FillBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FillBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FillBuilder &operator=(const FillBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFill( - flatbuffers::FlatBufferBuilder &_fbb) { - FillBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFill(flatbuffers::FlatBufferBuilder &_fbb, const FillT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GatherV2T : public flatbuffers::NativeTable { - typedef GatherV2 TableType; - DataType Taxis; - DataType Tindices; - DataType Tparams; - GatherV2T() - : Taxis(DataType_DT_INVALID), - Tindices(DataType_DT_INVALID), - Tparams(DataType_DT_INVALID) { - } -}; - -struct GatherV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherV2T NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return GatherV2TypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TAXIS = 4, - VT_TINDICES = 6, - VT_TPARAMS = 8 - }; - DataType Taxis() const { - return static_cast(GetField(VT_TAXIS, 0)); - } - DataType Tindices() const { - return static_cast(GetField(VT_TINDICES, 0)); - } - DataType Tparams() const { - return static_cast(GetField(VT_TPARAMS, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TAXIS) && - VerifyField(verifier, VT_TINDICES) && - VerifyField(verifier, VT_TPARAMS) && - verifier.EndTable(); - } - GatherV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GatherV2Builder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_Taxis(DataType Taxis) { - fbb_.AddElement(GatherV2::VT_TAXIS, static_cast(Taxis), 0); - } - void add_Tindices(DataType Tindices) { - fbb_.AddElement(GatherV2::VT_TINDICES, static_cast(Tindices), 0); - } - void add_Tparams(DataType Tparams) { - fbb_.AddElement(GatherV2::VT_TPARAMS, static_cast(Tparams), 0); - } - explicit GatherV2Builder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = 
fbb_.StartTable(); - } - GatherV2Builder &operator=(const GatherV2Builder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGatherV2( - flatbuffers::FlatBufferBuilder &_fbb, - DataType Taxis = DataType_DT_INVALID, - DataType Tindices = DataType_DT_INVALID, - DataType Tparams = DataType_DT_INVALID) { - GatherV2Builder builder_(_fbb); - builder_.add_Tparams(Tparams); - builder_.add_Tindices(Tindices); - builder_.add_Taxis(Taxis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGatherV2(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NonMaxSuppressionV2T : public flatbuffers::NativeTable { - typedef NonMaxSuppressionV2 TableType; - NonMaxSuppressionV2T() { - } -}; - -struct NonMaxSuppressionV2 FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NonMaxSuppressionV2T NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return NonMaxSuppressionV2TypeTable(); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NonMaxSuppressionV2T *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NonMaxSuppressionV2T *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NonMaxSuppressionV2Builder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NonMaxSuppressionV2Builder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NonMaxSuppressionV2Builder &operator=(const NonMaxSuppressionV2Builder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNonMaxSuppressionV2( - flatbuffers::FlatBufferBuilder &_fbb) { - NonMaxSuppressionV2Builder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNonMaxSuppressionV2(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RangeT : public flatbuffers::NativeTable { - typedef Range TableType; - DataType Tidx; - RangeT() - : Tidx(DataType_DT_INVALID) { - } -}; - -struct Range FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RangeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RangeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TIDX = 4 - }; - DataType Tidx() const { - return static_cast(GetField(VT_TIDX, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TIDX) && - verifier.EndTable(); - } - RangeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RangeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RangeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void 
add_Tidx(DataType Tidx) { - fbb_.AddElement(Range::VT_TIDX, static_cast(Tidx), 0); - } - explicit RangeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RangeBuilder &operator=(const RangeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRange( - flatbuffers::FlatBufferBuilder &_fbb, - DataType Tidx = DataType_DT_INVALID) { - RangeBuilder builder_(_fbb); - builder_.add_Tidx(Tidx); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRange(flatbuffers::FlatBufferBuilder &_fbb, const RangeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RankT : public flatbuffers::NativeTable { - typedef Rank TableType; - RankT() { - } -}; - -struct Rank FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RankT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RankTypeTable(); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RankT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RankT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RankBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RankBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RankBuilder &operator=(const RankBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRank( - flatbuffers::FlatBufferBuilder &_fbb) { - RankBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRank(flatbuffers::FlatBufferBuilder &_fbb, const RankT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SizeT : public flatbuffers::NativeTable { - typedef Size TableType; - DataType outputDataType; - SizeT() - : outputDataType(DataType_DT_INVALID) { - } -}; - -struct Size FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SizeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SizeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUTDATATYPE = 4 - }; - DataType outputDataType() const { - return static_cast(GetField(VT_OUTPUTDATATYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUTDATATYPE) && - verifier.EndTable(); - } - SizeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SizeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SizeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SizeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_outputDataType(DataType outputDataType) { - fbb_.AddElement(Size::VT_OUTPUTDATATYPE, static_cast(outputDataType), 0); - } - explicit SizeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = 
fbb_.StartTable(); - } - SizeBuilder &operator=(const SizeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSize( - flatbuffers::FlatBufferBuilder &_fbb, - DataType outputDataType = DataType_DT_INVALID) { - SizeBuilder builder_(_fbb); - builder_.add_outputDataType(outputDataType); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSize(flatbuffers::FlatBufferBuilder &_fbb, const SizeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeT : public flatbuffers::NativeTable { - typedef Transpose TableType; - DataType Tperm; - TransposeT() - : Tperm(DataType_DT_INVALID) { - } -}; - -struct Transpose FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TransposeTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TPERM = 4 - }; - DataType Tperm() const { - return static_cast(GetField(VT_TPERM, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TPERM) && - verifier.EndTable(); - } - TransposeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TransposeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_Tperm(DataType Tperm) { - fbb_.AddElement(Transpose::VT_TPERM, static_cast(Tperm), 0); - } - explicit TransposeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TransposeBuilder &operator=(const TransposeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTranspose( - flatbuffers::FlatBufferBuilder &_fbb, - DataType Tperm = DataType_DT_INVALID) { - TransposeBuilder builder_(_fbb); - builder_.add_Tperm(Tperm); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTranspose(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SliceTfT : public flatbuffers::NativeTable { - typedef SliceTf TableType; - DataType T; - SliceTfT() - : T(DataType_DT_INVALID) { - } -}; - -struct SliceTf FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SliceTfT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SliceTfTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_T = 4 - }; - DataType T() const { - return static_cast(GetField(VT_T, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_T) && - verifier.EndTable(); - } - SliceTfT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SliceTfT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct 
SliceTfBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_T(DataType T) { - fbb_.AddElement(SliceTf::VT_T, static_cast(T), 0); - } - explicit SliceTfBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SliceTfBuilder &operator=(const SliceTfBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSliceTf( - flatbuffers::FlatBufferBuilder &_fbb, - DataType T = DataType_DT_INVALID) { - SliceTfBuilder builder_(_fbb); - builder_.add_T(T); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSliceTf(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizeMaxMinT : public flatbuffers::NativeTable { - typedef QuantizeMaxMin TableType; - DataType T; - QuantizeMaxMinT() - : T(DataType_DT_INVALID) { - } -}; - -struct QuantizeMaxMin FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizeMaxMinT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return QuantizeMaxMinTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_T = 4 - }; - DataType T() const { - return static_cast(GetField(VT_T, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_T) && - verifier.EndTable(); - } - QuantizeMaxMinT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizeMaxMinT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizeMaxMinBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_T(DataType T) { - fbb_.AddElement(QuantizeMaxMin::VT_T, static_cast(T), 0); - } - explicit QuantizeMaxMinBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizeMaxMinBuilder &operator=(const QuantizeMaxMinBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizeMaxMin( - flatbuffers::FlatBufferBuilder &_fbb, - DataType T = DataType_DT_INVALID) { - QuantizeMaxMinBuilder builder_(_fbb); - builder_.add_T(T); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizeMaxMin(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CropT : public flatbuffers::NativeTable { - typedef Crop TableType; - int32_t axis; - std::vector offset; - CropT() - : axis(2) { - } -}; - -struct Crop FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CropT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return CropTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_OFFSET = 6 - }; - int32_t axis() const { - return GetField(VT_AXIS, 2); - } - const flatbuffers::Vector *offset() const { - return GetPointer *>(VT_OFFSET); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - 
VerifyOffset(verifier, VT_OFFSET) && - verifier.VerifyVector(offset()) && - verifier.EndTable(); - } - CropT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CropT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CropBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(Crop::VT_AXIS, axis, 2); - } - void add_offset(flatbuffers::Offset> offset) { - fbb_.AddOffset(Crop::VT_OFFSET, offset); - } - explicit CropBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CropBuilder &operator=(const CropBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCrop( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 2, - flatbuffers::Offset> offset = 0) { - CropBuilder builder_(_fbb); - builder_.add_offset(offset); - builder_.add_axis(axis); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateCropDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 2, - const std::vector *offset = nullptr) { - auto offset__ = offset ? _fbb.CreateVector(*offset) : 0; - return MNN::CreateCrop( - _fbb, - axis, - offset__); -} - -flatbuffers::Offset CreateCrop(flatbuffers::FlatBufferBuilder &_fbb, const CropT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceBatchT : public flatbuffers::NativeTable { - typedef SpaceBatch TableType; - std::unique_ptr blockShape; - std::unique_ptr padding; - SpaceBatchT() { - } -}; - -struct SpaceBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceBatchT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return SpaceBatchTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCKSHAPE = 4, - VT_PADDING = 6 - }; - const Blob *blockShape() const { - return GetPointer(VT_BLOCKSHAPE); - } - const Blob *padding() const { - return GetPointer(VT_PADDING); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_BLOCKSHAPE) && - verifier.VerifyTable(blockShape()) && - VerifyOffset(verifier, VT_PADDING) && - verifier.VerifyTable(padding()) && - verifier.EndTable(); - } - SpaceBatchT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceBatchT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceBatchBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_blockShape(flatbuffers::Offset blockShape) { - fbb_.AddOffset(SpaceBatch::VT_BLOCKSHAPE, blockShape); - } - void add_padding(flatbuffers::Offset padding) { - fbb_.AddOffset(SpaceBatch::VT_PADDING, padding); - } - explicit SpaceBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SpaceBatchBuilder &operator=(const SpaceBatchBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = 
flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSpaceBatch( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset blockShape = 0, - flatbuffers::Offset padding = 0) { - SpaceBatchBuilder builder_(_fbb); - builder_.add_padding(padding); - builder_.add_blockShape(blockShape); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSpaceBatch(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MatMulT : public flatbuffers::NativeTable { - typedef MatMul TableType; - DataType T; - bool transposeA; - bool transposeB; - std::vector weight; - std::vector bias; - MatMulT() - : T(DataType_DT_INVALID), - transposeA(false), - transposeB(false) { - } -}; - -struct MatMul FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MatMulT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return MatMulTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_T = 4, - VT_TRANSPOSEA = 6, - VT_TRANSPOSEB = 8, - VT_WEIGHT = 10, - VT_BIAS = 12 - }; - DataType T() const { - return static_cast(GetField(VT_T, 0)); - } - bool transposeA() const { - return GetField(VT_TRANSPOSEA, 0) != 0; - } - bool transposeB() const { - return GetField(VT_TRANSPOSEB, 0) != 0; - } - const flatbuffers::Vector *weight() const { - return GetPointer *>(VT_WEIGHT); - } - const flatbuffers::Vector *bias() const { - return GetPointer *>(VT_BIAS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_T) && - VerifyField(verifier, VT_TRANSPOSEA) && - VerifyField(verifier, VT_TRANSPOSEB) && - VerifyOffset(verifier, VT_WEIGHT) && - verifier.VerifyVector(weight()) && - VerifyOffset(verifier, VT_BIAS) && - verifier.VerifyVector(bias()) && - verifier.EndTable(); - } - MatMulT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MatMulT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MatMulBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_T(DataType T) { - fbb_.AddElement(MatMul::VT_T, static_cast(T), 0); - } - void add_transposeA(bool transposeA) { - fbb_.AddElement(MatMul::VT_TRANSPOSEA, static_cast(transposeA), 0); - } - void add_transposeB(bool transposeB) { - fbb_.AddElement(MatMul::VT_TRANSPOSEB, static_cast(transposeB), 0); - } - void add_weight(flatbuffers::Offset> weight) { - fbb_.AddOffset(MatMul::VT_WEIGHT, weight); - } - void add_bias(flatbuffers::Offset> bias) { - fbb_.AddOffset(MatMul::VT_BIAS, bias); - } - explicit MatMulBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MatMulBuilder &operator=(const MatMulBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMatMul( - flatbuffers::FlatBufferBuilder &_fbb, - DataType T = DataType_DT_INVALID, - bool transposeA = false, - bool transposeB = false, - flatbuffers::Offset> weight = 0, - flatbuffers::Offset> bias = 0) { - MatMulBuilder builder_(_fbb); - builder_.add_bias(bias); - builder_.add_weight(weight); - builder_.add_T(T); - builder_.add_transposeB(transposeB); - 
builder_.add_transposeA(transposeA); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateMatMulDirect( - flatbuffers::FlatBufferBuilder &_fbb, - DataType T = DataType_DT_INVALID, - bool transposeA = false, - bool transposeB = false, - const std::vector *weight = nullptr, - const std::vector *bias = nullptr) { - auto weight__ = weight ? _fbb.CreateVector(*weight) : 0; - auto bias__ = bias ? _fbb.CreateVector(*bias) : 0; - return MNN::CreateMatMul( - _fbb, - T, - transposeA, - transposeB, - weight__, - bias__); -} - -flatbuffers::Offset CreateMatMul(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MomentsParamT : public flatbuffers::NativeTable { - typedef MomentsParam TableType; - std::vector dim; - bool keepDims; - DataType dType; - MomentsParamT() - : keepDims(true), - dType(DataType_DT_FLOAT) { - } -}; - -struct MomentsParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MomentsParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return MomentsParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DIM = 4, - VT_KEEPDIMS = 6, - VT_DTYPE = 8 - }; - const flatbuffers::Vector *dim() const { - return GetPointer *>(VT_DIM); - } - bool keepDims() const { - return GetField(VT_KEEPDIMS, 1) != 0; - } - DataType dType() const { - return static_cast(GetField(VT_DTYPE, 1)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DIM) && - verifier.VerifyVector(dim()) && - VerifyField(verifier, VT_KEEPDIMS) && - VerifyField(verifier, VT_DTYPE) && - verifier.EndTable(); - } - MomentsParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MomentsParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MomentsParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dim(flatbuffers::Offset> dim) { - fbb_.AddOffset(MomentsParam::VT_DIM, dim); - } - void add_keepDims(bool keepDims) { - fbb_.AddElement(MomentsParam::VT_KEEPDIMS, static_cast(keepDims), 1); - } - void add_dType(DataType dType) { - fbb_.AddElement(MomentsParam::VT_DTYPE, static_cast(dType), 1); - } - explicit MomentsParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MomentsParamBuilder &operator=(const MomentsParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMomentsParam( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> dim = 0, - bool keepDims = true, - DataType dType = DataType_DT_FLOAT) { - MomentsParamBuilder builder_(_fbb); - builder_.add_dType(dType); - builder_.add_dim(dim); - builder_.add_keepDims(keepDims); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateMomentsParamDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *dim = nullptr, - bool keepDims = true, - DataType dType = DataType_DT_FLOAT) { - auto dim__ = dim ? 
_fbb.CreateVector(*dim) : 0; - return MNN::CreateMomentsParam( - _fbb, - dim__, - keepDims, - dType); -} - -flatbuffers::Offset CreateMomentsParam(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RNNParamT : public flatbuffers::NativeTable { - typedef RNNParam TableType; - int32_t numUnits; - bool isBidirectionalRNN; - bool keepAllOutputs; - std::unique_ptr fwGateWeight; - std::unique_ptr fwGateBias; - std::unique_ptr fwCandidateWeight; - std::unique_ptr fwCandidateBias; - std::unique_ptr bwGateWeight; - std::unique_ptr bwGateBias; - std::unique_ptr bwCandidateWeight; - std::unique_ptr bwCandidateBias; - RNNParamT() - : numUnits(0), - isBidirectionalRNN(false), - keepAllOutputs(false) { - } -}; - -struct RNNParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RNNParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return RNNParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUMUNITS = 4, - VT_ISBIDIRECTIONALRNN = 6, - VT_KEEPALLOUTPUTS = 8, - VT_FWGATEWEIGHT = 10, - VT_FWGATEBIAS = 12, - VT_FWCANDIDATEWEIGHT = 14, - VT_FWCANDIDATEBIAS = 16, - VT_BWGATEWEIGHT = 18, - VT_BWGATEBIAS = 20, - VT_BWCANDIDATEWEIGHT = 22, - VT_BWCANDIDATEBIAS = 24 - }; - int32_t numUnits() const { - return GetField(VT_NUMUNITS, 0); - } - bool isBidirectionalRNN() const { - return GetField(VT_ISBIDIRECTIONALRNN, 0) != 0; - } - bool keepAllOutputs() const { - return GetField(VT_KEEPALLOUTPUTS, 0) != 0; - } - const Blob *fwGateWeight() const { - return GetPointer(VT_FWGATEWEIGHT); - } - const Blob *fwGateBias() const { - return GetPointer(VT_FWGATEBIAS); - } - const Blob *fwCandidateWeight() const { - return GetPointer(VT_FWCANDIDATEWEIGHT); - } - const Blob *fwCandidateBias() const { - return GetPointer(VT_FWCANDIDATEBIAS); - } - const Blob *bwGateWeight() const { - return GetPointer(VT_BWGATEWEIGHT); - } - const Blob *bwGateBias() const { - return GetPointer(VT_BWGATEBIAS); - } - const Blob *bwCandidateWeight() const { - return GetPointer(VT_BWCANDIDATEWEIGHT); - } - const Blob *bwCandidateBias() const { - return GetPointer(VT_BWCANDIDATEBIAS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUMUNITS) && - VerifyField(verifier, VT_ISBIDIRECTIONALRNN) && - VerifyField(verifier, VT_KEEPALLOUTPUTS) && - VerifyOffset(verifier, VT_FWGATEWEIGHT) && - verifier.VerifyTable(fwGateWeight()) && - VerifyOffset(verifier, VT_FWGATEBIAS) && - verifier.VerifyTable(fwGateBias()) && - VerifyOffset(verifier, VT_FWCANDIDATEWEIGHT) && - verifier.VerifyTable(fwCandidateWeight()) && - VerifyOffset(verifier, VT_FWCANDIDATEBIAS) && - verifier.VerifyTable(fwCandidateBias()) && - VerifyOffset(verifier, VT_BWGATEWEIGHT) && - verifier.VerifyTable(bwGateWeight()) && - VerifyOffset(verifier, VT_BWGATEBIAS) && - verifier.VerifyTable(bwGateBias()) && - VerifyOffset(verifier, VT_BWCANDIDATEWEIGHT) && - verifier.VerifyTable(bwCandidateWeight()) && - VerifyOffset(verifier, VT_BWCANDIDATEBIAS) && - verifier.VerifyTable(bwCandidateBias()) && - verifier.EndTable(); - } - RNNParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RNNParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = 
nullptr); -}; - -struct RNNParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_numUnits(int32_t numUnits) { - fbb_.AddElement(RNNParam::VT_NUMUNITS, numUnits, 0); - } - void add_isBidirectionalRNN(bool isBidirectionalRNN) { - fbb_.AddElement(RNNParam::VT_ISBIDIRECTIONALRNN, static_cast(isBidirectionalRNN), 0); - } - void add_keepAllOutputs(bool keepAllOutputs) { - fbb_.AddElement(RNNParam::VT_KEEPALLOUTPUTS, static_cast(keepAllOutputs), 0); - } - void add_fwGateWeight(flatbuffers::Offset fwGateWeight) { - fbb_.AddOffset(RNNParam::VT_FWGATEWEIGHT, fwGateWeight); - } - void add_fwGateBias(flatbuffers::Offset fwGateBias) { - fbb_.AddOffset(RNNParam::VT_FWGATEBIAS, fwGateBias); - } - void add_fwCandidateWeight(flatbuffers::Offset fwCandidateWeight) { - fbb_.AddOffset(RNNParam::VT_FWCANDIDATEWEIGHT, fwCandidateWeight); - } - void add_fwCandidateBias(flatbuffers::Offset fwCandidateBias) { - fbb_.AddOffset(RNNParam::VT_FWCANDIDATEBIAS, fwCandidateBias); - } - void add_bwGateWeight(flatbuffers::Offset bwGateWeight) { - fbb_.AddOffset(RNNParam::VT_BWGATEWEIGHT, bwGateWeight); - } - void add_bwGateBias(flatbuffers::Offset bwGateBias) { - fbb_.AddOffset(RNNParam::VT_BWGATEBIAS, bwGateBias); - } - void add_bwCandidateWeight(flatbuffers::Offset bwCandidateWeight) { - fbb_.AddOffset(RNNParam::VT_BWCANDIDATEWEIGHT, bwCandidateWeight); - } - void add_bwCandidateBias(flatbuffers::Offset bwCandidateBias) { - fbb_.AddOffset(RNNParam::VT_BWCANDIDATEBIAS, bwCandidateBias); - } - explicit RNNParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RNNParamBuilder &operator=(const RNNParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRNNParam( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t numUnits = 0, - bool isBidirectionalRNN = false, - bool keepAllOutputs = false, - flatbuffers::Offset fwGateWeight = 0, - flatbuffers::Offset fwGateBias = 0, - flatbuffers::Offset fwCandidateWeight = 0, - flatbuffers::Offset fwCandidateBias = 0, - flatbuffers::Offset bwGateWeight = 0, - flatbuffers::Offset bwGateBias = 0, - flatbuffers::Offset bwCandidateWeight = 0, - flatbuffers::Offset bwCandidateBias = 0) { - RNNParamBuilder builder_(_fbb); - builder_.add_bwCandidateBias(bwCandidateBias); - builder_.add_bwCandidateWeight(bwCandidateWeight); - builder_.add_bwGateBias(bwGateBias); - builder_.add_bwGateWeight(bwGateWeight); - builder_.add_fwCandidateBias(fwCandidateBias); - builder_.add_fwCandidateWeight(fwCandidateWeight); - builder_.add_fwGateBias(fwGateBias); - builder_.add_fwGateWeight(fwGateWeight); - builder_.add_numUnits(numUnits); - builder_.add_keepAllOutputs(keepAllOutputs); - builder_.add_isBidirectionalRNN(isBidirectionalRNN); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRNNParam(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchMatMulParamT : public flatbuffers::NativeTable { - typedef BatchMatMulParam TableType; - bool adjX; - bool adjY; - BatchMatMulParamT() - : adjX(false), - adjY(false) { - } -}; - -struct BatchMatMulParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchMatMulParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return BatchMatMulParamTypeTable(); - } - enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_ADJX = 4,
-    VT_ADJY = 6
-  };
-  bool adjX() const {
-    return GetField<uint8_t>(VT_ADJX, 0) != 0;
-  }
-  bool adjY() const {
-    return GetField<uint8_t>(VT_ADJY, 0) != 0;
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint8_t>(verifier, VT_ADJX) &&
-           VerifyField<uint8_t>(verifier, VT_ADJY) &&
-           verifier.EndTable();
-  }
-  BatchMatMulParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(BatchMatMulParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<BatchMatMulParam> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct BatchMatMulParamBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_adjX(bool adjX) {
-    fbb_.AddElement<uint8_t>(BatchMatMulParam::VT_ADJX, static_cast<uint8_t>(adjX), 0);
-  }
-  void add_adjY(bool adjY) {
-    fbb_.AddElement<uint8_t>(BatchMatMulParam::VT_ADJY, static_cast<uint8_t>(adjY), 0);
-  }
-  explicit BatchMatMulParamBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  BatchMatMulParamBuilder &operator=(const BatchMatMulParamBuilder &);
-  flatbuffers::Offset<BatchMatMulParam> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<BatchMatMulParam>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<BatchMatMulParam> CreateBatchMatMulParam(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    bool adjX = false,
-    bool adjY = false) {
-  BatchMatMulParamBuilder builder_(_fbb);
-  builder_.add_adjY(adjY);
-  builder_.add_adjX(adjX);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<BatchMatMulParam> CreateBatchMatMulParam(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct DepthSpaceParamT : public flatbuffers::NativeTable {
-  typedef DepthSpaceParam TableType;
-  int32_t blockSize;
-  DepthSpaceParamT()
-      : blockSize(0) {
-  }
-};
-
-struct DepthSpaceParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef DepthSpaceParamT NativeTableType;
-  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
-    return DepthSpaceParamTypeTable();
-  }
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_BLOCKSIZE = 4
-  };
-  int32_t blockSize() const {
-    return GetField<int32_t>(VT_BLOCKSIZE, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int32_t>(verifier, VT_BLOCKSIZE) &&
-           verifier.EndTable();
-  }
-  DepthSpaceParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(DepthSpaceParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<DepthSpaceParam> Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct DepthSpaceParamBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_blockSize(int32_t blockSize) {
-    fbb_.AddElement<int32_t>(DepthSpaceParam::VT_BLOCKSIZE, blockSize, 0);
-  }
-  explicit DepthSpaceParamBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  DepthSpaceParamBuilder &operator=(const DepthSpaceParamBuilder &);
-  flatbuffers::Offset<DepthSpaceParam> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<DepthSpaceParam>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<DepthSpaceParam> CreateDepthSpaceParam(
-    flatbuffers::FlatBufferBuilder
&_fbb, - int32_t blockSize = 0) { - DepthSpaceParamBuilder builder_(_fbb); - builder_.add_blockSize(blockSize); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDepthSpaceParam(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReverseSequenceParamT : public flatbuffers::NativeTable { - typedef ReverseSequenceParam TableType; - int32_t batchDim; - int32_t seqDim; - ReverseSequenceParamT() - : batchDim(0), - seqDim(0) { - } -}; - -struct ReverseSequenceParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReverseSequenceParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return ReverseSequenceParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BATCHDIM = 4, - VT_SEQDIM = 6 - }; - int32_t batchDim() const { - return GetField(VT_BATCHDIM, 0); - } - int32_t seqDim() const { - return GetField(VT_SEQDIM, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BATCHDIM) && - VerifyField(verifier, VT_SEQDIM) && - verifier.EndTable(); - } - ReverseSequenceParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReverseSequenceParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReverseSequenceParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_batchDim(int32_t batchDim) { - fbb_.AddElement(ReverseSequenceParam::VT_BATCHDIM, batchDim, 0); - } - void add_seqDim(int32_t seqDim) { - fbb_.AddElement(ReverseSequenceParam::VT_SEQDIM, seqDim, 0); - } - explicit ReverseSequenceParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReverseSequenceParamBuilder &operator=(const ReverseSequenceParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReverseSequenceParam( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t batchDim = 0, - int32_t seqDim = 0) { - ReverseSequenceParamBuilder builder_(_fbb); - builder_.add_seqDim(seqDim); - builder_.add_batchDim(batchDim); - return builder_.Finish(); -} - -flatbuffers::Offset CreateReverseSequenceParam(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DetectionPostProcessParamT : public flatbuffers::NativeTable { - typedef DetectionPostProcessParam TableType; - int32_t maxDetections; - int32_t maxClassesPerDetection; - int32_t detectionsPerClass; - float nmsScoreThreshold; - float iouThreshold; - int32_t numClasses; - bool useRegularNMS; - std::vector centerSizeEncoding; - DetectionPostProcessParamT() - : maxDetections(0), - maxClassesPerDetection(0), - detectionsPerClass(0), - nmsScoreThreshold(0.0f), - iouThreshold(0.0f), - numClasses(0), - useRegularNMS(false) { - } -}; - -struct DetectionPostProcessParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DetectionPostProcessParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return DetectionPostProcessParamTypeTable(); - } - enum 
FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MAXDETECTIONS = 4, - VT_MAXCLASSESPERDETECTION = 6, - VT_DETECTIONSPERCLASS = 8, - VT_NMSSCORETHRESHOLD = 10, - VT_IOUTHRESHOLD = 12, - VT_NUMCLASSES = 14, - VT_USEREGULARNMS = 16, - VT_CENTERSIZEENCODING = 18 - }; - int32_t maxDetections() const { - return GetField(VT_MAXDETECTIONS, 0); - } - int32_t maxClassesPerDetection() const { - return GetField(VT_MAXCLASSESPERDETECTION, 0); - } - int32_t detectionsPerClass() const { - return GetField(VT_DETECTIONSPERCLASS, 0); - } - float nmsScoreThreshold() const { - return GetField(VT_NMSSCORETHRESHOLD, 0.0f); - } - float iouThreshold() const { - return GetField(VT_IOUTHRESHOLD, 0.0f); - } - int32_t numClasses() const { - return GetField(VT_NUMCLASSES, 0); - } - bool useRegularNMS() const { - return GetField(VT_USEREGULARNMS, 0) != 0; - } - const flatbuffers::Vector *centerSizeEncoding() const { - return GetPointer *>(VT_CENTERSIZEENCODING); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MAXDETECTIONS) && - VerifyField(verifier, VT_MAXCLASSESPERDETECTION) && - VerifyField(verifier, VT_DETECTIONSPERCLASS) && - VerifyField(verifier, VT_NMSSCORETHRESHOLD) && - VerifyField(verifier, VT_IOUTHRESHOLD) && - VerifyField(verifier, VT_NUMCLASSES) && - VerifyField(verifier, VT_USEREGULARNMS) && - VerifyOffset(verifier, VT_CENTERSIZEENCODING) && - verifier.VerifyVector(centerSizeEncoding()) && - verifier.EndTable(); - } - DetectionPostProcessParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DetectionPostProcessParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DetectionPostProcessParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_maxDetections(int32_t maxDetections) { - fbb_.AddElement(DetectionPostProcessParam::VT_MAXDETECTIONS, maxDetections, 0); - } - void add_maxClassesPerDetection(int32_t maxClassesPerDetection) { - fbb_.AddElement(DetectionPostProcessParam::VT_MAXCLASSESPERDETECTION, maxClassesPerDetection, 0); - } - void add_detectionsPerClass(int32_t detectionsPerClass) { - fbb_.AddElement(DetectionPostProcessParam::VT_DETECTIONSPERCLASS, detectionsPerClass, 0); - } - void add_nmsScoreThreshold(float nmsScoreThreshold) { - fbb_.AddElement(DetectionPostProcessParam::VT_NMSSCORETHRESHOLD, nmsScoreThreshold, 0.0f); - } - void add_iouThreshold(float iouThreshold) { - fbb_.AddElement(DetectionPostProcessParam::VT_IOUTHRESHOLD, iouThreshold, 0.0f); - } - void add_numClasses(int32_t numClasses) { - fbb_.AddElement(DetectionPostProcessParam::VT_NUMCLASSES, numClasses, 0); - } - void add_useRegularNMS(bool useRegularNMS) { - fbb_.AddElement(DetectionPostProcessParam::VT_USEREGULARNMS, static_cast(useRegularNMS), 0); - } - void add_centerSizeEncoding(flatbuffers::Offset> centerSizeEncoding) { - fbb_.AddOffset(DetectionPostProcessParam::VT_CENTERSIZEENCODING, centerSizeEncoding); - } - explicit DetectionPostProcessParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DetectionPostProcessParamBuilder &operator=(const DetectionPostProcessParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - 
return o; - } -}; - -inline flatbuffers::Offset CreateDetectionPostProcessParam( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t maxDetections = 0, - int32_t maxClassesPerDetection = 0, - int32_t detectionsPerClass = 0, - float nmsScoreThreshold = 0.0f, - float iouThreshold = 0.0f, - int32_t numClasses = 0, - bool useRegularNMS = false, - flatbuffers::Offset> centerSizeEncoding = 0) { - DetectionPostProcessParamBuilder builder_(_fbb); - builder_.add_centerSizeEncoding(centerSizeEncoding); - builder_.add_numClasses(numClasses); - builder_.add_iouThreshold(iouThreshold); - builder_.add_nmsScoreThreshold(nmsScoreThreshold); - builder_.add_detectionsPerClass(detectionsPerClass); - builder_.add_maxClassesPerDetection(maxClassesPerDetection); - builder_.add_maxDetections(maxDetections); - builder_.add_useRegularNMS(useRegularNMS); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateDetectionPostProcessParamDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t maxDetections = 0, - int32_t maxClassesPerDetection = 0, - int32_t detectionsPerClass = 0, - float nmsScoreThreshold = 0.0f, - float iouThreshold = 0.0f, - int32_t numClasses = 0, - bool useRegularNMS = false, - const std::vector *centerSizeEncoding = nullptr) { - auto centerSizeEncoding__ = centerSizeEncoding ? _fbb.CreateVector(*centerSizeEncoding) : 0; - return MNN::CreateDetectionPostProcessParam( - _fbb, - maxDetections, - maxClassesPerDetection, - detectionsPerClass, - nmsScoreThreshold, - iouThreshold, - numClasses, - useRegularNMS, - centerSizeEncoding__); -} - -flatbuffers::Offset CreateDetectionPostProcessParam(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OneHotParamT : public flatbuffers::NativeTable { - typedef OneHotParam TableType; - DataType dType; - int32_t axis; - OneHotParamT() - : dType(DataType_DT_FLOAT), - axis(-1) { - } -}; - -struct OneHotParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OneHotParamT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return OneHotParamTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DTYPE = 4, - VT_AXIS = 6 - }; - DataType dType() const { - return static_cast(GetField(VT_DTYPE, 1)); - } - int32_t axis() const { - return GetField(VT_AXIS, -1); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_DTYPE) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - OneHotParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OneHotParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct OneHotParamBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_dType(DataType dType) { - fbb_.AddElement(OneHotParam::VT_DTYPE, static_cast(dType), 1); - } - void add_axis(int32_t axis) { - fbb_.AddElement(OneHotParam::VT_AXIS, axis, -1); - } - explicit OneHotParamBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OneHotParamBuilder &operator=(const OneHotParamBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return 
o;
-  }
-};
-
-inline flatbuffers::Offset<OneHotParam> CreateOneHotParam(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    DataType dType = DataType_DT_FLOAT,
-    int32_t axis = -1) {
-  OneHotParamBuilder builder_(_fbb);
-  builder_.add_axis(axis);
-  builder_.add_dType(dType);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<OneHotParam> CreateOneHotParam(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct PadParamT : public flatbuffers::NativeTable {
-  typedef PadParam TableType;
-  PadValueMode mode;
-  PadParamT()
-      : mode(PadValueMode_CONSTANT) {
-  }
-};
-
-struct PadParam FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef PadParamT NativeTableType;
-  static const flatbuffers::TypeTable *MiniReflectTypeTable() {
-    return PadParamTypeTable();
-  }
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_MODE = 4
-  };
-  PadValueMode mode() const {
-    return static_cast<PadValueMode>(GetField<int8_t>(VT_MODE, 0));
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<int8_t>(verifier, VT_MODE) &&
-           verifier.EndTable();
-  }
-  PadParamT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PadParamT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<PadParam> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct PadParamBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_mode(PadValueMode mode) {
-    fbb_.AddElement<int8_t>(PadParam::VT_MODE, static_cast<int8_t>(mode), 0);
-  }
-  explicit PadParamBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  PadParamBuilder &operator=(const PadParamBuilder &);
-  flatbuffers::Offset<PadParam> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<PadParam>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<PadParam> CreatePadParam(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    PadValueMode mode = PadValueMode_CONSTANT) {
-  PadParamBuilder builder_(_fbb);
-  builder_.add_mode(mode);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<PadParam> CreatePadParam(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-inline BinaryOpT *BinaryOp::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = new BinaryOpT();
-  UnPackTo(_o, _resolver);
-  return _o;
-}
-
-inline void BinaryOp::UnPackTo(BinaryOpT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = opType(); _o->opType = _e; };
-  { auto _e = T(); _o->T = _e; };
-}
-
-inline flatbuffers::Offset<BinaryOp> BinaryOp::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBinaryOp(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<BinaryOp> CreateBinaryOp(flatbuffers::FlatBufferBuilder &_fbb, const BinaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BinaryOpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _opType = _o->opType;
-  auto _T = _o->T;
-  return MNN::CreateBinaryOp(
-      _fbb,
-      _opType,
-      _T);
-}
-
-inline PackParamT *PackParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto
_o = new PackParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PackParam::UnPackTo(PackParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dataType(); _o->dataType = _e; }; - { auto _e = axis(); _o->axis = _e; }; -} - -inline flatbuffers::Offset PackParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePackParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePackParam(flatbuffers::FlatBufferBuilder &_fbb, const PackParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dataType = _o->dataType; - auto _axis = _o->axis; - return MNN::CreatePackParam( - _fbb, - _dataType, - _axis); -} - -inline StridedSliceParamT *StridedSliceParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new StridedSliceParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void StridedSliceParam::UnPackTo(StridedSliceParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = Index(); _o->Index = _e; }; - { auto _e = T(); _o->T = _e; }; - { auto _e = beginMask(); _o->beginMask = _e; }; - { auto _e = endMask(); _o->endMask = _e; }; - { auto _e = ellipsisMask(); _o->ellipsisMask = _e; }; - { auto _e = newAxisMask(); _o->newAxisMask = _e; }; - { auto _e = shrinkAxisMask(); _o->shrinkAxisMask = _e; }; -} - -inline flatbuffers::Offset StridedSliceParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateStridedSliceParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateStridedSliceParam(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _Index = _o->Index; - auto _T = _o->T; - auto _beginMask = _o->beginMask; - auto _endMask = _o->endMask; - auto _ellipsisMask = _o->ellipsisMask; - auto _newAxisMask = _o->newAxisMask; - auto _shrinkAxisMask = _o->shrinkAxisMask; - return MNN::CreateStridedSliceParam( - _fbb, - _Index, - _T, - _beginMask, - _endMask, - _ellipsisMask, - _newAxisMask, - _shrinkAxisMask); -} - -inline SqueezeParamT *SqueezeParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SqueezeParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SqueezeParam::UnPackTo(SqueezeParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = squeezeDims(); if (_e) { _o->squeezeDims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeezeDims[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset SqueezeParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSqueezeParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSqueezeParam(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeParamT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _squeezeDims = _o->squeezeDims.size() ? _fbb.CreateVector(_o->squeezeDims) : 0; - return MNN::CreateSqueezeParam( - _fbb, - _squeezeDims); -} - -inline CastParamT *CastParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CastParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CastParam::UnPackTo(CastParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = srcT(); _o->srcT = _e; }; - { auto _e = dstT(); _o->dstT = _e; }; -} - -inline flatbuffers::Offset CastParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCastParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCastParam(flatbuffers::FlatBufferBuilder &_fbb, const CastParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _srcT = _o->srcT; - auto _dstT = _o->dstT; - return MNN::CreateCastParam( - _fbb, - _srcT, - _dstT); -} - -inline ReductionParamT *ReductionParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReductionParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReductionParam::UnPackTo(ReductionParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = operation(); _o->operation = _e; }; - { auto _e = dim(); if (_e) { _o->dim.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dim[_i] = _e->Get(_i); } } }; - { auto _e = coeff(); _o->coeff = _e; }; - { auto _e = keepDims(); _o->keepDims = _e; }; - { auto _e = dType(); _o->dType = _e; }; -} - -inline flatbuffers::Offset ReductionParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReductionParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReductionParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReductionParam(flatbuffers::FlatBufferBuilder &_fbb, const ReductionParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReductionParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _operation = _o->operation; - auto _dim = _o->dim.size() ? 
_fbb.CreateVector(_o->dim) : 0; - auto _coeff = _o->coeff; - auto _keepDims = _o->keepDims; - auto _dType = _o->dType; - return MNN::CreateReductionParam( - _fbb, - _operation, - _dim, - _coeff, - _keepDims, - _dType); -} - -inline GatherT *Gather::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Gather::UnPackTo(GatherT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = Tindices(); _o->Tindices = _e; }; - { auto _e = Tparams(); _o->Tparams = _e; }; - { auto _e = validateIndices(); _o->validateIndices = _e; }; - { auto _e = axis(); _o->axis = _e; }; -} - -inline flatbuffers::Offset Gather::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGather(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGather(flatbuffers::FlatBufferBuilder &_fbb, const GatherT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _Tindices = _o->Tindices; - auto _Tparams = _o->Tparams; - auto _validateIndices = _o->validateIndices; - auto _axis = _o->axis; - return MNN::CreateGather( - _fbb, - _Tindices, - _Tparams, - _validateIndices, - _axis); -} - -inline ExpandDimsT *ExpandDims::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpandDimsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ExpandDims::UnPackTo(ExpandDimsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = T(); _o->T = _e; }; - { auto _e = Tdim(); _o->Tdim = _e; }; - { auto _e = axis(); _o->axis = _e; }; -} - -inline flatbuffers::Offset ExpandDims::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpandDims(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateExpandDims(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _T = _o->T; - auto _Tdim = _o->Tdim; - auto _axis = _o->axis; - return MNN::CreateExpandDims( - _fbb, - _T, - _Tdim, - _axis); -} - -inline SeluT *Selu::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SeluT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Selu::UnPackTo(SeluT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = scale(); _o->scale = _e; }; - { auto _e = alpha(); _o->alpha = _e; }; -} - -inline flatbuffers::Offset Selu::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SeluT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelu(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSelu(flatbuffers::FlatBufferBuilder &_fbb, const SeluT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SeluT* __o; const flatbuffers::rehasher_function_t *__rehasher; } 
_va = { &_fbb, _o, _rehasher}; (void)_va; - auto _scale = _o->scale; - auto _alpha = _o->alpha; - return MNN::CreateSelu( - _fbb, - _scale, - _alpha); -} - -inline AsStringT *AsString::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AsStringT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AsString::UnPackTo(AsStringT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = T(); _o->T = _e; }; - { auto _e = precision(); _o->precision = _e; }; - { auto _e = scientific(); _o->scientific = _e; }; - { auto _e = shortest(); _o->shortest = _e; }; - { auto _e = width(); _o->width = _e; }; - { auto _e = fillString(); if (_e) _o->fillString = _e->str(); }; -} - -inline flatbuffers::Offset AsString::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AsStringT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAsString(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAsString(flatbuffers::FlatBufferBuilder &_fbb, const AsStringT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AsStringT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _T = _o->T; - auto _precision = _o->precision; - auto _scientific = _o->scientific; - auto _shortest = _o->shortest; - auto _width = _o->width; - auto _fillString = _o->fillString.empty() ? 0 : _fbb.CreateString(_o->fillString); - return MNN::CreateAsString( - _fbb, - _T, - _precision, - _scientific, - _shortest, - _width, - _fillString); -} - -inline ReduceJoinT *ReduceJoin::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReduceJoinT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReduceJoin::UnPackTo(ReduceJoinT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = keepDims(); _o->keepDims = _e; }; - { auto _e = separator(); if (_e) _o->separator = _e->str(); }; -} - -inline flatbuffers::Offset ReduceJoin::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReduceJoinT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReduceJoin(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReduceJoin(flatbuffers::FlatBufferBuilder &_fbb, const ReduceJoinT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReduceJoinT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _keepDims = _o->keepDims; - auto _separator = _o->separator.empty() ? 
0 : _fbb.CreateString(_o->separator); - return MNN::CreateReduceJoin( - _fbb, - _keepDims, - _separator); -} - -inline UnaryOpT *UnaryOp::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnaryOpT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UnaryOp::UnPackTo(UnaryOpT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = opType(); _o->opType = _e; }; - { auto _e = T(); _o->T = _e; }; -} - -inline flatbuffers::Offset UnaryOp::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnaryOpT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnaryOp(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUnaryOp(flatbuffers::FlatBufferBuilder &_fbb, const UnaryOpT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnaryOpT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _opType = _o->opType; - auto _T = _o->T; - return MNN::CreateUnaryOp( - _fbb, - _opType, - _T); -} - -inline TopKV2T *TopKV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TopKV2T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TopKV2::UnPackTo(TopKV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = T(); _o->T = _e; }; - { auto _e = sorted(); _o->sorted = _e; }; -} - -inline flatbuffers::Offset TopKV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTopKV2(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTopKV2(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _T = _o->T; - auto _sorted = _o->sorted; - return MNN::CreateTopKV2( - _fbb, - _T, - _sorted); -} - -inline CropAndResizeT *CropAndResize::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CropAndResizeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CropAndResize::UnPackTo(CropAndResizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = extrapolationValue(); _o->extrapolationValue = _e; }; - { auto _e = method(); _o->method = _e; }; -} - -inline flatbuffers::Offset CropAndResize::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCropAndResize(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCropAndResize(flatbuffers::FlatBufferBuilder &_fbb, const CropAndResizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CropAndResizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _extrapolationValue = _o->extrapolationValue; - auto _method = _o->method; - return MNN::CreateCropAndResize( - _fbb, - _extrapolationValue, - _method); -} - -inline FillT *Fill::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FillT(); - 
UnPackTo(_o, _resolver); - return _o; -} - -inline void Fill::UnPackTo(FillT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset Fill::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFill(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFill(flatbuffers::FlatBufferBuilder &_fbb, const FillT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return MNN::CreateFill( - _fbb); -} - -inline GatherV2T *GatherV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherV2T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GatherV2::UnPackTo(GatherV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = Taxis(); _o->Taxis = _e; }; - { auto _e = Tindices(); _o->Tindices = _e; }; - { auto _e = Tparams(); _o->Tparams = _e; }; -} - -inline flatbuffers::Offset GatherV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherV2(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGatherV2(flatbuffers::FlatBufferBuilder &_fbb, const GatherV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _Taxis = _o->Taxis; - auto _Tindices = _o->Tindices; - auto _Tparams = _o->Tparams; - return MNN::CreateGatherV2( - _fbb, - _Taxis, - _Tindices, - _Tparams); -} - -inline NonMaxSuppressionV2T *NonMaxSuppressionV2::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV2T(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NonMaxSuppressionV2::UnPackTo(NonMaxSuppressionV2T *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NonMaxSuppressionV2::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNonMaxSuppressionV2(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNonMaxSuppressionV2(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV2T *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV2T* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return MNN::CreateNonMaxSuppressionV2( - _fbb); -} - -inline RangeT *Range::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RangeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Range::UnPackTo(RangeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = Tidx(); _o->Tidx = _e; }; -} - -inline flatbuffers::Offset Range::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRange(_fbb, _o, _rehasher); -} - -inline 
flatbuffers::Offset CreateRange(flatbuffers::FlatBufferBuilder &_fbb, const RangeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _Tidx = _o->Tidx; - return MNN::CreateRange( - _fbb, - _Tidx); -} - -inline RankT *Rank::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RankT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Rank::UnPackTo(RankT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset Rank::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRank(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRank(flatbuffers::FlatBufferBuilder &_fbb, const RankT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return MNN::CreateRank( - _fbb); -} - -inline SizeT *Size::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SizeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Size::UnPackTo(SizeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = outputDataType(); _o->outputDataType = _e; }; -} - -inline flatbuffers::Offset Size::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SizeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSize(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSize(flatbuffers::FlatBufferBuilder &_fbb, const SizeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SizeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _outputDataType = _o->outputDataType; - return MNN::CreateSize( - _fbb, - _outputDataType); -} - -inline TransposeT *Transpose::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Transpose::UnPackTo(TransposeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = Tperm(); _o->Tperm = _e; }; -} - -inline flatbuffers::Offset Transpose::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTranspose(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTranspose(flatbuffers::FlatBufferBuilder &_fbb, const TransposeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _Tperm = _o->Tperm; - return MNN::CreateTranspose( - _fbb, - _Tperm); -} - -inline SliceTfT *SliceTf::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SliceTfT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SliceTf::UnPackTo(SliceTfT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = T(); _o->T = _e; }; -} - -inline flatbuffers::Offset SliceTf::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSliceTf(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSliceTf(flatbuffers::FlatBufferBuilder &_fbb, const SliceTfT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceTfT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _T = _o->T; - return MNN::CreateSliceTf( - _fbb, - _T); -} - -inline QuantizeMaxMinT *QuantizeMaxMin::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizeMaxMinT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizeMaxMin::UnPackTo(QuantizeMaxMinT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = T(); _o->T = _e; }; -} - -inline flatbuffers::Offset QuantizeMaxMin::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizeMaxMin(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizeMaxMin(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeMaxMinT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeMaxMinT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _T = _o->T; - return MNN::CreateQuantizeMaxMin( - _fbb, - _T); -} - -inline CropT *Crop::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CropT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Crop::UnPackTo(CropT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; }; - { auto _e = offset(); if (_e) { _o->offset.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->offset[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset Crop::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CropT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCrop(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCrop(flatbuffers::FlatBufferBuilder &_fbb, const CropT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CropT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _offset = _o->offset.size() ? 
_fbb.CreateVector(_o->offset) : 0; - return MNN::CreateCrop( - _fbb, - _axis, - _offset); -} - -inline SpaceBatchT *SpaceBatch::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceBatchT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SpaceBatch::UnPackTo(SpaceBatchT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = blockShape(); if (_e) _o->blockShape = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = padding(); if (_e) _o->padding = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset SpaceBatch::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceBatch(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSpaceBatch(flatbuffers::FlatBufferBuilder &_fbb, const SpaceBatchT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceBatchT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _blockShape = _o->blockShape ? CreateBlob(_fbb, _o->blockShape.get(), _rehasher) : 0; - auto _padding = _o->padding ? CreateBlob(_fbb, _o->padding.get(), _rehasher) : 0; - return MNN::CreateSpaceBatch( - _fbb, - _blockShape, - _padding); -} - -inline MatMulT *MatMul::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatMulT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MatMul::UnPackTo(MatMulT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = T(); _o->T = _e; }; - { auto _e = transposeA(); _o->transposeA = _e; }; - { auto _e = transposeB(); _o->transposeB = _e; }; - { auto _e = weight(); if (_e) { _o->weight.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->weight[_i] = _e->Get(_i); } } }; - { auto _e = bias(); if (_e) { _o->bias.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->bias[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset MatMul::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatMul(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMatMul(flatbuffers::FlatBufferBuilder &_fbb, const MatMulT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatMulT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _T = _o->T; - auto _transposeA = _o->transposeA; - auto _transposeB = _o->transposeB; - auto _weight = _o->weight.size() ? _fbb.CreateVector(_o->weight) : 0; - auto _bias = _o->bias.size() ? 
_fbb.CreateVector(_o->bias) : 0; - return MNN::CreateMatMul( - _fbb, - _T, - _transposeA, - _transposeB, - _weight, - _bias); -} - -inline MomentsParamT *MomentsParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MomentsParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MomentsParam::UnPackTo(MomentsParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dim(); if (_e) { _o->dim.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dim[_i] = _e->Get(_i); } } }; - { auto _e = keepDims(); _o->keepDims = _e; }; - { auto _e = dType(); _o->dType = _e; }; -} - -inline flatbuffers::Offset MomentsParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMomentsParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMomentsParam(flatbuffers::FlatBufferBuilder &_fbb, const MomentsParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MomentsParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dim = _o->dim.size() ? _fbb.CreateVector(_o->dim) : 0; - auto _keepDims = _o->keepDims; - auto _dType = _o->dType; - return MNN::CreateMomentsParam( - _fbb, - _dim, - _keepDims, - _dType); -} - -inline RNNParamT *RNNParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RNNParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RNNParam::UnPackTo(RNNParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = numUnits(); _o->numUnits = _e; }; - { auto _e = isBidirectionalRNN(); _o->isBidirectionalRNN = _e; }; - { auto _e = keepAllOutputs(); _o->keepAllOutputs = _e; }; - { auto _e = fwGateWeight(); if (_e) _o->fwGateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = fwGateBias(); if (_e) _o->fwGateBias = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = fwCandidateWeight(); if (_e) _o->fwCandidateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = fwCandidateBias(); if (_e) _o->fwCandidateBias = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = bwGateWeight(); if (_e) _o->bwGateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = bwGateBias(); if (_e) _o->bwGateBias = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = bwCandidateWeight(); if (_e) _o->bwCandidateWeight = std::unique_ptr(_e->UnPack(_resolver)); }; - { auto _e = bwCandidateBias(); if (_e) _o->bwCandidateBias = std::unique_ptr(_e->UnPack(_resolver)); }; -} - -inline flatbuffers::Offset RNNParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRNNParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRNNParam(flatbuffers::FlatBufferBuilder &_fbb, const RNNParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _numUnits = _o->numUnits; - auto _isBidirectionalRNN = _o->isBidirectionalRNN; - auto _keepAllOutputs = _o->keepAllOutputs; - auto _fwGateWeight = 
_o->fwGateWeight ? CreateBlob(_fbb, _o->fwGateWeight.get(), _rehasher) : 0; - auto _fwGateBias = _o->fwGateBias ? CreateBlob(_fbb, _o->fwGateBias.get(), _rehasher) : 0; - auto _fwCandidateWeight = _o->fwCandidateWeight ? CreateBlob(_fbb, _o->fwCandidateWeight.get(), _rehasher) : 0; - auto _fwCandidateBias = _o->fwCandidateBias ? CreateBlob(_fbb, _o->fwCandidateBias.get(), _rehasher) : 0; - auto _bwGateWeight = _o->bwGateWeight ? CreateBlob(_fbb, _o->bwGateWeight.get(), _rehasher) : 0; - auto _bwGateBias = _o->bwGateBias ? CreateBlob(_fbb, _o->bwGateBias.get(), _rehasher) : 0; - auto _bwCandidateWeight = _o->bwCandidateWeight ? CreateBlob(_fbb, _o->bwCandidateWeight.get(), _rehasher) : 0; - auto _bwCandidateBias = _o->bwCandidateBias ? CreateBlob(_fbb, _o->bwCandidateBias.get(), _rehasher) : 0; - return MNN::CreateRNNParam( - _fbb, - _numUnits, - _isBidirectionalRNN, - _keepAllOutputs, - _fwGateWeight, - _fwGateBias, - _fwCandidateWeight, - _fwCandidateBias, - _bwGateWeight, - _bwGateBias, - _bwCandidateWeight, - _bwCandidateBias); -} - -inline BatchMatMulParamT *BatchMatMulParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchMatMulParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BatchMatMulParam::UnPackTo(BatchMatMulParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = adjX(); _o->adjX = _e; }; - { auto _e = adjY(); _o->adjY = _e; }; -} - -inline flatbuffers::Offset BatchMatMulParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchMatMulParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBatchMatMulParam(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _adjX = _o->adjX; - auto _adjY = _o->adjY; - return MNN::CreateBatchMatMulParam( - _fbb, - _adjX, - _adjY); -} - -inline DepthSpaceParamT *DepthSpaceParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DepthSpaceParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DepthSpaceParam::UnPackTo(DepthSpaceParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = blockSize(); _o->blockSize = _e; }; -} - -inline flatbuffers::Offset DepthSpaceParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthSpaceParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDepthSpaceParam(flatbuffers::FlatBufferBuilder &_fbb, const DepthSpaceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthSpaceParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _blockSize = _o->blockSize; - return MNN::CreateDepthSpaceParam( - _fbb, - _blockSize); -} - -inline ReverseSequenceParamT *ReverseSequenceParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseSequenceParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void 
ReverseSequenceParam::UnPackTo(ReverseSequenceParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = batchDim(); _o->batchDim = _e; }; - { auto _e = seqDim(); _o->seqDim = _e; }; -} - -inline flatbuffers::Offset ReverseSequenceParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseSequenceParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReverseSequenceParam(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _batchDim = _o->batchDim; - auto _seqDim = _o->seqDim; - return MNN::CreateReverseSequenceParam( - _fbb, - _batchDim, - _seqDim); -} - -inline DetectionPostProcessParamT *DetectionPostProcessParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DetectionPostProcessParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DetectionPostProcessParam::UnPackTo(DetectionPostProcessParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = maxDetections(); _o->maxDetections = _e; }; - { auto _e = maxClassesPerDetection(); _o->maxClassesPerDetection = _e; }; - { auto _e = detectionsPerClass(); _o->detectionsPerClass = _e; }; - { auto _e = nmsScoreThreshold(); _o->nmsScoreThreshold = _e; }; - { auto _e = iouThreshold(); _o->iouThreshold = _e; }; - { auto _e = numClasses(); _o->numClasses = _e; }; - { auto _e = useRegularNMS(); _o->useRegularNMS = _e; }; - { auto _e = centerSizeEncoding(); if (_e) { _o->centerSizeEncoding.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->centerSizeEncoding[_i] = _e->Get(_i); } } }; -} - -inline flatbuffers::Offset DetectionPostProcessParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDetectionPostProcessParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDetectionPostProcessParam(flatbuffers::FlatBufferBuilder &_fbb, const DetectionPostProcessParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DetectionPostProcessParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _maxDetections = _o->maxDetections; - auto _maxClassesPerDetection = _o->maxClassesPerDetection; - auto _detectionsPerClass = _o->detectionsPerClass; - auto _nmsScoreThreshold = _o->nmsScoreThreshold; - auto _iouThreshold = _o->iouThreshold; - auto _numClasses = _o->numClasses; - auto _useRegularNMS = _o->useRegularNMS; - auto _centerSizeEncoding = _o->centerSizeEncoding.size() ? 
_fbb.CreateVector(_o->centerSizeEncoding) : 0; - return MNN::CreateDetectionPostProcessParam( - _fbb, - _maxDetections, - _maxClassesPerDetection, - _detectionsPerClass, - _nmsScoreThreshold, - _iouThreshold, - _numClasses, - _useRegularNMS, - _centerSizeEncoding); -} - -inline OneHotParamT *OneHotParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OneHotParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void OneHotParam::UnPackTo(OneHotParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = dType(); _o->dType = _e; }; - { auto _e = axis(); _o->axis = _e; }; -} - -inline flatbuffers::Offset OneHotParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOneHotParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateOneHotParam(flatbuffers::FlatBufferBuilder &_fbb, const OneHotParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _dType = _o->dType; - auto _axis = _o->axis; - return MNN::CreateOneHotParam( - _fbb, - _dType, - _axis); -} - -inline PadParamT *PadParam::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadParamT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PadParam::UnPackTo(PadParamT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = mode(); _o->mode = _e; }; -} - -inline flatbuffers::Offset PadParam::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadParam(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePadParam(flatbuffers::FlatBufferBuilder &_fbb, const PadParamT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadParamT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _mode = _o->mode; - return MNN::CreatePadParam( - _fbb, - _mode); -} - -inline const flatbuffers::TypeTable *BinaryOpOperationTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BinaryOpOperationTypeTable - }; - static const int64_t values[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 21, 22 }; - static const char * const names[] = { - "ADD", - "SUB", - "MUL", - "DIV", - 
"MAX_TEMP", - "MIN_TEMP", - "POW", - "REALDIV", - "MINIMUM", - "MAXIMUM", - "GREATER", - "GREATER_EQUAL", - "LESS", - "FLOORDIV", - "SquaredDifference", - "EQUAL", - "LESS_EQUAL", - "FLOORMOD", - "MOD", - "ATAN2", - "LOGICALOR", - "NOTEQUAL" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 22, type_codes, type_refs, values, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ReductionTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ReductionTypeTypeTable - }; - static const char * const names[] = { - "SUM", - "ASUM", - "SUMSQ", - "MEAN", - "MAXIMUM", - "MINIMUM", - "PROD", - "ANY", - "ALL" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 9, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *UnaryOpOperationTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - UnaryOpOperationTypeTable - }; - static const char * const names[] = { - "ABS", - "NEG", - "FLOOR", - "CEIL", - "SQUARE", - "SQRT", - "RSQRT", - "EXP", - "LOG", - "SIN", - "COS", - "TAN", - "ASIN", - "ACOS", - "ATAN", - "RECIPROCAL", - "LOG1P", - "BNLL", - "ACOSH", - "SINH", - "ASINH", - "ATANH", - "SIGN", - "ROUND", - "COSH", - "ERF", - "ERFC", - "ERFINV", - "EXPM1" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 29, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *CropAndResizeMethodTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - CropAndResizeMethodTypeTable - }; - static const char * const names[] = { - "BILINEAR", - "NEAREST" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PadValueModeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const 
flatbuffers::TypeFunction type_refs[] = { - PadValueModeTypeTable - }; - static const char * const names[] = { - "CONSTANT", - "REFLECT", - "SYMMETRIC" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *BinaryOpTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "opType", - "T" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PackParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "dataType", - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *StridedSliceParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "Index", - "T", - "beginMask", - "endMask", - "ellipsisMask", - "newAxisMask", - "shrinkAxisMask" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 7, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *SqueezeParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 } - }; - static const char * const names[] = { - "squeezeDims" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *CastParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "srcT", - "dstT" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ReductionParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_INT, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - ReductionTypeTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "operation", - "dim", - "coeff", - "keepDims", - "dType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GatherTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 
}, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "Tindices", - "Tparams", - "validateIndices", - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 4, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ExpandDimsTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "T", - "Tdim", - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *SeluTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 } - }; - static const char * const names[] = { - "scale", - "alpha" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *AsStringTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_STRING, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "T", - "precision", - "scientific", - "shortest", - "width", - "fillString" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 6, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ReduceJoinTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_STRING, 0, -1 } - }; - static const char * const names[] = { - "keepDims", - "separator" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *UnaryOpTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - UnaryOpOperationTypeTable, - DataTypeTypeTable - }; - static const char * const names[] = { - "opType", - "T" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *TopKV2TypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "T", - "sorted" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *CropAndResizeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const 
flatbuffers::TypeFunction type_refs[] = { - CropAndResizeMethodTypeTable - }; - static const char * const names[] = { - "extrapolationValue", - "method" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *FillTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; -} - -inline const flatbuffers::TypeTable *GatherV2TypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "Taxis", - "Tindices", - "Tparams" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *NonMaxSuppressionV2TypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; -} - -inline const flatbuffers::TypeTable *RangeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "Tidx" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *RankTypeTable() { - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 0, nullptr, nullptr, nullptr, nullptr - }; - return &tt; -} - -inline const flatbuffers::TypeTable *SizeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "outputDataType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *TransposeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "Tperm" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *SliceTfTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "T" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *QuantizeMaxMinTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "T" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable 
*CropTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 1, -1 } - }; - static const char * const names[] = { - "axis", - "offset" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *SpaceBatchTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BlobTypeTable - }; - static const char * const names[] = { - "blockShape", - "padding" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *MatMulTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "T", - "transposeA", - "transposeB", - "weight", - "bias" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 5, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *MomentsParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 1, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "dim", - "keepDims", - "dType" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 3, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *RNNParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 }, - { flatbuffers::ET_SEQUENCE, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - BlobTypeTable - }; - static const char * const names[] = { - "numUnits", - "isBidirectionalRNN", - "keepAllOutputs", - "fwGateWeight", - "fwGateBias", - "fwCandidateWeight", - "fwCandidateBias", - "bwGateWeight", - "bwGateBias", - "bwCandidateWeight", - "bwCandidateBias" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 11, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *BatchMatMulParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 } - }; - static const char * const names[] = { - "adjX", - "adjY" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *DepthSpaceParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - 
"blockSize" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *ReverseSequenceParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const char * const names[] = { - "batchDim", - "seqDim" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *DetectionPostProcessParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_FLOAT, 0, -1 }, - { flatbuffers::ET_INT, 0, -1 }, - { flatbuffers::ET_BOOL, 0, -1 }, - { flatbuffers::ET_FLOAT, 1, -1 } - }; - static const char * const names[] = { - "maxDetections", - "maxClassesPerDetection", - "detectionsPerClass", - "nmsScoreThreshold", - "iouThreshold", - "numClasses", - "useRegularNMS", - "centerSizeEncoding" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 8, type_codes, nullptr, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *OneHotParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, -1 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "dType", - "axis" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *PadParamTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - PadValueModeTypeTable - }; - static const char * const names[] = { - "mode" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 1, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_TENSORFLOWOP_MNN_H_ diff --git a/schema/current/Type_generated.h b/schema/current/Type_generated.h deleted file mode 100644 index f444072f..00000000 --- a/schema/current/Type_generated.h +++ /dev/null @@ -1,219 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_TYPE_MNN_H_ -#define FLATBUFFERS_GENERATED_TYPE_MNN_H_ - -#include "flatbuffers/flatbuffers.h" - -namespace MNN { - -enum NetSource { - NetSource_CAFFE = 0, - NetSource_TENSORFLOW = 1, - NetSource_TFLITE = 2, - NetSource_ONNX = 3, - NetSource_MIN = NetSource_CAFFE, - NetSource_MAX = NetSource_ONNX -}; - -inline const NetSource (&EnumValuesNetSource())[4] { - static const NetSource values[] = { - NetSource_CAFFE, - NetSource_TENSORFLOW, - NetSource_TFLITE, - NetSource_ONNX - }; - return values; -} - -inline const char * const *EnumNamesNetSource() { - static const char * const names[] = { - "CAFFE", - "TENSORFLOW", - "TFLITE", - "ONNX", - nullptr - }; - return names; -} - -inline const char *EnumNameNetSource(NetSource e) { - if (e < NetSource_CAFFE || e > NetSource_ONNX) return ""; - const size_t index = static_cast(e); - return EnumNamesNetSource()[index]; -} - -enum DataType { - DataType_DT_INVALID = 0, - DataType_DT_FLOAT = 1, - DataType_DT_DOUBLE 
= 2, - DataType_DT_INT32 = 3, - DataType_DT_UINT8 = 4, - DataType_DT_INT16 = 5, - DataType_DT_INT8 = 6, - DataType_DT_STRING = 7, - DataType_DT_COMPLEX64 = 8, - DataType_DT_INT64 = 9, - DataType_DT_BOOL = 10, - DataType_DT_QINT8 = 11, - DataType_DT_QUINT8 = 12, - DataType_DT_QINT32 = 13, - DataType_DT_BFLOAT16 = 14, - DataType_DT_QINT16 = 15, - DataType_DT_QUINT16 = 16, - DataType_DT_UINT16 = 17, - DataType_DT_COMPLEX128 = 18, - DataType_DT_HALF = 19, - DataType_DT_RESOURCE = 20, - DataType_DT_VARIANT = 21, - DataType_MIN = DataType_DT_INVALID, - DataType_MAX = DataType_DT_VARIANT -}; - -inline const DataType (&EnumValuesDataType())[22] { - static const DataType values[] = { - DataType_DT_INVALID, - DataType_DT_FLOAT, - DataType_DT_DOUBLE, - DataType_DT_INT32, - DataType_DT_UINT8, - DataType_DT_INT16, - DataType_DT_INT8, - DataType_DT_STRING, - DataType_DT_COMPLEX64, - DataType_DT_INT64, - DataType_DT_BOOL, - DataType_DT_QINT8, - DataType_DT_QUINT8, - DataType_DT_QINT32, - DataType_DT_BFLOAT16, - DataType_DT_QINT16, - DataType_DT_QUINT16, - DataType_DT_UINT16, - DataType_DT_COMPLEX128, - DataType_DT_HALF, - DataType_DT_RESOURCE, - DataType_DT_VARIANT - }; - return values; -} - -inline const char * const *EnumNamesDataType() { - static const char * const names[] = { - "DT_INVALID", - "DT_FLOAT", - "DT_DOUBLE", - "DT_INT32", - "DT_UINT8", - "DT_INT16", - "DT_INT8", - "DT_STRING", - "DT_COMPLEX64", - "DT_INT64", - "DT_BOOL", - "DT_QINT8", - "DT_QUINT8", - "DT_QINT32", - "DT_BFLOAT16", - "DT_QINT16", - "DT_QUINT16", - "DT_UINT16", - "DT_COMPLEX128", - "DT_HALF", - "DT_RESOURCE", - "DT_VARIANT", - nullptr - }; - return names; -} - -inline const char *EnumNameDataType(DataType e) { - if (e < DataType_DT_INVALID || e > DataType_DT_VARIANT) return ""; - const size_t index = static_cast(e); - return EnumNamesDataType()[index]; -} - -inline const flatbuffers::TypeTable *NetSourceTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - NetSourceTypeTable - }; - static const char * const names[] = { - "CAFFE", - "TENSORFLOW", - "TFLITE", - "ONNX" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 4, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -inline const flatbuffers::TypeTable *DataTypeTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 }, - { flatbuffers::ET_INT, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - DataTypeTypeTable - }; - static const char * const names[] = { - "DT_INVALID", - "DT_FLOAT", - "DT_DOUBLE", - "DT_INT32", - "DT_UINT8", - "DT_INT16", - "DT_INT8", - "DT_STRING", - "DT_COMPLEX64", - "DT_INT64", - "DT_BOOL", - 
"DT_QINT8", - "DT_QUINT8", - "DT_QINT32", - "DT_BFLOAT16", - "DT_QINT16", - "DT_QUINT16", - "DT_UINT16", - "DT_COMPLEX128", - "DT_HALF", - "DT_RESOURCE", - "DT_VARIANT" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_ENUM, 22, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_TYPE_MNN_H_ diff --git a/schema/current/UserDefine_generated.h b/schema/current/UserDefine_generated.h deleted file mode 100644 index 7935cf21..00000000 --- a/schema/current/UserDefine_generated.h +++ /dev/null @@ -1,136 +0,0 @@ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_USERDEFINE_MNN_H_ -#define FLATBUFFERS_GENERATED_USERDEFINE_MNN_H_ - - -#include "Tensor_generated.h" -#include "Type_generated.h" - -namespace MNN { - -struct TensorConvertInfo; -struct TensorConvertInfoT; - -inline const flatbuffers::TypeTable *TensorConvertInfoTypeTable(); - -struct TensorConvertInfoT : public flatbuffers::NativeTable { - typedef TensorConvertInfo TableType; - MNN_DATA_FORMAT source; - MNN_DATA_FORMAT dest; - TensorConvertInfoT() - : source(MNN_DATA_FORMAT_NCHW), - dest(MNN_DATA_FORMAT_NCHW) { - } -}; - -struct TensorConvertInfo FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TensorConvertInfoT NativeTableType; - static const flatbuffers::TypeTable *MiniReflectTypeTable() { - return TensorConvertInfoTypeTable(); - } - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SOURCE = 4, - VT_DEST = 6 - }; - MNN_DATA_FORMAT source() const { - return static_cast(GetField(VT_SOURCE, 0)); - } - MNN_DATA_FORMAT dest() const { - return static_cast(GetField(VT_DEST, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SOURCE) && - VerifyField(verifier, VT_DEST) && - verifier.EndTable(); - } - TensorConvertInfoT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorConvertInfoT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TensorConvertInfoBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_source(MNN_DATA_FORMAT source) { - fbb_.AddElement(TensorConvertInfo::VT_SOURCE, static_cast(source), 0); - } - void add_dest(MNN_DATA_FORMAT dest) { - fbb_.AddElement(TensorConvertInfo::VT_DEST, static_cast(dest), 0); - } - explicit TensorConvertInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TensorConvertInfoBuilder &operator=(const TensorConvertInfoBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTensorConvertInfo( - flatbuffers::FlatBufferBuilder &_fbb, - MNN_DATA_FORMAT source = MNN_DATA_FORMAT_NCHW, - MNN_DATA_FORMAT dest = MNN_DATA_FORMAT_NCHW) { - TensorConvertInfoBuilder builder_(_fbb); - builder_.add_dest(dest); - builder_.add_source(source); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTensorConvertInfo(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline TensorConvertInfoT *TensorConvertInfo::UnPack(const 
flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TensorConvertInfoT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TensorConvertInfo::UnPackTo(TensorConvertInfoT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = source(); _o->source = _e; }; - { auto _e = dest(); _o->dest = _e; }; -} - -inline flatbuffers::Offset TensorConvertInfo::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTensorConvertInfo(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTensorConvertInfo(flatbuffers::FlatBufferBuilder &_fbb, const TensorConvertInfoT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorConvertInfoT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _source = _o->source; - auto _dest = _o->dest; - return MNN::CreateTensorConvertInfo( - _fbb, - _source, - _dest); -} - -inline const flatbuffers::TypeTable *TensorConvertInfoTypeTable() { - static const flatbuffers::TypeCode type_codes[] = { - { flatbuffers::ET_CHAR, 0, 0 }, - { flatbuffers::ET_CHAR, 0, 0 } - }; - static const flatbuffers::TypeFunction type_refs[] = { - MNN_DATA_FORMATTypeTable - }; - static const char * const names[] = { - "source", - "dest" - }; - static const flatbuffers::TypeTable tt = { - flatbuffers::ST_TABLE, 2, type_codes, type_refs, nullptr, names - }; - return &tt; -} - -} // namespace MNN - -#endif // FLATBUFFERS_GENERATED_USERDEFINE_MNN_H_ diff --git a/schema/default/CaffeOp.fbs b/schema/default/CaffeOp.fbs index c120b48d..abfd977a 100644 --- a/schema/default/CaffeOp.fbs +++ b/schema/default/CaffeOp.fbs @@ -263,6 +263,7 @@ table Interp { outputHeight:int; resizeType:int; alignCorners:bool; + halfPixelCenters:bool = false; } table Resize { diff --git a/schema/default/MNN.fbs b/schema/default/MNN.fbs index 5412f737..2dbd080e 100644 --- a/schema/default/MNN.fbs +++ b/schema/default/MNN.fbs @@ -157,6 +157,9 @@ enum OpType : int { TrainableParam, BatchNorm, + // Use for self defined grad + ZeroGrad, + Extra = 512, // quantization ConvInt8 = 513, diff --git a/source/backend/cpu/CPUBackend.cpp b/source/backend/cpu/CPUBackend.cpp index b000dad1..f1a38f7f 100644 --- a/source/backend/cpu/CPUBackend.cpp +++ b/source/backend/cpu/CPUBackend.cpp @@ -132,7 +132,7 @@ bool CPUBackend::onAcquireBuffer(const MNN::Tensor* nativeTensorConst, StorageTy } switch (storageType) { case STATIC: { - buffer.host = (uint8_t*)mStaticAllocator->alloc(size, true); + buffer.host = (uint8_t*)mStaticAllocator->alloc(size, false); break; } case DYNAMIC: { @@ -164,7 +164,7 @@ bool CPUBackend::onReleaseBuffer(const MNN::Tensor* nativeTensor, StorageType st return false; } if (STATIC == storageType) { - mStaticAllocator->free(nativeTensor->buffer().host, true); + mStaticAllocator->free(nativeTensor->buffer().host); return true; } if (DYNAMIC_SEPERATE == storageType) { @@ -262,11 +262,13 @@ Execution* CPUBackend::onCreate(const std::vector& inputs, const std::v } bool CPUBackend::onAllocateBuffer() { + mStaticAllocator->release(false); return true; } bool CPUBackend::onClearBuffer() { - mDynamicAllocator->release(); + mDynamicAllocator->release(true); + mStaticAllocator->release(false); return true; } diff --git a/source/backend/cpu/CPUBinary.cpp b/source/backend/cpu/CPUBinary.cpp index 4f9e0e5e..3023adf2 
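// Aside on the CPUBackend allocator change above: STATIC tensors are now taken from the
// cached pool (alloc(size, false) instead of the one-off "separate" path), freed back into
// that pool, and the pool is trimmed with mStaticAllocator->release(false) before each new
// allocation round. The exact semantics belong to MNN's BufferAllocator; the class below is
// only a rough, self-contained analogue with hypothetical names, not the real implementation.
#include <cstddef>
#include <cstdlib>
#include <map>
#include <vector>

class CachingAllocatorSketch {
public:
    // separate == true asks for a one-off block that bypasses the cache,
    // mirroring the (size, bool) call shape seen in the diff.
    void* alloc(std::size_t size, bool separate) {
        if (!separate) {
            auto it = mFree.find(size);
            if (it != mFree.end() && !it->second.empty()) {
                void* p = it->second.back();   // reuse a parked block of the same size
                it->second.pop_back();
                return p;
            }
        }
        return std::malloc(size);
    }
    // Blocks are parked for later reuse rather than returned to the system.
    void free(void* ptr, std::size_t size) {
        mFree[size].push_back(ptr);
    }
    // release(false) in the diff presumably drops only the parked (unused) blocks;
    // parked blocks are all this sketch tracks, so both flag values behave the same here.
    void release(bool /*allDeallocate*/) {
        for (auto& bucket : mFree) {
            for (void* p : bucket.second) {
                std::free(p);
            }
            bucket.second.clear();
        }
    }

private:
    std::map<std::size_t, std::vector<void*>> mFree;
};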
100644 --- a/source/backend/cpu/CPUBinary.cpp +++ b/source/backend/cpu/CPUBinary.cpp @@ -6,13 +6,15 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "backend/cpu/CPUBinary.hpp" +#include "CPUBinary.hpp" #include #include -#include "backend/cpu/CPUBackend.hpp" -#include "backend/cpu/compute/CommonOptFunction.h" +#include "CPUBackend.hpp" +#include "compute/CommonOptFunction.h" +#include "compute/ConvOpt.h" #include "core/Macro.h" -#include "backend/cpu/CPUEltwise.hpp" +#include "core/Concurrency.h" +#include "CPUEltwise.hpp" namespace MNN { template @@ -25,23 +27,37 @@ ErrorCode CPUBinary::onResize(const std::vector& inputs, const std:: MNN_ASSERT(1 == outputs.size()); const int input0DataCount = inputs[0]->elementSize(); const int input1DataCount = inputs[1]->elementSize(); - mEltWise = nullptr; - if (input0DataCount == input1DataCount && outputs[0]->getType().code == halide_type_float && input1DataCount >= 4) { - switch (mType) { - case BinaryOpOperation_ADD: - mEltWise.reset(new CPUEltwise(backend(), EltwiseType_SUM, {})); - break; - case BinaryOpOperation_MAXIMUM: - mEltWise.reset(new CPUEltwise(backend(), EltwiseType_MAXIMUM, {})); - break; - case BinaryOpOperation_SUB: - mEltWise.reset(new CPUEltwise(backend(), EltwiseType_SUB, {})); - break; - case BinaryOpOperation_MUL: - mEltWise.reset(new CPUEltwise(backend(), EltwiseType_PROD, {})); - break; - default: - break; + mElementProc = nullptr; + mSupportScale = false; + int maxCount = input0DataCount > input1DataCount ? input0DataCount : input1DataCount; + if (outputs[0]->getType().code == halide_type_float && maxCount >= 4) { + if (input1DataCount == input0DataCount) { + switch (mType) { + case BinaryOpOperation_MUL: + mElementProc = MNNMatrixProdCommon; + break; + case BinaryOpOperation_ADD: + mElementProc = MNNMatrixAddCommon; + break; + case BinaryOpOperation_MAXIMUM: + mElementProc = MNNMatrixMaxCommon; + break; + case BinaryOpOperation_SUB: + mElementProc = MNNMatrixSubCommon; + break; + default: + break; + } + } else if (input1DataCount == 1 || input0DataCount == 1) { + switch (mType) { + case BinaryOpOperation_MUL: + case BinaryOpOperation_ADD: + case BinaryOpOperation_SUB: + mSupportScale = true; + break; + default: + break; + } } } return NO_ERROR; @@ -262,12 +278,84 @@ struct BinaryNotEqual : std::binary_function<_Arg1, _Arg2, _ErrorCode> { template ErrorCode CPUBinary::onExecute(const std::vector& inputs, const std::vector& outputs) { - if (nullptr != mEltWise.get()) { - return mEltWise->onExecute(inputs, outputs); - } auto input = inputs[0]; auto input1 = inputs[1]; auto output = outputs[0]; + + if (nullptr != mElementProc || mSupportScale) { + auto numberThread = ((CPUBackend*)backend())->threadNumber(); + auto i1Size = input->elementSize(); + auto i2Size = input1->elementSize(); + auto size = i1Size; + if (size == 1) { + size = i2Size; + } + int sizeDivide = size / numberThread; + sizeDivide = UP_DIV(sizeDivide, 4) * 4; + int scheduleNumber = 1; + if (sizeDivide > 0) { + scheduleNumber = UP_DIV(size, sizeDivide); + } + if (nullptr != mElementProc) { + MNN_CONCURRENCY_BEGIN(tId, scheduleNumber) { + int start = sizeDivide * (int)tId; + int realSize = sizeDivide; + if (tId == scheduleNumber -1 ) { + realSize = size - start; + } + if (realSize > 0) { + mElementProc(output->host() + start, input->host() + start, input1->host() + start, realSize, 0, 0, 0, 1); + } + } + MNN_CONCURRENCY_END(); + } else { + float scale; + float bias; + float scalar; + float* inputPtr; + if (i1Size == 1) { + scalar = 
input->host()[0]; + inputPtr = input1->host(); + } else { + scalar = input1->host()[0]; + inputPtr = input->host(); + } + switch (mType) { + case BinaryOpOperation_MUL: + scale = scalar; + bias = 0.0f; + break; + case BinaryOpOperation_ADD: + scale = 1.0f; + bias = scalar; + break; + case BinaryOpOperation_SUB: + if (1 == i2Size) { + scale = 1.0f; + bias = -scalar; + } else { + scale = -1.0f; + bias = scalar; + } + break; + default: + break; + } + + MNN_CONCURRENCY_BEGIN(tId, scheduleNumber) { + int start = sizeDivide * (int)tId; + int realSize = sizeDivide; + if (tId == scheduleNumber -1 ) { + realSize = size - start; + } + if (realSize > 0) { + MNNScaleAndAddBiasScalar(output->host() + start, inputPtr + start, bias, scale, realSize); + } + } + MNN_CONCURRENCY_END(); + } + return NO_ERROR; + } switch (mType) { case BinaryOpOperation_MUL: diff --git a/source/backend/cpu/CPUBinary.hpp b/source/backend/cpu/CPUBinary.hpp index 92436216..87b20302 100644 --- a/source/backend/cpu/CPUBinary.hpp +++ b/source/backend/cpu/CPUBinary.hpp @@ -23,7 +23,8 @@ public: protected: int32_t mType; - std::shared_ptr mEltWise; + void (*mElementProc)(float* C, const float* A, const float* B, size_t width, size_t cStride, size_t aStride, size_t bStride, size_t height) = nullptr; + bool mSupportScale = false; }; } // namespace MNN #endif /* CPUBinary_hpp */ diff --git a/source/backend/cpu/CPUCast.cpp b/source/backend/cpu/CPUCast.cpp index a8b9e7d4..f0216e40 100644 --- a/source/backend/cpu/CPUCast.cpp +++ b/source/backend/cpu/CPUCast.cpp @@ -114,6 +114,12 @@ Execution *CPUCastCreator::onCreate(const std::vector &inputs, const s if (dstT == MNN::DataType_DT_FLOAT && halide_type_of() == inputDataType) { return new CastDataType(backend); } + if (dstT == MNN::DataType_DT_FLOAT && halide_type_of() == inputDataType) { + return new CastDataType(backend); + } + if (dstT == MNN::DataType_DT_INT8 && halide_type_of() == inputDataType) { + return new CastDataType(backend); + } if (dstT == MNN::DataType_DT_INT32 && halide_type_of() == inputDataType) { return new CastDataType(backend); } diff --git a/source/backend/cpu/CPUInterp.cpp b/source/backend/cpu/CPUInterp.cpp index 79f44874..8e908789 100644 --- a/source/backend/cpu/CPUInterp.cpp +++ b/source/backend/cpu/CPUInterp.cpp @@ -22,12 +22,13 @@ static int CLAMP(int v, int min, int max) { return v; } -CPUInterp::CPUInterp(Backend *backend, float widthScale, float heightScale, int resizeType, bool AlignCorners) +CPUInterp::CPUInterp(Backend *backend, float widthScale, float heightScale, int resizeType, bool AlignCorners, bool halfPixelCenters) : CPUResizeCommon(backend), mWidthScale(widthScale), mHeightScale(heightScale), mResizeType(resizeType), - mAlignCorners(AlignCorners) { + mAlignCorners(AlignCorners), + mHalfPixelCenters(halfPixelCenters) { // nothing to do } @@ -88,7 +89,12 @@ ErrorCode CPUInterp::onResize(const std::vector &inputs, const std::ve // Compute Line Position for (int x = 0; x < outW; ++x) { - float srcX = x * xScaling; + float srcX; + if (mHalfPixelCenters) { + srcX = (x + 0.5) * xScaling - 0.5; + } else { + srcX = x * xScaling; + } int x1 = floor(srcX); float x2Factor = srcX - x1; @@ -111,7 +117,12 @@ ErrorCode CPUInterp::onResize(const std::vector &inputs, const std::ve auto _hFactor = mHeightFactor.host(); for (int y = 0; y < outH; ++y) { - float srcY = y * yScaling; + float srcY; + if (mHalfPixelCenters) { + srcY = (y + 0.5) * yScaling - 0.5; + } else { + srcY = y * yScaling; + } int y1 = floor(srcY); float y2Factor = srcY - y1; @@ -137,7 +148,7 @@ public: 
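// Aside on the halfPixelCenters path added to CPUInterp above (standalone sketch, not part
// of the class): without the flag a destination index x maps to x * scale; with the flag it
// maps to (x + 0.5) * scale - 0.5, which lines up the pixel centers of the source and
// destination grids and matches the usual half_pixel_centers=true resize convention.
static inline float mapToSourceCoord(int x, float scale, bool halfPixelCenters) {
    return halfPixelCenters ? (x + 0.5f) * scale - 0.5f : x * scale;
}
// Example: upscaling width 2 -> 4 (scale = 0.5). The plain mapping yields source x of
// 0, 0.5, 1, 1.5; the half-pixel mapping yields -0.25, 0.25, 0.75, 1.25, which the
// surrounding code then clamps into the valid source range as before.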
const MNN::Op *op, Backend *backend) const { auto interp = op->main_as_Interp(); return new CPUInterp(backend, interp->widthScale(), interp->heightScale(), interp->resizeType(), - interp->alignCorners()); + interp->alignCorners(), interp->halfPixelCenters()); } }; REGISTER_CPU_OP_CREATOR(CPUInterpCreator, OpType_Interp); diff --git a/source/backend/cpu/CPUInterp.hpp b/source/backend/cpu/CPUInterp.hpp index 9a3b1524..37b8b1e0 100644 --- a/source/backend/cpu/CPUInterp.hpp +++ b/source/backend/cpu/CPUInterp.hpp @@ -15,7 +15,7 @@ namespace MNN { class CPUInterp : public CPUResizeCommon { public: - CPUInterp(Backend *backend, float widthScale, float heightScale, int resizeType, bool AlignCorners); + CPUInterp(Backend *backend, float widthScale, float heightScale, int resizeType, bool AlignCorners, bool halfPixelCenters); virtual ~CPUInterp(); virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; virtual ErrorCode onResize(const std::vector &inputs, const std::vector &outputs) override; @@ -30,6 +30,7 @@ private: float mHeightScale; int mResizeType; // 1:near 2: bilinear 3: cubic bool mAlignCorners; + bool mHalfPixelCenters; }; } // namespace MNN diff --git a/source/backend/cpu/CPULSTM.cpp b/source/backend/cpu/CPULSTM.cpp index bc7d972c..3dede528 100644 --- a/source/backend/cpu/CPULSTM.cpp +++ b/source/backend/cpu/CPULSTM.cpp @@ -45,7 +45,7 @@ static void copyWeightAlignUp4x4(float* dst, const float* src, int numUnits, int } } if (w < numFeatures) { - for (int h = 0, inputIndex = w, ww; h < numUnits; ++h, inputIndex += numUnits) { + for (int h = 0, inputIndex = w, ww; h < numUnits; ++h, inputIndex += numFeatures) { for (ww = 0; ww < numFeatures - w; ++ww) { dstData[outputIndex++] = srcData[inputIndex + ww]; } diff --git a/source/backend/cpu/CPUOPRegister.cpp b/source/backend/cpu/CPUOPRegister.cpp index 653a5966..f5066b31 100644 --- a/source/backend/cpu/CPUOPRegister.cpp +++ b/source/backend/cpu/CPUOPRegister.cpp @@ -111,6 +111,7 @@ extern void ___CPUUnpackCreator__OpType_Unpack__(); extern void ___CPUUnravelIndexCreator__OpType_UnravelIndex__(); extern void ___CPUWhereCreator__OpType_Where__(); extern void ___CPUZeroLikeCreator__OpType_ZerosLike__(); +extern void ___CPUZeroLikeCreator__OpType_ZeroGrad__(); void registerCPUOps() { ___CPUArgMaxCreator__OpType_ArgMax__(); @@ -223,6 +224,7 @@ ___CPUUnpackCreator__OpType_Unpack__(); ___CPUUnravelIndexCreator__OpType_UnravelIndex__(); ___CPUWhereCreator__OpType_Where__(); ___CPUZeroLikeCreator__OpType_ZerosLike__(); +___CPUZeroLikeCreator__OpType_ZeroGrad__(); } #endif } diff --git a/source/backend/cpu/CPUOneHot.cpp b/source/backend/cpu/CPUOneHot.cpp index bc08a7fe..768de3dc 100644 --- a/source/backend/cpu/CPUOneHot.cpp +++ b/source/backend/cpu/CPUOneHot.cpp @@ -21,7 +21,12 @@ void OneHotImpl(int depth, int outerSize, int innerSize, const int* indices, con for (int i = 0; i < outerSize; ++i) { for (int j = 0; j < depth; ++j) { for (int k = 0; k < innerSize; ++k) { - *outputPtr = indices[i * innerSize + k] == j ? 
onValue : offValue; + auto index = indices[i * innerSize + k]; + if (index == j) { + *outputPtr = onValue; + } else { + *outputPtr = offValue; + } outputPtr++; } } diff --git a/source/backend/cpu/CPUPoolGrad.cpp b/source/backend/cpu/CPUPoolGrad.cpp index 1dc4ed98..e8f9c63a 100644 --- a/source/backend/cpu/CPUPoolGrad.cpp +++ b/source/backend/cpu/CPUPoolGrad.cpp @@ -9,6 +9,8 @@ #include "backend/cpu/CPUPoolGrad.hpp" #include "core/Macro.h" #include "math/Vec4.hpp" +#include "core/Concurrency.h" + namespace MNN { using namespace Math; class CPUMaxPoolGrad : public CPUCommonPoolGrad { @@ -30,16 +32,14 @@ public: auto channelC4 = UP_DIV(inputDiff->channel(), 4); auto batch = inputDiff->batch(); - for (int batchIndex = 0; batchIndex < batch; ++batchIndex) { - auto input0Ptr = origin->host() + batchIndex * origin->stride(0); - auto input1Ptr = inputDiff->host() + batchIndex * inputDiff->stride(0); - auto outputOriginPtr = outputOrigin->host() + batchIndex * outputOrigin->stride(0); - auto outputPtr = outputDiff->host() + batchIndex * outputDiff->stride(0); - for (int z = 0; z < channelC4; ++z) { - auto inputZ0 = input0Ptr + z * iw * ih * 4; - auto inputZ1 = input1Ptr + z * ow * oh * 4; - auto outputOriZ = outputOriginPtr + z * ow * oh * 4; - auto outputZ = outputPtr + z * iw * ih * 4; + auto totalChannelC4 = batch * channelC4; + auto threadNumber = ((CPUBackend*)(backend()))->threadNumber(); + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { + for (int z = tId; z < totalChannelC4; z+=threadNumber) { + auto inputZ0 = origin->host() + z * iw * ih * 4; + auto inputZ1 = inputDiff->host() + z * ow * oh * 4; + auto outputOriZ = outputOrigin->host() + z * ow * oh * 4; + auto outputZ = outputDiff->host() + z * iw * ih * 4; ::memset(outputZ, 0, sizeof(float) * iw * ih * 4); for (int y = 0; y < oh; ++y) { @@ -70,7 +70,9 @@ public: } } } - } + }; + MNN_CONCURRENCY_END(); + return NO_ERROR; } }; @@ -92,12 +94,12 @@ public: auto channelC4 = UP_DIV(inputDiff->channel(), 4); auto batch = inputDiff->batch(); auto factor = Vec4(1.0f/((float)mKernelY*mKernelX)); - for (int batchIndex = 0; batchIndex < batch; ++batchIndex) { - auto input1Ptr = inputDiff->host() + batchIndex * inputDiff->stride(0); - auto outputPtr = outputDiff->host() + batchIndex * outputDiff->stride(0); - for (int z = 0; z < channelC4; ++z) { - auto inputZ1 = input1Ptr + z * ow * oh * 4; - auto outputZ = outputPtr + z * iw * ih * 4; + auto totalChannelC4 = batch * channelC4; + auto threadNumber = ((CPUBackend*)(backend()))->threadNumber(); + MNN_CONCURRENCY_BEGIN(tId, threadNumber) { + for (int z = tId; z < totalChannelC4; z+=threadNumber) { + auto inputZ1 = inputDiff->host() + z * ow * oh * 4; + auto outputZ = outputDiff->host() + z * iw * ih * 4; ::memset(outputZ, 0, sizeof(float) * iw * ih * 4); for (int y = 0; y < oh; ++y) { @@ -120,7 +122,8 @@ public: } } } - } + }; + MNN_CONCURRENCY_END(); return NO_ERROR; } }; diff --git a/source/backend/cpu/CPUReduction.cpp b/source/backend/cpu/CPUReduction.cpp index 6e5a5174..bdf1456c 100644 --- a/source/backend/cpu/CPUReduction.cpp +++ b/source/backend/cpu/CPUReduction.cpp @@ -20,7 +20,6 @@ class Reduction : public Execution { public: Reduction(Backend* backend, const Op* op) : Execution(backend) { auto reduct = op->main_as_ReductionParam(); - mdataType = reduct->dType(); if (nullptr == reduct->dim()) { return; @@ -54,11 +53,12 @@ public: virtual ErrorCode onExecute(const std::vector& inputs, const std::vector& outputs) override { auto input = inputs[0]; auto output = outputs[0]; + auto typeCode = 
input->getType().code; if (mAxis.empty()) { int size = (int)input->size() / input->buffer().type.bytes(); - if (MNN::DataType_DT_FLOAT == mdataType) { + if (halide_type_float == typeCode) { this->onReduce(input->host(), output->host(), 1, 1, size); - } else if (MNN::DataType_DT_INT32 == mdataType) { + } else if (halide_type_int == typeCode) { this->onReduce(input->host(), output->host(), 1, 1, size); } return NO_ERROR; @@ -122,7 +122,6 @@ protected: virtual void onReduce(const float* src, float* dst, int inside, int outside, int axis) const = 0; virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outsize, int axis) const = 0; std::vector mAxis; - MNN::DataType mdataType; std::vector> mMidBuffer; }; diff --git a/source/backend/cpu/CPUScale.cpp b/source/backend/cpu/CPUScale.cpp index 5b0b737d..ea88d094 100644 --- a/source/backend/cpu/CPUScale.cpp +++ b/source/backend/cpu/CPUScale.cpp @@ -6,48 +6,68 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "backend/cpu/CPUScale.hpp" -#include "backend/cpu/CPUBackend.hpp" -#include "backend/cpu/compute/CommonOptFunction.h" +#include "CPUScale.hpp" +#include "CPUBackend.hpp" +#include "compute/CommonOptFunction.h" #include "core/Macro.h" #include "core/TensorUtils.hpp" +#include "core/Concurrency.h" namespace MNN { CPUScale::CPUScale(const Op* op, Backend* bn) : MNN::Execution(bn) { auto scale = op->main_as_Scale(); int outputCount = scale->scaleData()->size(); - mScale.reset(ALIGN_UP4(outputCount)); - mScale.clear(); - ::memcpy(mScale.get(), scale->scaleData()->data(), outputCount * sizeof(float)); - - mBias.reset(ALIGN_UP4(outputCount)); - mBias.clear(); + mScaleBias.reset( + Tensor::createDevice( + {2, ALIGN_UP4(outputCount)} + )); + auto res = bn->onAcquireBuffer(mScaleBias.get(), Backend::STATIC); + if (!res) { + MNN_ERROR("Error for alloc buffer for CPUScale\n"); + mScaleBias = nullptr; + mValid = false; + return; + } + ::memset(mScaleBias->host(), 0, mScaleBias->size()); + ::memcpy(mScaleBias->host(), scale->scaleData()->data(), outputCount * sizeof(float)); if (nullptr != scale->biasData() && nullptr != scale->biasData()->data()) { - ::memcpy(mBias.get(), scale->biasData()->data(), outputCount * sizeof(float)); + ::memcpy(mScaleBias->host() + ALIGN_UP4(outputCount), scale->biasData()->data(), outputCount * sizeof(float)); + } +} +CPUScale::~CPUScale() { + if (nullptr != mScaleBias) { + backend()->onReleaseBuffer(mScaleBias.get(), Backend::STATIC); } } ErrorCode CPUScale::onExecute(const std::vector& inputs, const std::vector& outputs) { auto input = inputs[0]; auto output = outputs[0]; + auto scalePtr = mScaleBias->host(); + auto biasPtr = mScaleBias->host() + 1 * mScaleBias->length(1); if (TensorUtils::getDescribe(input)->dimensionFormat == MNN_DATA_FORMAT_NC4HW4) { - auto batchSize = input->buffer().dim[0].stride; auto batch = input->buffer().dim[0].extent; auto depthQuad = UP_DIV(input->channel(), 4); int planeNumber = 1; for (int i = 2; i < input->buffer().dimensions; ++i) { planeNumber *= input->length(i); } - for (int i = 0; i < batch; ++i) { - MNNScaleAndAddBias(output->host() + batchSize * i, input->host() + batchSize * i, mBias.get(), - mScale.get(), planeNumber, depthQuad); + auto depthStride = planeNumber * 4; + auto totalDepth = batch * depthQuad; + int numberThread = ((CPUBackend*)backend())->threadNumber(); + MNN_CONCURRENCY_BEGIN(tId, numberThread) { + for (int i = tId; i < totalDepth; i+=numberThread) { + MNNScaleAndAddBias(output->host() + depthStride * i, input->host() + depthStride * i, 
biasPtr + 4 * i, + scalePtr + 4 * i, planeNumber, 1); + } } + MNN_CONCURRENCY_END(); return NO_ERROR; } MNN_ASSERT(TensorUtils::getDescribe(input)->dimensionFormat == MNN_DATA_FORMAT_NHWC); auto channel = input->channel(); auto outside = input->elementSize() / channel; - MNNScaleAndAddBiasOutside(output->host(), input->host(), mBias.get(), mScale.get(), outside, channel); + MNNScaleAndAddBiasOutside(output->host(), input->host(), biasPtr, scalePtr, outside, channel); return NO_ERROR; } diff --git a/source/backend/cpu/CPUScale.hpp b/source/backend/cpu/CPUScale.hpp index 3c0eb908..fd29c2fa 100644 --- a/source/backend/cpu/CPUScale.hpp +++ b/source/backend/cpu/CPUScale.hpp @@ -9,19 +9,18 @@ #ifndef CPUScale_hpp #define CPUScale_hpp -#include "core/AutoStorage.h" +#include #include "core/Execution.hpp" namespace MNN { class CPUScale : public Execution { public: CPUScale(const Op *op, Backend *bn); - virtual ~CPUScale() = default; + virtual ~CPUScale(); virtual ErrorCode onExecute(const std::vector &inputs, const std::vector &outputs) override; private: - AutoStorage mScale; - AutoStorage mBias; + std::shared_ptr mScaleBias; }; } // namespace MNN diff --git a/source/backend/cpu/CPUUnary.cpp b/source/backend/cpu/CPUUnary.cpp index e3bb9e5b..c0c67fdc 100644 --- a/source/backend/cpu/CPUUnary.cpp +++ b/source/backend/cpu/CPUUnary.cpp @@ -10,6 +10,7 @@ #include #include "backend/cpu/CPUBackend.hpp" #include "core/Macro.h" +#include "core/Concurrency.h" #include #include @@ -26,18 +27,20 @@ ErrorCode CPUUnary::onResize(const std::vector &inputs, const std::vec } template -static ErrorCode _unaryOp(Tensor *input, Tensor *output) { +static ErrorCode _unaryOp(void* inputPtr, void* outputPtr, int elementSize, Backend* bn) { Func f; - - const T *inputData = input->host(); - T *outputData = (T *)output->buffer().host; - - auto elementSize = input->elementSize(); - - for (int i = 0; i < elementSize; i++) { - outputData[i] = f(inputData[i]); + auto backend = [bn]() { + return bn; + }; + const T *inputData = (T*)inputPtr; + T *outputData = (T *)outputPtr; + auto numberThread = ((CPUBackend*)bn)->threadNumber(); + MNN_CONCURRENCY_BEGIN(tId, numberThread) { + for (int i=tId; i &inputs, const std::ve if (dtype == halide_type_int) { switch (mType) { case UnaryOpOperation_ABS: - return _unaryOp, int32_t>(input, output); + return _unaryOp, int32_t>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_NEG: - return _unaryOp, int32_t>(input, output); + return _unaryOp, int32_t>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_SQUARE: - return _unaryOp, int32_t>(input, output); + return _unaryOp, int32_t>(input->host(), output->host(), input->elementSize(), backend()); default: MNN_ERROR("Int-Unary not support %d\n", mType); break; @@ -369,63 +372,63 @@ ErrorCode CPUUnary::onExecute(const std::vector &inputs, const std::ve } switch (mType) { case UnaryOpOperation_SQUARE: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_RSQRT: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_NEG: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_EXP: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), 
input->elementSize(), backend()); case UnaryOpOperation_COS: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_SIN: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_TAN: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ATAN: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_SQRT: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ABS: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_CEIL: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_RECIPROCAL: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_LOG1P: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_LOG: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_FLOOR: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_BNLL: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ACOSH: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_SINH: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ASINH: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ATANH: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_SIGN: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ROUND: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_COSH: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ERF: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ERFC: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ERFINV: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), 
input->elementSize(), backend()); case UnaryOpOperation_EXPM1: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ASIN: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); case UnaryOpOperation_ACOS: - return _unaryOp, float>(input, output); + return _unaryOp, float>(input->host(), output->host(), input->elementSize(), backend()); default: MNN_ASSERT(false); break; diff --git a/source/backend/cpu/CPUZeroLike.cpp b/source/backend/cpu/CPUZeroLike.cpp index cd58d0a6..2640b5aa 100644 --- a/source/backend/cpu/CPUZeroLike.cpp +++ b/source/backend/cpu/CPUZeroLike.cpp @@ -21,4 +21,5 @@ public: }; REGISTER_CPU_OP_CREATOR(CPUZeroLikeCreator, OpType_ZerosLike); +REGISTER_CPU_OP_CREATOR(CPUZeroLikeCreator, OpType_ZeroGrad); } // namespace MNN diff --git a/source/backend/cpu/compute/CommonOptFunction.cpp b/source/backend/cpu/compute/CommonOptFunction.cpp index ce975be7..1b649201 100644 --- a/source/backend/cpu/compute/CommonOptFunction.cpp +++ b/source/backend/cpu/compute/CommonOptFunction.cpp @@ -6,15 +6,17 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "backend/cpu/compute/CommonOptFunction.h" +#include "CommonOptFunction.h" #include #include #include "core/Macro.h" #include +#include "math/Vec4.hpp" #ifdef MNN_USE_NEON #include #endif #define UNIT 4 +using namespace MNN::Math; void MNNScaleAndAddBiasOutside(float* dst, const float* src, const float* bias, const float* alpha, size_t planeNumber, size_t biasNumber) { @@ -118,20 +120,17 @@ void MNNMinFloat(float* input, float* minBuffer, int32_t inputCountUnit) { } } } - void MNNScaleAndAddBias(float* dst, const float* src, const float* bias, const float* alpha, size_t planeNumber, size_t biasNumber) { for (int z = 0; z < biasNumber; ++z) { float* dstZ = dst + planeNumber * 4 * z; const float* srcZ = src + planeNumber * 4 * z; - const float* biasZ = bias + 4 * z; - const float* alphaZ = alpha + 4 * z; + auto biasZ = Vec4::load(bias + 4 * z); + auto alphaZ = Vec4::load(alpha + 4 * z); for (int p = 0; p < planeNumber; ++p) { float* dstX = dstZ + 4 * p; const float* srcX = srcZ + 4 * p; - for (int i = 0; i < 4; ++i) { - dstX[i] = srcX[i] * alphaZ[i] + biasZ[i]; - } + Vec4::save(dstX, (Vec4::load(srcX) * alphaZ) + biasZ); } } } @@ -644,3 +643,27 @@ void MNNReluWithSlope(float* dst, const float* src, size_t sizeQuad, float slope } MNNReluWithSlopeChannel(dst, src, slopeValue, sizeQuad, 1); } + +void MNNScaleAndAddBiasScalar(float* dst, const float* src, float bias, float alpha, size_t number) { + int numberC4 = (int)number / 4; + int start = 0; + if (numberC4 > 0) { + float biasC4[4] = { + bias, + bias, + bias, + bias + }; + float alphaC4[4] = { + alpha, + alpha, + alpha, + alpha + }; + MNNScaleAndAddBias(dst, src, biasC4, alphaC4, numberC4, 1); + start = numberC4 * 4; + } + for (int i=start; icontext()) { + return nullptr; + } + return bn; } }; void registerMetalBackendCreator() { - MNNInsertExtraBackendCreator(MNN_FORWARD_METAL, new MetalBackendCreator); + MNNInsertExtraBackendCreator(MNN_FORWARD_METAL, new MetalBackendCreator, true); } } // namespace MNN #else diff --git a/source/backend/metal/MetalSoftmax.mm b/source/backend/metal/MetalSoftmax.mm index 7cc321c6..bf78df60 100644 --- a/source/backend/metal/MetalSoftmax.mm +++ b/source/backend/metal/MetalSoftmax.mm @@ -85,8 +85,9 @@ ErrorCode MetalSoftmax::onExecute(const std::vector &inputs, const std 
class MetalSoftmaxCreator : public MetalBackend::Creator { public: virtual Execution *onCreate(const std::vector &inputs, const MNN::Op *op, Backend *backend) const { - auto softmax = op->main_as_Axis(); - return new MetalSoftmax(backend, softmax->axis()); + return nullptr; +// auto softmax = op->main_as_Axis(); +// return new MetalSoftmax(backend, softmax->axis()); } }; REGISTER_METAL_OP_CREATOR(MetalSoftmaxCreator, OpType_Softmax); diff --git a/source/backend/opencl/core/OpenCLBackend.cpp b/source/backend/opencl/core/OpenCLBackend.cpp index 9cc0c138..2bf97c86 100644 --- a/source/backend/opencl/core/OpenCLBackend.cpp +++ b/source/backend/opencl/core/OpenCLBackend.cpp @@ -172,7 +172,11 @@ Execution* OpenCLBackend::onCreate(const std::vector& inputs, const std auto creators = gCreator(); auto iter = creators->find(op->type()); if (iter == creators->end()) { - MNN_PRINT("Don't support type %d, %s\n", op->type(), op->name()->c_str()); + if (nullptr != op->name()) { + MNN_PRINT("Don't support type %s, %s\n", EnumNameOpType(op->type()), op->name()->c_str()); + } else { + MNN_PRINT("Don't support type %s\n", EnumNameOpType(op->type())); + } return NULL; } diff --git a/source/backend/opencl/core/runtime/OpenCLWrapper.cpp b/source/backend/opencl/core/runtime/OpenCLWrapper.cpp index 9646069e..371eb6a0 100644 --- a/source/backend/opencl/core/runtime/OpenCLWrapper.cpp +++ b/source/backend/opencl/core/runtime/OpenCLWrapper.cpp @@ -92,7 +92,7 @@ bool OpenCLSymbols::LoadLibraryFromPath(const std::string &library_path) { MNN_LOAD_FUNCTION_PTR(clReleaseKernel); MNN_LOAD_FUNCTION_PTR(clCreateProgramWithSource); MNN_LOAD_FUNCTION_PTR(clCreateBuffer); - MNN_LOAD_FUNCTION_PTR(clCreateImage); + //MNN_LOAD_FUNCTION_PTR(clCreateImage); MNN_LOAD_FUNCTION_PTR(clCreateImage2D); MNN_LOAD_FUNCTION_PTR(clRetainKernel); MNN_LOAD_FUNCTION_PTR(clCreateKernel); @@ -122,8 +122,8 @@ bool OpenCLSymbols::LoadLibraryFromPath(const std::string &library_path) { MNN_LOAD_FUNCTION_PTR(clReleaseMemObject); MNN_LOAD_FUNCTION_PTR(clGetDeviceInfo); MNN_LOAD_FUNCTION_PTR(clGetDeviceIDs); - MNN_LOAD_FUNCTION_PTR(clRetainDevice); - MNN_LOAD_FUNCTION_PTR(clReleaseDevice); + //MNN_LOAD_FUNCTION_PTR(clRetainDevice); + //MNN_LOAD_FUNCTION_PTR(clReleaseDevice); MNN_LOAD_FUNCTION_PTR(clRetainEvent); MNN_LOAD_FUNCTION_PTR(clGetKernelWorkGroupInfo); MNN_LOAD_FUNCTION_PTR(clGetEventInfo); diff --git a/source/backend/opengl/AllShader.c b/source/backend/opengl/AllShader.c deleted file mode 100644 index f98133ee..00000000 --- a/source/backend/opengl/AllShader.c +++ /dev/null @@ -1,1119 +0,0 @@ -#include "AllShader.hpp" -const char* glsl_convlutionDepthwise_glsl = -"layout(std430) buffer;\n" -"layout(FORMAT, binding=0) writeonly uniform mediump image3D uOutput;\n" -"layout(location=1) uniform mediump sampler3D uInput;\n" -"layout(location=2) uniform mediump sampler3D uKernel;\n" -"layout(binding=3) readonly buffer bias{\n" -" vec4 data[];\n" -"} uBias;\n" -"layout(location=4) uniform ivec2 uPad;\n" -"layout(location=5) uniform ivec2 uKernelSize;\n" -"layout(location=6) uniform ivec2 uStride;\n" -"layout(location=7) uniform ivec2 uDilate;\n" -"// layout(location=8) uniform ivec2 uOffset;\n" -"// layout(location=9) uniform float uReluRate;\n" -"layout(location=10) uniform ivec3 uOutputSize;\n" -"layout(location=11) uniform ivec3 uInputSize;\n" -"#define UP_DIV(x, y) (((x)+(y)-1)/(y))\n" -"layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID)*ivec3(1, 1, 
1);\n" -" ivec3 outputSize = uOutputSize;\n" -" if (all(lessThan(pos, outputSize)))\n" -" {\n" -" int KSIZE_Y = uKernelSize.y;\n" -" int KSIZE_X = uKernelSize.x;\n" -" ivec3 inputSize = uInputSize;\n" -" ivec2 s0 = pos.xy*uStride-uPad;\n" -" int fx, fy, fz;\n" -" ivec2 sfxy = max(ivec2(0), (UP_DIV(-s0, uDilate)));\n" -" ivec2 efxy = min(uKernelSize, UP_DIV(inputSize.xy-s0, uDilate));\n" -" vec4 color = uBias.data[pos.z];\n" -" for (fy=sfxy.y; fy oc/4 ic/4 ky kx ic4 oc4\n" -"//kernel image : oc/4, ky * kx * ic/4 * ic4\n" -"layout (local_size_x = 4, local_size_y = 4, local_size_z = 1) in;\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID);\n" -" if (pos.x < width && pos.y < height)\n" -" {\n" -" vec4 res = uKernel.data[pos.x+pos.y*width];\n" -" imageStore(uOutput, ivec2(pos.x, pos.y), res);\n" -" }\n" -"}\n" -; -const char* glsl_convolution1x1_glsl = -"layout(std430) buffer;\n" -"layout(FORMAT, binding=0) writeonly uniform PRECISION image3D uOutput;\n" -"layout(location=1) uniform mediump sampler3D uInput;\n" -"layout(location=2) uniform mediump sampler3D uKernel;\n" -"layout(binding=3) readonly buffer bias{\n" -" vec4 data[];\n" -"} uBias;\n" -"layout(location=8) uniform int uUnroll;\n" -"layout(location=10) uniform ivec3 uOutputSize;\n" -"layout(location=11) uniform ivec3 uInputSize;\n" -"#define UP_DIV(x, y) (((x)+(y)-1)/(y))\n" -"layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"void main()\n" -"{\n" -" ivec3 outputSize = uOutputSize;\n" -" if (all(lessThan(ivec3(gl_GlobalInvocationID), outputSize)))\n" -" {\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID)*ivec3(uUnroll, 1, 1);\n" -" ivec3 inputSize = uInputSize;\n" -" int sy = pos.y;\n" -" int sx = pos.x;\n" -" int fx, fy, fz;\n" -" vec4 color = uBias.data[pos.z];\n" -" vec4 color2 = color;\n" -" vec4 color3 = color;\n" -" vec4 color4 = color;\n" -" int kernelY = pos.z;\n" -" for (fz=0; fz oc/4, ic/4, ky kx ic4 oc4\n" -"layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"void main()\n" -"{\n" -" if (all(lessThan(ivec3(gl_GlobalInvocationID), uOutputSize)))\n" -" {\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID)*ivec3(uUnroll, 1, 1);\n" -" int kernelX = uKernelSize.x;\n" -" ivec3 inputSize = uInputSize;\n" -" ivec2 s0 = pos.xy*uStride-uPad;\n" -" int fx, fy, fz;\n" -" ivec2 sfxy = max(ivec2(0), (UP_DIV(-s0, uDilate)));\n" -" ivec2 efxy = min(uKernelSize, UP_DIV(inputSize.xy-s0, uDilate));\n" -" vec4 color = uBias.data[pos.z];\n" -" vec4 color2 = color;\n" -" vec4 color3 = color;\n" -" vec4 color4 = color;\n" -" int kernelY = pos.z;\n" -" for (fy=sfxy.y; fy= 0&& sx1 < inputSize.x ? 1.0 : 0.0;\n" -" float m2 = sx2 >= 0&& sx2 < inputSize.x ? 1.0 : 0.0;\n" -" float m3 = sx3 >= 0&& sx3 < inputSize.x ? 1.0 : 0.0;\n" -" float m4 = sx4 >= 0&& sx4 < inputSize.x ? 
1.0 : 0.0;\n" -" fz = 0;\n" -" for (; fz oc/4, ic/4, ky kx ic4 oc4\n" -"//index : ky kx, oc/4, ic/4\n" -"//weight image : ky kx, oc/4, ic/4*ic4 oc4\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID) * ivec3(4, 1, 1);\n" -" int kernelPos = 0\n" -" + pos.x * uFxFy\n" -" + 4*pos.y * uIc_4 * uFxFy\n" -" + 4*pos.z\n" -" ;\n" -" vec4 color0 = uKernel.data[kernelPos+0];\n" -" vec4 color1 = uKernel.data[kernelPos+1];\n" -" vec4 color2 = uKernel.data[kernelPos+2];\n" -" vec4 color3 = uKernel.data[kernelPos+3];\n" -" \n" -" imageStore(uOutput, ivec3(pos.x+0, pos.y, pos.z), color0);\n" -" imageStore(uOutput, ivec3(pos.x+1, pos.y, pos.z), color1);\n" -" imageStore(uOutput, ivec3(pos.x+2, pos.y, pos.z), color2);\n" -" imageStore(uOutput, ivec3(pos.x+3, pos.y, pos.z), color3);\n" -"}\n" -; -const char* glsl_binary_glsl = -"layout(FORMAT, binding=0) writeonly uniform PRECISION image3D uOutput;\n" -"layout(location=1) uniform mediump sampler3D uInput0;\n" -"layout(location=2) uniform mediump sampler3D uInput1;\n" -"layout(location=3) uniform ivec4 imgSize;\n" -"layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID);\n" -" ivec3 inSize = imgSize.xyz;\n" -" if(all(lessThan(pos, inSize)))\n" -" {\n" -"#ifdef ADD\n" -" vec4 sum = texelFetch(uInput0, pos, 0) + texelFetch(uInput1, pos, 0);\n" -"#endif\n" -"#ifdef MUL\n" -" vec4 sum = texelFetch(uInput0, pos, 0) * texelFetch(uInput1, pos, 0);\n" -"#endif\n" -"#ifdef SUB\n" -" vec4 sum = texelFetch(uInput0, pos, 0) - texelFetch(uInput1, pos, 0);\n" -"#endif\n" -"#ifdef REALDIV\n" -" vec4 sum = texelFetch(uInput0, pos, 0) / texelFetch(uInput1, pos, 0);\n" -"#endif\n" -" imageStore(uOutput, pos, sum);\n" -" }\n" -"}\n" -; -const char* glsl_relu_glsl = -"layout(FORMAT, binding=0) writeonly uniform PRECISION image3D uOutput;\n" -"layout(location=1) uniform mediump sampler3D uInput;\n" -"layout(location=2) uniform ivec4 imgSize;\n" -"layout(location=3) uniform float slope;\n" -"layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID);\n" -" ivec3 imgSize = imgSize.xyz;\n" -" if(pos.x < imgSize.x && pos.y < imgSize.y)\n" -" {\n" -" vec4 dataIn = texelFetch(uInput, pos, 0);\n" -" bvec4 lessZero = bvec4(lessThan(dataIn, vec4(0.0)));\n" -" vec4 dataTemp = dataIn * vec4(slope);\n" -" imageStore(uOutput, pos, mix(dataIn, dataTemp, lessZero));\n" -" }\n" -"}\n" -; -const char* glsl_nc4hw4_buffer_to_image_glsl = -"layout(FORMAT, binding=0) writeonly uniform PRECISION image3D uImage;\n" -"layout(binding=1) readonly buffer destBuffer{\n" -" vec4 data[];\n" -"} uInBuffer;\n" -"layout(location = 2) uniform int uWidth;\n" -"layout(location = 3) uniform int uHeight;\n" -"layout (local_size_x = 8, local_size_y = 8, local_size_z = 1) in;\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID);\n" -" if (pos.x < uWidth && pos.y < uHeight)\n" -" {\n" -" vec4 color = uInBuffer.data[uWidth*pos.y+pos.x+pos.z*uWidth*uHeight];\n" -" imageStore(uImage, pos, color);\n" -" }\n" -"}\n" -; -const char* glsl_nhwc_buffer_to_image_glsl = -"layout(FORMAT, binding=0) writeonly uniform PRECISION image3D uImage;\n" -"layout(binding=1) readonly buffer destBuffer{\n" -" float data[];\n" -"} uInBuffer;\n" -"layout(location = 2) uniform int uWidth;\n" -"layout(location = 3) uniform int uHeight;\n" -"layout(location = 4) uniform int uChannel;\n" -"layout (local_size_x = XLOCAL, 
local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"void main()\n" -"{\n" -" ivec3 pos = ivec3(gl_GlobalInvocationID);\n" -" if (pos.x < uWidth && pos.y < uHeight)\n" -" {\n" -" vec4 color;\n" -" int z = pos.z*4;\n" -" color.r = uInBuffer.data[pos.y*uWidth*uChannel + pos.x*uChannel + (z+0)];\n" -" color.g = uInBuffer.data[pos.y*uWidth*uChannel + pos.x*uChannel + (z+1)];\n" -" color.b = uInBuffer.data[pos.y*uWidth*uChannel + pos.x*uChannel + (z+2)];\n" -" color.a = uInBuffer.data[pos.y*uWidth*uChannel + pos.x*uChannel + (z+3)];\n" -" imageStore(uImage, pos, color);\n" -" }\n" -"}\n" -; -const char* glsl_im2col_glsl = -"layout(std430) buffer;\n" -"layout(binding=0, FORMAT) writeonly mediump uniform image2D uOutput;\n" -"layout(location=1) uniform mediump sampler3D uInput;\n" -"layout(location=2) uniform ivec2 pad;\n" -"layout(location=3) uniform ivec2 kernelSize;\n" -"layout(location=4) uniform ivec2 stride;\n" -"layout(location=5) uniform ivec2 dilate;\n" -"layout(location=6) uniform ivec4 inputSize;\n" -"layout(location=7) uniform ivec4 outputSize;\n" -"layout (local_size_x = XLOCAL, local_size_y = YLOCAL, local_size_z = ZLOCAL) in;\n" -"#define UP_DIV(x, y) (((x)+(y)-1)/(y))\n" -"//index : ib*ic/4, oh, ow\n" -"//input image ic/4, ih, iw * ic4\n" -"//inputsize : ic/4, ih, iw\n" -"//ouputsize : oc/4, oh, ow\n" -"//output : temp image : (ib*oh*ow)/ 4, ic/4*ky*kx*(ib*oh*ow)%4*ic4\n" -"void main()\n" -"{\n" -" ivec3 index = ivec3(gl_GlobalInvocationID);\n" -" if (index.x < outputSize.x && index.y < outputSize.y)\n" -" {\n" -" ivec2 s0 = index.xy*stride-pad;\n" -" ivec2 sfxy = max(ivec2(0), (UP_DIV(-s0, dilate)));\n" -" ivec2 efxy = min(kernelSize, UP_DIV(inputSize.xy-s0, dilate));\n" -" int ic_4 = index.z % inputSize.z; //input channel\n" -" int ib = index.z / inputSize.z; // input batch\n" -" \n" -" int destYOrigin = ib*outputSize.x*outputSize.y + index.y*outputSize.x + index.x;\n" -" int destY = destYOrigin / 4;\n" -" int destXOffset = destYOrigin % 4;\n" -" for (int fy=0; fy) list(APPEND MNN_TARGETS MNNOpenGL) SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) diff --git a/source/backend/opengl/GLBinary.cpp b/source/backend/opengl/GLBinary.cpp index 97d0c082..048ca7ae 100644 --- a/source/backend/opengl/GLBinary.cpp +++ b/source/backend/opengl/GLBinary.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLBinary.hpp" +#include "backend/opengl/GLBinary.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLConcat.cpp b/source/backend/opengl/GLConcat.cpp index f5bfd562..f108b93e 100644 --- a/source/backend/opengl/GLConcat.cpp +++ b/source/backend/opengl/GLConcat.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLConcat.hpp" -#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLConcat.hpp" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" namespace MNN { diff --git a/source/backend/opengl/GLConverter.cpp b/source/backend/opengl/GLConverter.cpp index 9cd20751..9f831d8f 100644 --- a/source/backend/opengl/GLConverter.cpp +++ b/source/backend/opengl/GLConverter.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLConverter.hpp" -#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLConverter.hpp" +#include "AllShader.hpp" #include 
"backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLConvolution.cpp b/source/backend/opengl/GLConvolution.cpp index a4c57cd1..31d40dba 100644 --- a/source/backend/opengl/GLConvolution.cpp +++ b/source/backend/opengl/GLConvolution.cpp @@ -6,13 +6,13 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLConvolution.hpp" +#include "backend/opengl/GLConvolution.hpp" #include #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "core/Macro.h" -#include "GLConvolutionIm2col.hpp" +#include "backend/opengl/GLConvolutionIm2col.hpp" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLConvolutionDepthwise.cpp b/source/backend/opengl/GLConvolutionDepthwise.cpp index a22e9b51..4c31e045 100644 --- a/source/backend/opengl/GLConvolutionDepthwise.cpp +++ b/source/backend/opengl/GLConvolutionDepthwise.cpp @@ -6,11 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLConvolutionDepthwise.hpp" +#include "backend/opengl/GLConvolutionDepthwise.hpp" #include #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" namespace MNN { diff --git a/source/backend/opengl/GLConvolutionDepthwise.hpp b/source/backend/opengl/GLConvolutionDepthwise.hpp index 14347de4..e75eaf3a 100644 --- a/source/backend/opengl/GLConvolutionDepthwise.hpp +++ b/source/backend/opengl/GLConvolutionDepthwise.hpp @@ -10,7 +10,7 @@ #define MNNDEMO_GLCONVOLUTIONDEPTHWISE_H #include "core/Execution.hpp" -#include "GLConvolution.hpp" +#include "backend/opengl/GLConvolution.hpp" #include "MNN_generated.h" namespace MNN { diff --git a/source/backend/opengl/GLConvolutionIm2col.cpp b/source/backend/opengl/GLConvolutionIm2col.cpp index 4b2093c2..486de1f2 100644 --- a/source/backend/opengl/GLConvolutionIm2col.cpp +++ b/source/backend/opengl/GLConvolutionIm2col.cpp @@ -6,14 +6,14 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLConvolution.hpp" +#include "backend/opengl/GLConvolution.hpp" #include #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" -#include "GLConvolutionIm2col.hpp" +#include "backend/opengl/GLConvolutionIm2col.hpp" #include "backend/opengl/GLUtils.hpp" namespace MNN { namespace OpenGL { diff --git a/source/backend/opengl/GLEltwise.cpp b/source/backend/opengl/GLEltwise.cpp index 8d64ec69..b2d487bf 100644 --- a/source/backend/opengl/GLEltwise.cpp +++ b/source/backend/opengl/GLEltwise.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLEltwise.hpp" +#include "backend/opengl/GLEltwise.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" namespace MNN { diff --git a/source/backend/opengl/GLInterp.cpp b/source/backend/opengl/GLInterp.cpp index 3e6df84f..d1df1c37 100644 --- a/source/backend/opengl/GLInterp.cpp +++ b/source/backend/opengl/GLInterp.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLInterp.hpp" +#include "backend/opengl/GLInterp.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" namespace MNN { diff --git a/source/backend/opengl/GLPermute.cpp b/source/backend/opengl/GLPermute.cpp index 
4fa182fe..ab0f08cd 100644 --- a/source/backend/opengl/GLPermute.cpp +++ b/source/backend/opengl/GLPermute.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLPermute.hpp" +#include "backend/opengl/GLPermute.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLPool.cpp b/source/backend/opengl/GLPool.cpp index ac2133a6..6f0978de 100644 --- a/source/backend/opengl/GLPool.cpp +++ b/source/backend/opengl/GLPool.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLPool.hpp" -#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLPool.hpp" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" namespace MNN { diff --git a/source/backend/opengl/GLROIPooling.cpp b/source/backend/opengl/GLROIPooling.cpp index c9a6697f..4cbb8614 100644 --- a/source/backend/opengl/GLROIPooling.cpp +++ b/source/backend/opengl/GLROIPooling.cpp @@ -6,8 +6,8 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLROIPooling.hpp" -#include "backend/opengl/shaders/AllShader.h" +#include "backend/opengl/GLROIPooling.hpp" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" namespace MNN { diff --git a/source/backend/opengl/GLRelu.cpp b/source/backend/opengl/GLRelu.cpp index e2c36ef5..d0e5e4c0 100644 --- a/source/backend/opengl/GLRelu.cpp +++ b/source/backend/opengl/GLRelu.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLRelu.hpp" +#include "backend/opengl/GLRelu.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLReshape.cpp b/source/backend/opengl/GLReshape.cpp index 0edc18cf..82e28415 100644 --- a/source/backend/opengl/GLReshape.cpp +++ b/source/backend/opengl/GLReshape.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLReshape.hpp" +#include "backend/opengl/GLReshape.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLSoftmax.cpp b/source/backend/opengl/GLSoftmax.cpp index d3b56624..9be6dac7 100644 --- a/source/backend/opengl/GLSoftmax.cpp +++ b/source/backend/opengl/GLSoftmax.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLSoftmax.hpp" +#include "backend/opengl/GLSoftmax.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLSqueeze.cpp b/source/backend/opengl/GLSqueeze.cpp index 5ce7e302..76602686 100644 --- a/source/backend/opengl/GLSqueeze.cpp +++ b/source/backend/opengl/GLSqueeze.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLSqueeze.hpp" +#include "backend/opengl/GLSqueeze.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/GLUnary.cpp b/source/backend/opengl/GLUnary.cpp index 7041250a..f8f1a0a3 100644 --- 
a/source/backend/opengl/GLUnary.cpp +++ b/source/backend/opengl/GLUnary.cpp @@ -6,9 +6,9 @@ // Copyright © 2018, Alibaba Group Holding Limited // -#include "GLUnary.hpp" +#include "backend/opengl/GLUnary.hpp" #include -#include "backend/opengl/shaders/AllShader.h" +#include "AllShader.hpp" #include "backend/opengl/GLBackend.hpp" #include "core/Macro.h" #include "core/TensorUtils.hpp" diff --git a/source/backend/opengl/makeshader.py b/source/backend/opengl/makeshader.py index 48622170..342f9d75 100755 --- a/source/backend/opengl/makeshader.py +++ b/source/backend/opengl/makeshader.py @@ -14,7 +14,9 @@ def findAllShader(path): return output def getName(fileName): - s1 = fileName.replace("/", "_") + s1 = os.path.abspath(fileName).split("/")[-1] + s1 = "glsl/"+s1 + s1 = s1.replace("/", "_") s1 = s1.replace(".", "_") return s1 @@ -23,7 +25,6 @@ def generateFile(headfile, sourcefile, shaders): cpp = "#include \"AllShader.hpp\"\n" for s in shaders: name = getName(s) - print name h += "extern const char* " + name + ";\n"; cpp += "const char* " + name + " = \n"; with open(s) as f: diff --git a/source/backend/vulkan/component/VulkanDevice.cpp b/source/backend/vulkan/component/VulkanDevice.cpp index 42725620..c40130ec 100644 --- a/source/backend/vulkan/component/VulkanDevice.cpp +++ b/source/backend/vulkan/component/VulkanDevice.cpp @@ -16,8 +16,7 @@ VulkanDevice::VulkanDevice(std::shared_ptr instance, const std:: mQueueFamilyIndex(0), mPhysicalDevice(VK_NULL_HANDLE), mDevice(VK_NULL_HANDLE), - mQueue(VK_NULL_HANDLE), - mFenceFdSupport(false) { + mQueue(VK_NULL_HANDLE) { MNN_ASSERT(mInstance->success()); // Find one GPU to use: // On Android, every GPU device is equal -- supporting @@ -78,7 +77,6 @@ VulkanDevice::VulkanDevice(std::shared_ptr instance, const std:: CALL_VK(vkCreateDevice(mPhysicalDevice, &deviceCreateInfo, nullptr, &mDevice)); vkGetPhysicalDeviceProperties(mPhysicalDevice, &mDeviceProty); getDeviceQueue(mQueueFamilyIndex, 0, mQueue); - setupVkFenceConfInformation(); } VulkanDevice::VulkanDevice(std::shared_ptr instance, VkPhysicalDevice physicalDevice, VkDevice device, @@ -90,7 +88,6 @@ VulkanDevice::VulkanDevice(std::shared_ptr instance, VkPhysicalD mDevice(device), mQueue(queue) { vkGetPhysicalDeviceProperties(mPhysicalDevice, &mDeviceProty); - setupVkFenceConfInformation(); } VulkanDevice::~VulkanDevice() { @@ -100,29 +97,6 @@ VulkanDevice::~VulkanDevice() { } } -void VulkanDevice::setupVkFenceConfInformation() { - mFenceFdSupport = fenceFdSupported(); -#ifdef VK_USE_PLATFORM_WIN32_KHR - mVkGetFenceWin32HandleKHR = nullptr; -#else - mVkGetFenceFdKHR = nullptr; -#endif - if (supportFenceFd()) { -/* dynamic load KHR extension */ -#ifdef VK_USE_PLATFORM_WIN32_KHR - mGetFenceWin32HandleKHR = - PFN_vkGetFenceWin32HandleKHR(vkGetDeviceProcAddr(mDevice, "vkGetFenceWin32HandleKHR")); -#else - mVkGetFenceFdKHR = PFN_vkGetFenceFdKHR(vkGetDeviceProcAddr(mDevice, "vkGetFenceFdKHR")); -#endif - } -} - -// if fenceFd is support, we can use epoll or select wait for fence complete -const bool VulkanDevice::supportFenceFd() const { - return mFenceFdSupport; -} - void VulkanDevice::getDeviceQueue(const uint32_t familyIndex, const uint32_t queueIndex, VkQueue& queue) { vkGetDeviceQueue(get(), familyIndex, queueIndex, &queue); } @@ -255,9 +229,6 @@ const VkResult VulkanDevice::createFence(VkFence& fence, const VkAllocationCallb .pNext = nullptr, .flags = 0, }; - if (supportFenceFd()) { - fci.pNext = &efci; - } return vkCreateFence(mDevice, &fci, allocator, &fence); } @@ -279,72 +250,6 @@ const VkResult 
VulkanDevice::resetFences(const uint32_t fenceCount, const VkFenc const VkResult VulkanDevice::resetFence(const VkFence& fence) const { return resetFences(1, &fence); } - -#ifdef VK_USE_PLATFORM_WIN32_KHR -const VkResult VulkanDevice::fenceFd(const VkFence& fence, HANDLE& fd) const { - if (nullptr == mVkGetFenceWin32HandleKHR) { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - VkFenceGetWin32HandleInfoKHR info; - info.sType = VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR; - info.fence = fence; - info.pNext = NULL; - info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; - auto res = mVkGetFenceWin32HandleKHR(mDevice, &info, &fd); -#else -const VkResult VulkanDevice::fenceFd(const VkFence& fence, int& fd) const { - if (nullptr == mVkGetFenceFdKHR) { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - VkFenceGetFdInfoKHR info; - info.sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR; - info.fence = fence; - info.pNext = NULL; - // following https://www.khronos.org/registry/vulkan/specs/1.0-wsi_extensions/html/vkspec.html - // current android only support VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR - // If handleType is VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, - // the special value -1 for fd is treated like a valid sync file descriptor referring to an object that has already - // signaled. The import operation will succeed and the VkFence will have a temporarily imported payload as if a - // valid file descriptor had been provided. -#ifdef VK_USE_PLATFORM_ANDROID_KHR - info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR; -#else - info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; -#endif - auto res = mVkGetFenceFdKHR(mDevice, &info, &fd); -#endif - - return res; -} - -// if fenceFd is support, we can use epoll or select wait for fence complete -// following https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_external_fence -// vulkan 1.1 support VK_KHR_external_fence default (Promoted to Vulkan 1.1) -// vulkan 1.0 ,need VK_KHR_external_fence extension (From Android 1.0.54 import this extension) -// following https://android.googlesource.com/platform/frameworks%2Fnative/+/9492f99cb57d97aa5df908773738fe7fe6a86acf -const bool VulkanDevice::fenceFdSupported() const { - auto props = proty(); - if (props.apiVersion >= VK_API_VERSION_1_1) { - return true; - } else { - std::vector avail_extensions; - auto result = enumerateDeviceExtensionProperties(mPhysicalDevice, avail_extensions); - if (VK_SUCCESS == result) { - for (int i = 0; i < avail_extensions.size(); i++) { -#ifdef VK_USE_PLATFORM_WIN32_KHR - if (0 == strcmp(avail_extensions[i].extensionName, VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME)) { -#else - if (0 == strcmp(avail_extensions[i].extensionName, VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME)) { -#endif - return true; - } - } - } - } - - return false; -} - const VkResult VulkanDevice::enumerateDeviceExtensionProperties(const VkPhysicalDevice& dev, std::vector& exts_props) const { uint32_t propertyCount = 0; diff --git a/source/backend/vulkan/component/VulkanDevice.hpp b/source/backend/vulkan/component/VulkanDevice.hpp index bb135820..86186e0c 100644 --- a/source/backend/vulkan/component/VulkanDevice.hpp +++ b/source/backend/vulkan/component/VulkanDevice.hpp @@ -73,14 +73,6 @@ public: void destroyFence(const VkFence& fence, const VkAllocationCallbacks* allocator = nullptr) const; const VkResult resetFences(const uint32_t fenceCount, const VkFence* fences) const; const VkResult resetFence(const VkFence& fence) const; -#ifdef 
VK_USE_PLATFORM_WIN32_KHR - const VkResult fenceFd(const VkFence& fence, HANDLE& fd) const; -#else - const VkResult fenceFd(const VkFence& fence, int& fd) const; -#endif - - // if fenceFd is support, we can use epoll or select wait for fence complete - const bool supportFenceFd() const; // VkSemaphore const VkResult createSemaphore(VkSemaphore& semaphore, const VkAllocationCallbacks* allocator = nullptr) const; @@ -171,8 +163,6 @@ public: private: const VkResult enumerateDeviceExtensionProperties(const VkPhysicalDevice& dev, std::vector& exts_props) const; - const bool fenceFdSupported() const; - void setupVkFenceConfInformation(); private: bool mOwner; @@ -182,12 +172,6 @@ private: VkDevice mDevice; VkPhysicalDeviceProperties mDeviceProty; VkQueue mQueue; - bool mFenceFdSupport; -#ifdef VK_USE_PLATFORM_WIN32_KHR - PFN_vkGetFenceWin32HandleKHR mVkGetFenceWin32HandleKHR; -#else - PFN_vkGetFenceFdKHR mVkGetFenceFdKHR; -#endif }; } // namespace MNN #endif /* VulkanDevice_hpp */ diff --git a/source/backend/vulkan/component/VulkanFence.cpp b/source/backend/vulkan/component/VulkanFence.cpp index e4467090..377a548d 100644 --- a/source/backend/vulkan/component/VulkanFence.cpp +++ b/source/backend/vulkan/component/VulkanFence.cpp @@ -30,74 +30,10 @@ VkResult VulkanFence::rawWait() const { return status; } -#if VK_FENCE_WAIT_FD_IF_SUPPORT -VkResult VulkanFence::pollWait(const int fd) const { - struct pollfd fds; - fds.events = POLLIN | POLLERR; - fds.fd = fd; - fds.revents = 0; - int ret = 0; - int err = 0; - do { - ret = poll(&fds, 1, -1); - err = errno; - } while ((ret < 0) && (EINTR == err)); - if (ret > 0) { - if ((fds.revents & POLLIN) && (fds.fd == fd) && (!(fds.revents & POLLERR))) { - return VK_SUCCESS; - } else { - MNN_ERROR("Fence Poll failed err=%d (%s)\n", err, strerror(err)); - return VK_ERROR_DEVICE_LOST; - } - } else { - MNN_ERROR("Fence Poll failed err=%d (%s)\n", err, strerror(err)); - return VK_ERROR_DEVICE_LOST; - } - return VK_SUCCESS; -} - -VkResult VulkanFence::fdWait() const { - int fd = 0; - VkResult res = fenceFd(fd); - if (VK_SUCCESS != res) { - return rawWait(); - } else { - // the special value -1 for fd is treated like a valid sync file descriptor referring to an object that has - // already signaled. The import operation will succeed and the VkFence will have a temporarily imported payload - // as if a valid file descriptor had been provided. 
-        if (-1 == fd) {
-            return VK_SUCCESS;
-        }
-        return pollWait(fd);
-    }
-    return VK_SUCCESS;
-}
-#endif
-
 VkResult VulkanFence::wait() const {
-#if VK_FENCE_WAIT_FD_IF_SUPPORT
-    if (supportFenceFd()) {
-        return fdWait();
-    }
     return rawWait();
-#else
-    return rawWait();
-#endif
 }
 
 VkResult VulkanFence::reset() const {
     return mDevice.resetFence(mFence);
 }
-
-// if fenceFd is support, we can use epoll or select wait for fence complete
-bool VulkanFence::supportFenceFd() const {
-    return mDevice.supportFenceFd();
-}
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-VkResult VulkanFence::fenceFd(HANDLE& fd) const {
-#else
-VkResult VulkanFence::fenceFd(int& fd) const {
-    return mDevice.fenceFd(mFence, fd);
-#endif
-}
-
 } // namespace MNN
diff --git a/source/backend/vulkan/component/VulkanFence.hpp b/source/backend/vulkan/component/VulkanFence.hpp
index dd85b986..ffee690d 100644
--- a/source/backend/vulkan/component/VulkanFence.hpp
+++ b/source/backend/vulkan/component/VulkanFence.hpp
@@ -16,7 +16,6 @@
 
 // if support Fence FD ,force use FD Wait function, this macro only used for test purpose,
 // if frameworks is blocked and not async , does not enable this macro
-#define VK_FENCE_WAIT_FD_IF_SUPPORT (0)
 
 namespace MNN {
 class VulkanFence : public NonCopyable {
@@ -36,20 +35,9 @@ public:
     // if fenceFd is support, we can use epoll or select wait for fence complete
     bool supportFenceFd() const;
 
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-    VkResult fenceFd(HANDLE& fd) const;
-#else
-    VkResult fenceFd(int& fd) const;
-#endif
-
 private:
     VkResult rawWait() const;
 
-#if VK_FENCE_WAIT_FD_IF_SUPPORT
-    VkResult fdWait() const;
-    VkResult pollWait(const int fd) const;
-#endif
-
 private:
     VkFence mFence;
     const VulkanDevice& mDevice;
diff --git a/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp b/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp
index 1472261f..b0113893 100644
--- a/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp
+++ b/source/backend/vulkan/execution/VulkanConvolutionWinograd.cpp
@@ -11,7 +11,7 @@
 #include "core/Macro.h"
 #include "math/WingoradGenerater.hpp"
 #define COMPUT_SIZE 4
-#define COMPUT_SIZE2 1
+#define COMPUT_SIZE2 16
 #include "backend/vulkan/execution/VulkanConvolution.hpp"
 namespace MNN {
 struct WinogradConst {
diff --git a/source/core/BufferAllocator.cpp b/source/core/BufferAllocator.cpp
index ac7e4f1e..14facd2b 100644
--- a/source/core/BufferAllocator.cpp
+++ b/source/core/BufferAllocator.cpp
@@ -117,10 +117,17 @@ bool BufferAllocator::free(void* pointer, bool needRelease) {
     return true;
 }
 
-void BufferAllocator::release() {
-    mUsedList.clear();
+void BufferAllocator::release(bool allRelease) {
+    if (allRelease) {
+        mUsedList.clear();
+        mFreeList.clear();
+        mTotalSize = 0;
+        return;
+    }
+    for (auto f : mFreeList) {
+        mTotalSize -= f.first;
+    }
     mFreeList.clear();
-    mTotalSize = 0;
 }
 
 void BufferAllocator::barrierBegin() {
diff --git a/source/core/BufferAllocator.hpp b/source/core/BufferAllocator.hpp
index 8240b51b..79522782 100644
--- a/source/core/BufferAllocator.hpp
+++ b/source/core/BufferAllocator.hpp
@@ -58,8 +58,9 @@ public:
      * @brief free all allocated memories.
      * @sa allocSeparate
      * @sa alloc
+     * if allRelease, clear all memory, otherwise delete freelist
      */
-    void release();
+    void release(bool allRelease = true);
 
     /**
      * @brief query total size allocated indeed.
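// The BufferAllocator::release(bool allRelease) change above introduces two modes:
// allRelease == true clears both the used and free lists and resets the byte counter,
// while allRelease == false only purges the cached free list so live buffers stay valid.
// A minimal, self-contained sketch of that pattern follows. It is illustrative only:
// the ToyAllocator name, members, and layout are assumptions made for this example,
// not MNN's actual BufferAllocator implementation.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <map>
#include <vector>

class ToyAllocator {
public:
    void* alloc(std::size_t size) {
        auto it = mFree.find(size);
        if (it != mFree.end() && !it->second.empty()) {
            void* p = it->second.back();   // reuse a cached block of the same size
            it->second.pop_back();
            mUsed[p] = size;
            return p;
        }
        void* p = std::malloc(size);
        mTotal += size;
        mUsed[p] = size;
        return p;
    }
    void free(void* p) {
        auto it = mUsed.find(p);
        if (it == mUsed.end()) {
            return;
        }
        mFree[it->second].push_back(p);    // keep the block cached for later reuse
        mUsed.erase(it);
    }
    void release(bool allRelease = true) {
        for (auto& kv : mFree) {           // always drop the cached free list
            for (void* p : kv.second) {
                std::free(p);
                mTotal -= kv.first;
            }
        }
        mFree.clear();
        if (allRelease) {                  // full teardown: live blocks go too
            for (auto& kv : mUsed) {
                std::free(kv.first);
                mTotal -= kv.second;
            }
            mUsed.clear();
        }
    }
    std::size_t totalSize() const { return mTotal; }
private:
    std::map<std::size_t, std::vector<void*>> mFree; // size -> cached blocks
    std::map<void*, std::size_t> mUsed;              // live block -> size
    std::size_t mTotal = 0;
};

int main() {
    ToyAllocator allocator;
    void* a = allocator.alloc(1024);
    void* b = allocator.alloc(2048);
    allocator.free(b);          // b is cached, not returned to the system yet
    allocator.release(false);   // trim only the cache; a stays usable
    std::printf("after trim: %zu bytes live\n", allocator.totalSize());
    (void)a;
    allocator.release(true);    // full release, as when tearing everything down
    std::printf("after full release: %zu bytes live\n", allocator.totalSize());
    return 0;
}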
diff --git a/source/core/Pipeline.cpp b/source/core/Pipeline.cpp index ba81c9b5..ec5895b0 100644 --- a/source/core/Pipeline.cpp +++ b/source/core/Pipeline.cpp @@ -38,7 +38,8 @@ float OperatorInfo::flops() const { static Backend::StorageType _getTensorStorageType(const Tensor* tensor) { auto des = TensorUtils::getDescribe(tensor); - if (des->isConst || des->isInput || des->isTrainableParameter) { + auto usage = des->usage; + if (TensorUsage::CONST == usage || TensorUsage::INPUT == usage || TensorUsage::TRAINABLE == usage) { return Backend::DYNAMIC_SEPERATE; } if (des->handleType != Tensor::HANDLE_NONE) { @@ -49,10 +50,11 @@ static Backend::StorageType _getTensorStorageType(const Tensor* tensor) { static Backend::StorageType _getTensorReleaseStorageType(const Tensor* tensor) { auto des = TensorUtils::getDescribe(tensor); + auto usage = des->usage; if (des->handleType != Tensor::HANDLE_NONE) { return Backend::DYNAMIC_SEPERATE; } - if (des->isConst || des->isTrainableParameter) { + if (TensorUsage::CONST == usage || TensorUsage::TRAINABLE == usage) { return Backend::DYNAMIC_SEPERATE; } return Backend::DYNAMIC; @@ -217,21 +219,21 @@ ErrorCode Pipeline::Unit::prepare(Backend* bn, Backend* cpuBn) { // Check const mConst = true; for (int i = 0; i < mInputs.size(); ++i) { - if (SizeComputer::opNeedContent(mOriginOp->type(), i) && (!TensorUtils::getDescribe(mInputs[i])->isConst)) { + if (SizeComputer::opNeedContent(mOriginOp->type(), i) && (TensorUtils::getDescribe(mInputs[i])->usage != TensorUsage::CONST)) { mConst = false; break; } } if (mType == OpType_TrainableParam) { for (auto t : mOutputs) { - TensorUtils::getDescribe(t)->isTrainableParameter = true; + TensorUtils::getDescribe(t)->usage = TensorUsage::TRAINABLE; } mConst = false; } if (mConst) { for (auto t : mOutputs) { - TensorUtils::getDescribe(t)->isConst = true; + TensorUtils::getDescribe(t)->usage = TensorUsage::CONST; } bn = cpuBn; } diff --git a/source/core/Schedule.cpp b/source/core/Schedule.cpp index 0eee0371..35342b34 100644 --- a/source/core/Schedule.cpp +++ b/source/core/Schedule.cpp @@ -401,6 +401,7 @@ Schedule::ScheduleInfo Schedule::schedule(const Net* net, const std::vectortensorName()->GetAsString(index)->c_str(), allTensors[index].get())); + TensorUtils::getDescribe(allTensors[index].get())->usage = TensorUsage::INPUT; } for (auto index : outputIndexesDiff) { schedule.outputTensor.insert( @@ -422,6 +423,7 @@ Schedule::ScheduleInfo Schedule::schedule(const Net* net, const std::vectorusage = TensorUsage::OUTPUT; schedule.allTensors[outputIndex].first += 1; } return schedule; diff --git a/source/core/Session.cpp b/source/core/Session.cpp index 67cd8ef8..d07b14ed 100644 --- a/source/core/Session.cpp +++ b/source/core/Session.cpp @@ -51,14 +51,11 @@ Session::Session(const Schedule::ScheduleInfo& info) { } auto backend = mBackends.find(iter.first.type)->second.get(); auto cpuBackend = _getDefaultBackend(); - std::unique_ptr newPipeline(new Pipeline(iter.second, backend, cpuBackend)); + std::shared_ptr newPipeline(new Pipeline(iter.second, backend, cpuBackend)); mPipelines.emplace_back(std::move(newPipeline)); } mInputs = info.inputTensors; mOutputs = info.outputTensor; - for (auto& iter : mInputs) { - TensorUtils::getDescribe(iter.second)->isInput = true; - } } Session::~Session() { diff --git a/source/core/SizeComputer.cpp b/source/core/SizeComputer.cpp index a4146823..4b5216ec 100644 --- a/source/core/SizeComputer.cpp +++ b/source/core/SizeComputer.cpp @@ -58,6 +58,8 @@ float SizeComputer::onComputeFlops(const MNN::Op* op, 
const std::vector } bool SizeComputer::opNeedContent(OpType type, int index) { switch (type) { + case OpType_ZerosLike: + case OpType_ZeroGrad: case OpType_Shape: case OpType_Rank: case OpType_Const: diff --git a/source/core/TensorUtils.hpp b/source/core/TensorUtils.hpp index 28f7d2f9..6e2ca8c1 100644 --- a/source/core/TensorUtils.hpp +++ b/source/core/TensorUtils.hpp @@ -29,18 +29,21 @@ public: /** for HOST tensor only. host memory is owned by tensor or not */ bool ownHost = false; - - /** Whether the tensor is a trainable parameter. Trainable parameter should be stored in a different area. */ - bool isTrainableParameter = false; - /** for DEVICE tensor only. const data may be stored in different area on device. */ - bool isConst = false; /** for DEVICE tensor only. backend used to manage tensor's device memory. */ Backend* backend = nullptr; /** for DEVICE tensor only. */ int useCount = 0; - /** for DEVICE tensor only. */ - bool isInput = false; + enum Usage { + NORMAL, + INPUT, + OUTPUT, + CONST, + /** Whether the tensor is a trainable parameter. Trainable parameter should be stored in a different area. */ + TRAINABLE, + }; + Usage usage = NORMAL; }; +typedef Tensor::InsideDescribe::Usage TensorUsage; /** tensor utils */ class MNN_PUBLIC TensorUtils { diff --git a/source/core/WrapExecution.cpp b/source/core/WrapExecution.cpp index 0d7ad1d4..1970cc79 100644 --- a/source/core/WrapExecution.cpp +++ b/source/core/WrapExecution.cpp @@ -51,7 +51,7 @@ ErrorCode WrapExecution::onResize(const std::vector& inputs, const std: std::shared_ptr wrapTensor(new Tensor); TensorUtils::copyShape(inputTensor, midTensor.get(), true); TensorUtils::copyShape(inputTensor, wrapTensor.get(), true); - TensorUtils::getDescribe(midTensor.get())->isConst = TensorUtils::getDescribe(inputTensor)->isConst; + TensorUtils::getDescribe(midTensor.get())->usage = TensorUtils::getDescribe(inputTensor)->usage; midTensor->buffer().type = inputTensor->buffer().type; wrapTensor->buffer().type = inputTensor->buffer().type; mInputMaps.emplace_back(std::make_tuple(mCPUBackend, srcBackend, inputTensor, midTensor)); @@ -71,10 +71,11 @@ ErrorCode WrapExecution::onResize(const std::vector& inputs, const std: auto src = std::get<2>(iter); auto dst = std::get<3>(iter).get(); - if (TensorUtils::getDescribe(src)->isConst) { + if (TensorUtils::getDescribe(src)->usage == TensorUsage::CONST) { memoryAllocSuccess = backend->onAcquireBuffer(dst, Backend::DYNAMIC_SEPERATE); if (memoryAllocSuccess) { converter->onCopyBuffer(src, dst); + TensorUtils::getDescribe(dst)->usage = TensorUtils::getDescribe(src)->usage; } } else { memoryAllocSuccess = backend->onAcquireBuffer(dst, Backend::DYNAMIC); @@ -92,7 +93,7 @@ ErrorCode WrapExecution::onResize(const std::vector& inputs, const std: auto backend = std::get<0>(iter); auto dst = std::get<3>(iter).get(); - if (TensorUtils::getDescribe(dst)->isConst) { + if (TensorUtils::getDescribe(dst)->usage == TensorUsage::CONST) { backend->onReleaseBuffer(dst, Backend::DYNAMIC_SEPERATE); } else { backend->onReleaseBuffer(dst, Backend::DYNAMIC); @@ -109,7 +110,7 @@ ErrorCode WrapExecution::onExecute(const std::vector& inputs, const std auto converter = std::get<1>(iter); auto src = std::get<2>(iter); auto dst = std::get<3>(iter).get(); - if (!TensorUtils::getDescribe(src)->isConst) { + if (TensorUtils::getDescribe(src)->usage != TensorUsage::CONST) { converter->onCopyBuffer(src, dst); } } diff --git a/source/shape/ShapeConvolution.cpp b/source/shape/ShapeConvolution.cpp index 2341542a..69fb3b9b 100644 --- 
a/source/shape/ShapeConvolution.cpp +++ b/source/shape/ShapeConvolution.cpp @@ -16,6 +16,9 @@ public: const std::vector& outputs) const override { MNN_ASSERT(inputs.size() >= 1); MNN_ASSERT(1 == outputs.size()); + if (TensorUtils::getDescribe(inputs[0])->dimensionFormat != MNN_DATA_FORMAT_NC4HW4) { + return false; + } auto layer = op->main_as_Convolution2D()->common(); int kernel_width = layer->dilateX() * (layer->kernelX() - 1) + 1; int kernel_height = layer->dilateY() * (layer->kernelY() - 1) + 1; @@ -55,6 +58,7 @@ public: outputBuffer.dim[2].extent = output_height; outputBuffer.dim[3].extent = output_width; outputBuffer.type = input->getType(); + //MNN_PRINT("%d, %d, %d, %d\n", outputs[0]->length(0), outputs[0]->length(1), outputs[0]->length(2), outputs[0]->length(3)); TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat; return true; diff --git a/source/shape/ShapeGatherND.cpp b/source/shape/ShapeGatherND.cpp index 0e395131..0cc4fa6f 100644 --- a/source/shape/ShapeGatherND.cpp +++ b/source/shape/ShapeGatherND.cpp @@ -43,5 +43,5 @@ public: } }; -REGISTER_SHAPE_INPUTS(GatherNDComputer, OpType_GatherND, (std::vector{1})); +REGISTER_SHAPE(GatherNDComputer, OpType_GatherND); } // namespace MNN diff --git a/source/shape/ShapeGatherV2.cpp b/source/shape/ShapeGatherV2.cpp index 006a2a6b..32ff7bee 100644 --- a/source/shape/ShapeGatherV2.cpp +++ b/source/shape/ShapeGatherV2.cpp @@ -57,5 +57,5 @@ class GatherV2Computer : public SizeComputer { } }; -REGISTER_SHAPE_INPUTS(GatherV2Computer, OpType_GatherV2, (std::vector{1, 2})); +REGISTER_SHAPE_INPUTS(GatherV2Computer, OpType_GatherV2, (std::vector{2})); } // namespace MNN diff --git a/source/shape/ShapeInterp.cpp b/source/shape/ShapeInterp.cpp index 70647033..7d504e93 100644 --- a/source/shape/ShapeInterp.cpp +++ b/source/shape/ShapeInterp.cpp @@ -56,6 +56,7 @@ class InterpComputer : public SizeComputer { if (0 == w || 0 == h) { return false; } + outputs[0]->buffer().type = inputs[0]->getType(); TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat; return true; diff --git a/source/shape/ShapeOneHot.cpp b/source/shape/ShapeOneHot.cpp index d6fa8382..fc88f032 100644 --- a/source/shape/ShapeOneHot.cpp +++ b/source/shape/ShapeOneHot.cpp @@ -34,6 +34,7 @@ public: } auto output = outputs[0]; output->buffer().dimensions = outputDimension; + output->buffer().type = inputs[2]->buffer().type; for (int i = 0; i < outputDimension; ++i) { if (i < axis) { output->setLength(i, indices->length(i)); diff --git a/source/shape/ShapeRange.cpp b/source/shape/ShapeRange.cpp index f0fcf0d6..f50660bf 100644 --- a/source/shape/ShapeRange.cpp +++ b/source/shape/ShapeRange.cpp @@ -22,9 +22,9 @@ static int computeSize(const MNN::Op* op, const std::vector& inputs, co MNN_ASSERT((1 == limit_in->buffer().dimensions) || (0 == limit_in->buffer().dimensions)); MNN_ASSERT((1 == delta_in->buffer().dimensions) || (0 == delta_in->buffer().dimensions)); - const T start = start_in->host()[0]; - const T limit = limit_in->host()[0]; - const T delta = delta_in->host()[0]; + const float start = (float)start_in->host()[0]; + const float limit = (float)limit_in->host()[0]; + const float delta = (float)delta_in->host()[0]; MNN_ASSERT(0 != delta); if (delta > 0) { @@ -33,8 +33,7 @@ static int computeSize(const MNN::Op* op, const std::vector& inputs, co MNN_ASSERT(start >= limit); } - int64_t size = (std::is_integral::value ? 
((abs(limit - start) + abs(delta) - 1) / abs(delta)) - : ceil(abs((limit - start) / delta))); + int32_t size = ceilf(fabsf((limit - start) / delta)); return (int)size; } diff --git a/source/shape/ShapeResize.cpp b/source/shape/ShapeResize.cpp index 4da38e2a..c3301210 100644 --- a/source/shape/ShapeResize.cpp +++ b/source/shape/ShapeResize.cpp @@ -26,6 +26,7 @@ class ResizeComputer : public SizeComputer { // set dims output.dim[3].extent = input.dim[3].extent * resize->xScale(); output.dim[2].extent = input.dim[2].extent * resize->yScale(); + output.type = inputs[0]->getType(); TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat; return true; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 673e84a7..4b29f378 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -14,4 +14,7 @@ IF(MNN_BUILD_TEST) find_library(FOUNDATION Foundation REQUIRED) target_link_libraries(run_test.out ${FOUNDATION}) endif() + IF(WIN32 OR MSVC) + target_compile_options(run_test.out PRIVATE /wd4244 /wd4267 /wd4305 /wd4251) + ENDIF() ENDIF() diff --git a/test/core/ScheduleTest.cpp b/test/core/ScheduleTest.cpp index 30986aae..16535bfd 100644 --- a/test/core/ScheduleTest.cpp +++ b/test/core/ScheduleTest.cpp @@ -5,9 +5,15 @@ // Created by MNN on 2019/01/30. // Copyright © 2018, Alibaba Group Holding Limited // +#if defined(_MSC_VER) +#include +#undef min +#undef max +#else +#include +#endif #include -#include #include #include #include @@ -683,10 +689,17 @@ bool ScheduleTest::run() { bool squeezeNetCont = true; string path_join = "../"; string path = path_join + const_model_file; - if (-1 == access(path.c_str(), 0)) { +#if defined(_MSC_VER) + if (INVALID_FILE_ATTRIBUTES != GetFileAttributes(path.c_str()) && GetLastError() != ERROR_FILE_NOT_FOUND) { path_join = "./"; path = path_join + const_model_file; - if (-1 == access(path.c_str(), 0)) { + if (INVALID_FILE_ATTRIBUTES != GetFileAttributes(path.c_str()) && GetLastError() != ERROR_FILE_NOT_FOUND) { +#else + if (-1 == access(path.c_str(), F_OK)) { + path_join = "./"; + path = path_join + const_model_file; + if (-1 == access(path.c_str(), F_OK)) { +#endif squeezeNetCont = false; MNN_ERROR("[FAIL] TestSqueezeNet %s fail to run.Model file not found\n", const_model_file.c_str()); } diff --git a/test/expr/ConvInt8Test.cpp b/test/expr/ConvInt8Test.cpp new file mode 100644 index 00000000..b4e1cee7 --- /dev/null +++ b/test/expr/ConvInt8Test.cpp @@ -0,0 +1,117 @@ +// +// ConvInt8Test.cpp +// MNNTests +// +// Created by MNN on 2019/010/24. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include "MNNTestSuite.h" +using namespace MNN::Express; +inline int8_t int32ToInt8(int data, int bias, float scale) { + float value = roundf((float)(data + bias) * scale); + value = std::max(value, -127.0f); + value = std::min(value, 127.0f); + return static_cast(value); +} + +class ConvInt8Test : public MNNTestCase { +public: + static bool _testKernel(INTS kernel) { + INTS strides = {1, 1}; + INTS dilate = {1, 1}; + INTS channel = {11, 7}; + INTS pad = {3, 4}; + std::vector bias(channel[1]); + std::vector scale(channel[1]); + std::vector weight(channel[1] * channel[0] * kernel[0] * kernel[1]); + VARP x = _Input({1, channel[0], 204, 215}, NC4HW4, halide_type_of()); + auto xInfo = x->getInfo(); + auto xPtr = x->writeMap(); + for (int i=0; isize; ++i) { + xPtr[i] = (i % 254) - 127; + } + for (int i=0; i originWeight = weight; + auto originScale = scale; + auto originBias = bias; + auto y = _Conv(std::move(weight), std::move(bias), std::move(scale), x, channel, kernel, PaddingMode::CAFFE, strides, dilate, 1, pad); + auto yInfo = y->getInfo(); + auto yPtr = y->readMap(); + auto ow = yInfo->dim[3]; + auto oh = yInfo->dim[2]; + auto iw = xInfo->dim[3]; + auto ih = xInfo->dim[2]; + for (int oz=0; oz= ih || sy < 0) { + continue; + } + for (int kx=0; kx= iw || sx < 0) { + continue; + } + sum += (int)srcPtr[sx*4+sy*iw*4] * + (int)originWeight[(( + oz* channel[0]+sz) + *kernel[1]+ky) + *kernel[0]+kx]; + } + } + } + auto targetValue = int32ToInt8(sum, biasValue, scaleValue); + if (targetValue != computeResult) { + return false; + } + } + } + } + + return true; + } + virtual bool run() { + auto res = _testKernel({3, 3}); + if (!res) { + MNN_ERROR("Error for test kernel 3x3 for convint8\n"); + return false; + } + res = _testKernel({1, 3}); + if (!res) { + MNN_ERROR("Error for test kernel 1x3 for convint8\n"); + return false; + } + res = _testKernel({1, 1}); + if (!res) { + MNN_ERROR("Error for test kernel 1x1 for convint8\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(ConvInt8Test, "expr/ConvInt8"); diff --git a/test/expr/ExprResizeTest.cpp b/test/expr/ExprResizeTest.cpp index e292fda8..40ce4ff7 100644 --- a/test/expr/ExprResizeTest.cpp +++ b/test/expr/ExprResizeTest.cpp @@ -26,6 +26,13 @@ public: x->resize({5}); ::memcpy(x->writeMap(), x0.data(), x->getInfo()->size*sizeof(int32_t)); if (30 != y->readMap()[0]) { + MNN_PRINT("%d - Error: %d\n", 30, y->readMap()[0]); + return false; + } + auto z = _Cast(_ReduceMean(_Cast(x+x))); + z.fix(VARP::CONST); + if (4 != z->readMap()[0]) { + MNN_PRINT("%d - Error = %d\n", 4, z->readMap()[0]); return false; } return true; diff --git a/test/expr/ReplaceTest.cpp b/test/expr/ReplaceTest.cpp index 76b7d8b0..1bdcd024 100644 --- a/test/expr/ReplaceTest.cpp +++ b/test/expr/ReplaceTest.cpp @@ -40,6 +40,14 @@ public: MNN_PRINT("1 + 5 x 4 = %f\n", r3[0]); return false; } + auto d0 = _Const(7.f, {1, 3, 1, 1}, NHWC); + auto d = _Split(d0, {1, 1, 1}, 1)[0]; + Variable::replace(c3, d); + r3 = b1->readMap(); + if (29.0f != r3[0]) { + MNN_PRINT("1 + 7 x 4 = %f\n", r3[0]); + return false; + } return true; } }; diff --git a/test/op/ConcatTest.cpp b/test/op/ConcatTest.cpp index 8fa60495..4abe8af9 100644 --- a/test/op/ConcatTest.cpp +++ b/test/op/ConcatTest.cpp @@ -5,227 +5,44 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; -static Interpreter *create(int axis, int n, int b, int c, int h, int w, bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - std::vector> ns; - for (int i = 0; i < n; i++) { - auto dims = fbb.CreateVector(tensorflow ? std::vector({b, h, w, c}) : std::vector({b, c, h, w})); - auto name = fbb.CreateString(std::to_string(i)); - auto iv = fbb.CreateVector(std::vector({i})); - auto ov = fbb.CreateVector(std::vector({i})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - ns.push_back(name); - } - { - auto name = fbb.CreateString("concat"); - std::vector ips; - for (int i = 0; i < n; i++) { - ips.push_back(i); - } - auto iv = fbb.CreateVector(ips); - auto ov = fbb.CreateVector(std::vector({n})); - AxisBuilder ab(fbb); - ab.add_axis(axis); - auto concat = ab.Finish(); - - OpBuilder builder(fbb); - builder.add_type(OpType_Concat); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Axis); - builder.add_main(flatbuffers::Offset(concat.o)); - vec.push_back(builder.Finish()); - ns.push_back(name); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVector(ns); - - if (tensorflow) { - BlobBuilder bb(fbb); - bb.add_dataType(DataType_DT_FLOAT); - bb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = bb.Finish(); - - std::vector> desc; - for (int i = 0; i < n; i++) { - TensorDescribeBuilder tdb(fbb); - tdb.add_blob(flatbuffers::Offset(blob.o)); - tdb.add_index(i); - desc.push_back(tdb.Finish()); - } - TensorDescribeBuilder tdb(fbb); - tdb.add_blob(flatbuffers::Offset(blob.o)); - tdb.add_index(n); - desc.push_back(tdb.Finish()); - - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class ConcatCaffeTest : public MNNTestCase { +using namespace MNN::Express; +class ConcatTest : public MNNTestCase { public: - virtual ~ConcatCaffeTest() = default; + virtual ~ConcatTest() = default; virtual bool run() { - for (int axis = 1; axis <= 3; axis++) { - for (int n = 2; n <= 4; n++) { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(axis, n, b, c, h, w, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - 
// input - for (int i = 0; i < n; i++) { - auto input = new Tensor(4); - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - - auto host = net->getSessionInput(CPU, std::to_string(i).c_str()); - auto device = net->getSessionInput(GPU, std::to_string(i).c_str()); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // clean up - free(input->buffer().host); - delete input; - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - delete net; - }); - } - } - } - } - } + auto input1 = _Input({2,2}, NCHW); + input1->setName("input1"); + // set input data + const float input1_data[] = {1.0, 2.0, 3.0, 4.0}; + auto input1Ptr = input1->writeMap(); + memcpy(input1Ptr, input1_data, 4 * sizeof(float)); + input1->unMap(); + auto input2 = _Input({2,2}, NCHW); + input2->setName("input2"); + // set input data + const float input2_data[] = {5.0, 6.0, 7.0, 8.0}; + auto input2Ptr = input2->writeMap(); + memcpy(input2Ptr, input2_data, 4 * sizeof(float)); + input2->unMap(); + auto output = _Concat({input1, input2}, 1); + const std::vector expectedOutput = {1.0 , 2.0, 5.0, 6.0, 3.0, 4.0, 7.0, 8.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 8, 0.0001)) { + MNN_ERROR("ConcatTest test failed!\n"); + return false; + } + const std::vector expectedDim = {2, 4}; + auto gotDim = output->getInfo()->dim; + if (!checkVector(gotDim.data(), expectedDim.data(), 2, 0)) { + MNN_ERROR("ConcatTest test failed!\n"); + return false; } return true; } }; - -class ConcatTensorflowTest : public MNNTestCase { -public: - virtual ~ConcatTensorflowTest() = default; - virtual bool run() { - for (int axis = 1; axis <= 3; axis++) { - for (int n = 2; n <= 4; n++) { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(axis, n, b, c, h, w, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - for (int i = 0; i < n; i++) { - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - } - - auto host = net->getSessionInput(CPU, std::to_string(i).c_str()); - auto device = net->getSessionInput(GPU, std::to_string(i).c_str()); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // clean up - free(input->buffer().host); - delete input; - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - delete net; - }); - } - } - } - } - } - } - return true; - } -}; - -MNNTestSuiteRegister(ConcatCaffeTest, "op/concat/caffe"); 
-MNNTestSuiteRegister(ConcatTensorflowTest, "op/concat/tf"); +MNNTestSuiteRegister(ConcatTest, "op/concat"); diff --git a/test/op/CropTest.cpp b/test/op/CropTest.cpp index b0551b19..db69d903 100644 --- a/test/op/CropTest.cpp +++ b/test/op/CropTest.cpp @@ -5,201 +5,40 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" - -using namespace MNN; - -static Interpreter *create(int axis, std::vector offsets, int b, int c, int h0, int w0, int h1, int w1) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({b, c, h0, w0})); - InputBuilder ib(fbb); - ib.add_dims(dims); - - auto input = ib.Finish(); - auto name = fbb.CreateString("input0"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(std::vector({b, c, h1, w1})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input1"); - auto iv = fbb.CreateVector(std::vector({1})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto data = fbb.CreateVector(offsets); - auto cb = CropBuilder(fbb); - cb.add_axis(axis); - cb.add_offset(data); - auto crop = cb.Finish(); - auto name = fbb.CreateString("crop"); - auto iv = fbb.CreateVector(std::vector({0, 1})); - auto ov = fbb.CreateVector(std::vector({2})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Crop); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Crop); - builder.add_main(flatbuffers::Offset(crop.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input0", "input1", "output"}); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class CropWTest : public MNNTestCase { +using namespace MNN::Express; +class CropTest : public MNNTestCase { public: - virtual ~CropWTest() = default; + virtual ~CropTest() = default; virtual bool run() { - int b = 3, c = 5, h0 = 7, w0 = 9; - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - std::vector offsets = {2}; - int h1 = h0, w1 = w0 - offsets[0]; - - // nets - auto net = create(3, offsets, b, c, h0, w0, h1, w1); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input0 - auto input = new Tensor(4); - { - 
input->buffer().dim[0].extent = b;
-                input->buffer().dim[1].extent = c;
-                input->buffer().dim[2].extent = h0;
-                input->buffer().dim[3].extent = w0;
-                TensorUtils::setLinearLayout(input);
-                input->buffer().host = (uint8_t *)malloc(input->size());
-                for (int i = 0; i < b * c * h0 * w0; i++) {
-                    input->host<float>()[i] = i + 1;
-                }
-                auto host   = net->getSessionInput(CPU, "input0");
-                auto device = net->getSessionInput(GPU, "input0");
-                net->getBackend(CPU, host)->onCopyBuffer(input, host);
-                net->getBackend(GPU, device)->onCopyBuffer(input, device);
-            }
-
-            // infer
-            assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.015));
-
-            // clean up
-            free(input->buffer().host);
-            delete input;
-            delete net;
-        });
-        return true;
-    }
-};
-
-class CropHTest : public MNNTestCase {
-public:
-    virtual ~CropHTest() = default;
-    virtual bool run() {
-        int b = 3, c = 5, h0 = 7, w0 = 9;
-        for (int i = 0; i < 2; i++) {
-            dispatch([&](MNNForwardType backend) -> void {
-                if (backend == MNN_FORWARD_CPU)
-                    return;
-                std::vector<int> offsets;
-                int h1 = h0, w1 = w0;
-                if (i == 0) {
-                    offsets.push_back(1);
-                    offsets.push_back(2);
-                    h1 -= 1;
-                    w1 -= 2;
-                } else {
-                    offsets.push_back(2);
-                    h1 -= 2;
-                    w1 -= 2;
-                }
-
-                // nets
-                auto net = create(2, offsets, b, c, h0, w0, h1, w1);
-                auto CPU = createSession(net, MNN_FORWARD_CPU);
-                auto GPU = createSession(net, backend);
-                if (!CPU || !GPU) {
-                    delete net;
-                    return;
-                }
-
-                // input
-                auto input = new Tensor(4);
-                {
-                    input->buffer().dim[0].extent = b;
-                    input->buffer().dim[1].extent = c;
-                    input->buffer().dim[2].extent = h0;
-                    input->buffer().dim[3].extent = w0;
-                    TensorUtils::setLinearLayout(input);
-                    input->buffer().host = (uint8_t *)malloc(input->size());
-                    for (int j = 0; j < b * c * h0 * w0; j++) {
-                        input->host<float>()[j] = j + 1;
-                    }
-                    auto host   = net->getSessionInput(CPU, "input0");
-                    auto device = net->getSessionInput(GPU, "input0");
-                    net->getBackend(CPU, host)->onCopyBuffer(input, host);
-                    net->getBackend(GPU, device)->onCopyBuffer(input, device);
-                }
-
-                // infer
-                assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.015));
-
-                // clean up
-                free(input->buffer().host);
-                delete input;
-                delete net;
-            });
+        auto input = _Input({1, 1, 4, 4}, NCHW);
+        input->setName("input_tensor");
+        // set input data
+        const float inputdata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
+        auto inputPtr = input->writeMap<float>();
+        memcpy(inputPtr, inputdata, 16 * sizeof(float));
+        input->unMap();
+        const float size_data[] = {0.0, 0.0, 0.0, 0.0};
+        auto size = _Const(size_data, {1, 1, 2, 2}, NCHW);
+        input = _Convert(input, NC4HW4);
+        auto output = _Crop(input, size, 2, {1, 1});
+        output = _Convert(output, NCHW);
+        const std::vector<float> expectedOutput = {6.0, 7.0, 10.0, 11.0};
+        auto gotOutput = output->readMap<float>();
+        if (!checkVector<float>(gotOutput, expectedOutput.data(), 4, 0.01)) {
+            MNN_ERROR("CropTest test failed!\n");
+            return false;
+        }
+        const std::vector<int> expectedDim = {1, 1, 2, 2};
+        auto gotDim = output->getInfo()->dim;
+        if (!checkVector<int>(gotDim.data(), expectedDim.data(), 4, 0)) {
+            MNN_ERROR("CropTest test failed!\n");
+            return false;
         }
         return true;
     }
 };
-MNNTestSuiteRegister(CropWTest, "op/crop/w");
-MNNTestSuiteRegister(CropHTest, "op/crop/h");
+MNNTestSuiteRegister(CropTest, "op/crop");
diff --git a/test/op/DepthToSpaceTest.cpp b/test/op/DepthToSpaceTest.cpp
new file mode 100644
index 00000000..2b9b8b71
--- /dev/null
+++ b/test/op/DepthToSpaceTest.cpp
@@ -0,0 +1,49 @@
+//
+//  DepthToSpaceTest.cpp
+//  MNNTests
+//
+//  Created by MNN on 2019/12/24.
+//  Copyright © 2018, Alibaba Group Holding Limited
+//
+
+
+#include <MNN/expr/Expr.hpp>
+#include <MNN/expr/ExprCreator.hpp>
+#include "MNNTestSuite.h"
+#include "TestUtils.h"
+using namespace MNN::Express;
+class DepthToSpaceTest : public MNNTestCase {
+public:
+    virtual ~DepthToSpaceTest() = default;
+    virtual bool run() {
+        auto input = _Input({4, 1, 1, 4}, NHWC);
+        input->setName("input");
+        // set input data
+        const float input_data[] = {-1.0, 2.0, -3.0, 4.0,
+                                    5.0, 6.0, 7.0, -8.0,
+                                    -9.0, -10.0, 11.0, 12.0,
+                                    13.0, 14.0, -15.0, -16.0};
+        auto inputPtr = input->writeMap<float>();
+        memcpy(inputPtr, input_data, 16 * sizeof(float));
+        input->unMap();
+        auto output = _DepthToSpace(input, 2);
+        const std::vector<float> expectedOutput = {-1.0, 2.0, -3.0, 4.0,
+                                                   5.0, 6.0, 7.0, -8.0,
+                                                   -9.0, -10.0, 11.0, 12.0,
+                                                   13.0, 14.0, -15.0, -16.0};
+        const std::vector<int> expectedDim = {4, 2, 2, 1};
+        auto gotOutput = output->readMap<float>();
+        auto gotDim = output->getInfo()->dim;
+        if (!checkVector<float>(gotOutput, expectedOutput.data(), 16, 0)) {
+            MNN_ERROR("DepthToSpaceTest test failed!\n");
+            return false;
+        }
+        if (!checkVector<int>(gotDim.data(), expectedDim.data(), 4, 0)) {
+            MNN_ERROR("DepthToSpaceTest test failed!\n");
+            return false;
+        }
+        return true;
+    }
+};
+MNNTestSuiteRegister(DepthToSpaceTest, "op/depthtospace");
+
diff --git a/test/op/ExpandDimsTest.cpp b/test/op/ExpandDimsTest.cpp
new file mode 100644
index 00000000..0eb1358b
--- /dev/null
+++ b/test/op/ExpandDimsTest.cpp
@@ -0,0 +1,41 @@
+//
+//  ExpandDimsTest.cpp
+//  MNNTests
+//
+//  Created by MNN on 2019/12/26.
+//  Copyright © 2018, Alibaba Group Holding Limited
+//
+#include <MNN/expr/Expr.hpp>
+#include <MNN/expr/ExprCreator.hpp>
+#include "MNNTestSuite.h"
+#include "TestUtils.h"
+
+using namespace MNN::Express;
+class ExpandDimsTest : public MNNTestCase {
+public:
+    virtual ~ExpandDimsTest() = default;
+    virtual bool run() {
+        auto input = _Input({4}, NCHW);
+        input->setName("input_tensor");
+        // set input data
+        const float inputdata[] = {-1.0, -2.0, 3.0, 4.0};
+        auto inputPtr = input->writeMap<float>();
+        memcpy(inputPtr, inputdata, 4 * sizeof(float));
+        input->unMap();
+        auto output = _ExpandDims(input, 0);
+        const std::vector<float> expectedOutput = {-1.0, -2.0, 3.0, 4.0};
+        auto gotOutput = output->readMap<float>();
+        if (!checkVector<float>(gotOutput, expectedOutput.data(), 4, 0.0001)) {
+            MNN_ERROR("ExpandDimsTest test failed!\n");
+            return false;
+        }
+        const std::vector<int> expectedDim = {1, 4};
+        auto gotDim = output->getInfo()->dim;
+        if (!checkVector<int>(gotDim.data(), expectedDim.data(), 2, 0)) {
+            MNN_ERROR("ExpandDimsTest test failed!\n");
+            return false;
+        }
+        return true;
+    }
+};
+MNNTestSuiteRegister(ExpandDimsTest, "op/expand_dims");
diff --git a/test/op/FillTest.cpp b/test/op/FillTest.cpp
index 3145ec0a..49dc680d 100644
--- a/test/op/FillTest.cpp
+++ b/test/op/FillTest.cpp
@@ -5,193 +5,33 @@
 //  Created by MNN on 2019/01/15.
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -#define kDim 4 - -static Interpreter *create(std::vector dims, int scalar, bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto val = fbb.CreateVector(dims); - auto idims = fbb.CreateVector(std::vector({(int)dims.size()})); - BlobBuilder ib(fbb); - ib.add_dims(idims); - ib.add_dataType(DataType_DT_INT32); - ib.add_dataFormat(MNN_DATA_FORMAT_NCHW); - ib.add_int32s(flatbuffers::Offset>(val.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({0})); - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto sdims = fbb.CreateVector(std::vector({})); - auto val = fbb.CreateVector(std::vector({scalar})); - BlobBuilder bb(fbb); - bb.add_dims(sdims); - bb.add_dataType(DataType_DT_INT32); - bb.add_dataFormat(MNN_DATA_FORMAT_NCHW); - bb.add_int32s(flatbuffers::Offset>(val.o)); - auto blob = bb.Finish(); - - auto name = fbb.CreateString("scalar"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(blob.o)); - vec.push_back(builder.Finish()); - } - { - auto name = fbb.CreateString("fill"); - auto iv = fbb.CreateVector(std::vector({0, 1})); - auto ov = fbb.CreateVector(std::vector({2})); - OpBuilder builder(fbb); - builder.add_type(OpType_Fill); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Fill); - builder.add_main(flatbuffers::Offset(FillBuilder(fbb).Finish().o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "scalar", "output"}); - - if (tensorflow) { - BlobBuilder bb(fbb); - bb.add_dataType(DataType_DT_INT32); - bb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = bb.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_blob(flatbuffers::Offset(blob.o)); - tdb.add_index(2); - desc.push_back(tdb.Finish()); - } - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return 
net->getSessionOutputAll(session).begin()->second; -} - -class FillCaffeTest : public MNNTestCase { +using namespace MNN::Express; +class FillTest : public MNNTestCase { public: - virtual ~FillCaffeTest() = default; + virtual ~FillTest() = default; virtual bool run() { - for (int i = 1; i <= 10; i++) { - std::vector dims; - for (int j = 0; j < kDim; j++) - dims.push_back(1 + rand() % 16); - - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto scalar = rand() % 255; - auto net = create(dims, scalar, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU))); - - // clean up - delete net; - }); + auto input = _Input({4}, NCHW, halide_type_of()); + input->setName("input_tensor"); + //set input data + const int inputdata[] = {1, 1, 1, 4}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inputdata, 4 * sizeof(int)); + input->unMap(); + const int fill_data = 1; + auto fill = _Const(&fill_data,{},NCHW, halide_type_of()); + auto output = _Fill(input, fill); + const std::vector expectedOutput = {1, 1, 1, 1}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0)) { + MNN_ERROR("FillTest test failed!\n"); + return false; } return true; } }; - -class FillTensorflowTest : public MNNTestCase { -public: - virtual ~FillTensorflowTest() = default; - virtual bool run() { - for (int i = 1; i <= 10; i++) { - std::vector dims; - for (int j = 0; j < kDim; j++) - dims.push_back(1 + rand() % 16); - - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto scalar = rand() % 255; - auto net = create(dims, scalar, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU))); - - // clean up - delete net; - }); - } - return true; - } -}; - -MNNTestSuiteRegister(FillCaffeTest, "op/fill/caffe"); -MNNTestSuiteRegister(FillTensorflowTest, "op/fill/tf"); +MNNTestSuiteRegister(FillTest, "op/fill"); diff --git a/test/op/GatherTest.cpp b/test/op/GatherTest.cpp index e147aed7..0c601d0d 100644 --- a/test/op/GatherTest.cpp +++ b/test/op/GatherTest.cpp @@ -8,11 +8,7 @@ #include #include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" - using namespace MNN::Express; class GatherNDTest : public MNNTestCase { public: @@ -37,4 +33,28 @@ public: return true; } }; +class GatherTest : public MNNTestCase { +public: + virtual ~GatherTest() = default; + virtual bool run() { + auto params = _Input({4,3,2}, NCHW); + params->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21,0, 22.0, 23.0, 24.0}; + auto inputPtr = params->writeMap(); + memcpy(inputPtr, inpudata, 24 * sizeof(float)); + params->unMap(); + const int indices_data[] = {1, 0, 1, 0}; + auto indices = _Const(indices_data, {4}, NCHW, halide_type_of()); + auto output = _Gather(params, indices); + const std::vector expectedOutput = {7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + auto 
gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 24, 0.01)) { + MNN_ERROR("GatherTest test failed!\n"); + return false; + } + return true; + } +}; MNNTestSuiteRegister(GatherNDTest, "op/gather_nd"); +MNNTestSuiteRegister(GatherTest, "op/gather"); diff --git a/test/op/GatherV2Test.cpp b/test/op/GatherV2Test.cpp index fc5d8028..00acff57 100644 --- a/test/op/GatherV2Test.cpp +++ b/test/op/GatherV2Test.cpp @@ -5,212 +5,30 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" - -using namespace MNN; - -static Interpreter *create(DataType type, int o, int s, int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({o, s})); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(type); - ib.add_dformat(MNN_DATA_FORMAT_NHWC); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(std::vector({b, h, w, c})); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(DataType_DT_INT32); - ib.add_dformat(MNN_DATA_FORMAT_NHWC); - auto input = ib.Finish(); - auto name = fbb.CreateString("indices"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto gb = GatherV2Builder(fbb); - gb.add_Taxis(DataType_DT_INT32); - gb.add_Tindices(DataType_DT_INT32); - gb.add_Tparams(type); - auto gatherV2 = gb.Finish(); - auto name = fbb.CreateString("GatherV2"); - auto iv = fbb.CreateVector(std::vector({0, 1})); - auto ov = fbb.CreateVector(std::vector({2})); - - OpBuilder builder(fbb); - builder.add_type(OpType_GatherV2); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_GatherV2); - builder.add_main(flatbuffers::Offset(gatherV2.o)); - vec.push_back(builder.Finish()); - } - - BlobBuilder db(fbb); - db.add_dataType(type); - db.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto desinated = db.Finish(); - BlobBuilder qb(fbb); - qb.add_dataType(DataType_DT_INT32); - qb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto qnt = qb.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(desinated.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(qnt.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(2); - tdb.add_blob(flatbuffers::Offset(desinated.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "indices", 
"output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class GatherV2Test : public MNNTestCase { public: virtual ~GatherV2Test() = default; virtual bool run() { - DataType types[] = { - DataType_DT_INT32, DataType_DT_FLOAT, - }; - for (int t = 0; t < sizeof(types) / sizeof(DataType); t++) { - DataType type = (DataType)types[t]; - for (int o = 1; o <= 4; o *= 2) { - for (int s = 1; s <= 4; s *= 2) { - for (int b = 1; b <= 2; b *= 2) { - for (int h = 1; h <= 4; h *= 2) { - for (int w = 1; w <= 4; w *= 2) { - for (int c = 1; c <= 4; c *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(type, o, s, b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(2, Tensor::TENSORFLOW); - { - input->setType(type); - input->buffer().dim[0].extent = o; - input->buffer().dim[1].extent = s; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - if (type == DataType_DT_FLOAT) { - for (int i = 0; i < o * s; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - } else if (type == DataType_DT_INT32) { - for (int i = 0; i < o * s; i++) { - input->host()[i] = rand() % 255; - } - } - - auto host = net->getSessionInput(CPU, "input"); - auto device = net->getSessionInput(GPU, "input"); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // indices - auto indices = new Tensor(4, Tensor::TENSORFLOW); - { - indices->setType(DataType_DT_INT32); - indices->buffer().dim[0].extent = b; - indices->buffer().dim[1].extent = h; - indices->buffer().dim[2].extent = w; - indices->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(indices); - indices->buffer().host = (uint8_t *)malloc(indices->size()); - for (int i = 0; i < b * c * h * w; i++) { - indices->host()[i] = rand() % o; - } - auto host = net->getSessionInput(CPU, "indices"); - auto device = net->getSessionInput(GPU, "indices"); - net->getBackend(CPU, host)->onCopyBuffer(indices, host); - net->getBackend(GPU, device)->onCopyBuffer(indices, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - free(indices->buffer().host); - delete input; - delete indices; - delete net; - }); - } - } - } - } - } - } + auto params = _Input({4,3,2}, NCHW); + params->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21,0, 22.0, 23.0, 24.0}; + auto inputPtr = params->writeMap(); + memcpy(inputPtr, inpudata, 24 * sizeof(float)); + params->unMap(); + const int indices_data[] = {1, 0, 1, 0}; + auto indices = _Const(indices_data, {4}, NCHW, halide_type_of()); + auto output = _GatherV2(params, indices, nullptr); + const std::vector expectedOutput = {7.0, 8.0, 9.0, 
10.0, 11.0, 12.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 24, 0.01)) { + MNN_ERROR("GatherV2Test test failed!\n"); + return false; } return true; } diff --git a/test/op/PReLUTest.cpp b/test/op/PReLUTest.cpp index aa0bb19e..d98fa214 100644 --- a/test/op/PReLUTest.cpp +++ b/test/op/PReLUTest.cpp @@ -5,128 +5,33 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - std::vector slopes; - for (int i = 0; i < c; i++) { - slopes.push_back(i + 1); - } - auto data = fbb.CreateVector(slopes); - auto pb = PReluBuilder(fbb); - pb.add_slopeCount(c); - pb.add_slope(data); - auto prelu = pb.Finish(); - auto name = fbb.CreateString("prelu"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_PReLU); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_PRelu); - builder.add_main(flatbuffers::Offset(prelu.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class PReLUTest : public MNNTestCase { +using namespace MNN::Express; +class PreluTest : public MNNTestCase { public: - virtual ~PReLUTest() = default; + virtual ~PreluTest() = default; virtual bool run() { - for (int b = 1; b <= 2; b *= 2) { - for (int c = 1; c <= 16; c *= 2) { - for (int h = 1; h <= 16; h *= 2) { - for (int w = 1; w <= 16; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } 
- - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } + auto input = _Input({1, 4, 1, 1}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 2.0, -3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + input = _Convert(input, NC4HW4); + auto output = _PRelu(input, {3.0, 1.5, 1.5, 1.5}); + output = _Convert(output,NCHW); + const std::vector expectedOutput = {-3.0, 2.0, -4.5, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("PreluTest test failed!\n"); + return false; } return true; } }; -MNNTestSuiteRegister(PReLUTest, "op/prelu"); +MNNTestSuiteRegister(PreluTest, "op/prelu"); diff --git a/test/op/PadTest.cpp b/test/op/PadTest.cpp new file mode 100644 index 00000000..24565329 --- /dev/null +++ b/test/op/PadTest.cpp @@ -0,0 +1,36 @@ +// +// PadTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/31. +// Copyright © 2018, Alibaba Group Holding Limited +// +#include +#include +#include "MNNTestSuite.h" +#include "TestUtils.h" +using namespace MNN::Express; +class PadTest : public MNNTestCase { +public: + virtual ~PadTest() = default; + virtual bool run() { + auto input = _Input({1, 2, 2, 1}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + const int paddings_data[] = {0, 0, 1, 1, 1, 1, 0, 0}; + auto paddings = _Const(paddings_data, {4, 2}, NCHW, halide_type_of()); + auto output = _Pad(input, paddings); + const std::vector expectedOutput = {0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -2.0, 0.0, 0.0, 3.0, 4.0, 0.0, 0.0, 0.0, 0.0, 0.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 16, 0.01)) { + MNN_ERROR("PadTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(PadTest, "op/pad"); diff --git a/test/op/ReLU6Test.cpp b/test/op/ReLU6Test.cpp index 82e4d46e..e301c10e 100644 --- a/test/op/ReLU6Test.cpp +++ b/test/op/ReLU6Test.cpp @@ -5,209 +5,31 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(float slope, int b, int c, int h, int w, bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(tensorflow ? 
std::vector({b, h, w, c}) : std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rb = ReluBuilder(fbb); - rb.add_slope(slope); - auto relu6 = rb.Finish(); - auto name = fbb.CreateString("relu6"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_ReLU6); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Relu); - builder.add_main(flatbuffers::Offset(relu6.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - - if (tensorflow) { - BlobBuilder builder(fbb); - builder.add_dataType(DataType_DT_FLOAT); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class ReLU6CaffeTest : public MNNTestCase { +using namespace MNN::Express; +class Relu6Test : public MNNTestCase { public: - virtual ~ReLU6CaffeTest() = default; + virtual ~Relu6Test() = default; virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - float slope = rand() % 255 / 255.f; - auto net = create(slope, b, c, h, w, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 - 127; - } - } - - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // infer - 
assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, 3.0, 6.0, 9.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Relu6(input); + const std::vector expectedOutput = {0.0, 3.0, 6.0, 6.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("Relu6Test test failed!\n"); + return false; } return true; } }; - -class ReLU6TensorflowTest : public MNNTestCase { -public: - virtual ~ReLU6TensorflowTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 2; h <= 8; h *= 2) { // CPU relu6 take at least 4 input - -||| - for (int w = 2; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - float slope = rand() % 255 / 255.f; - auto net = create(slope, b, c, h, w, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 - 127; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - return true; - } -}; -MNNTestSuiteRegister(ReLU6CaffeTest, "op/relu6/caffe"); -MNNTestSuiteRegister(ReLU6TensorflowTest, "op/relu6/tensorflow"); +MNNTestSuiteRegister(Relu6Test, "op/relu6"); diff --git a/test/op/ReLUTest.cpp b/test/op/ReLUTest.cpp index 1a278888..bad415c1 100644 --- a/test/op/ReLUTest.cpp +++ b/test/op/ReLUTest.cpp @@ -5,209 +5,31 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(float slope, int b, int c, int h, int w, bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(tensorflow ? 
std::vector({b, h, w, c}) : std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rb = ReluBuilder(fbb); - rb.add_slope(slope); - auto relu = rb.Finish(); - auto name = fbb.CreateString("relu"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_ReLU); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Relu); - builder.add_main(flatbuffers::Offset(relu.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - - if (tensorflow) { - BlobBuilder builder(fbb); - builder.add_dataType(DataType_DT_FLOAT); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class ReLUCaffeTest : public MNNTestCase { +using namespace MNN::Express; +class ReluTest : public MNNTestCase { public: - virtual ~ReLUCaffeTest() = default; + virtual ~ReluTest() = default; virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - float slope = rand() % 255 / 255.f; - auto net = create(slope, b, c, h, w, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 - 127; - } - } - - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // infer - 
assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Relu(input,0.5); + const std::vector expectedOutput = {-0.5, -1, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("ReluTest test failed!\n"); + return false; } return true; } }; - -class ReLUTensorflowTest : public MNNTestCase { -public: - virtual ~ReLUTensorflowTest() = default; - virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c *= 2) { - for (int h = 2; h <= 8; h *= 2) { // CPU relu take at least 4 input - -||| - for (int w = 2; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - float slope = rand() % 255 / 255.f; - auto net = create(slope, b, c, h, w, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 - 127; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - return true; - } -}; -MNNTestSuiteRegister(ReLUCaffeTest, "op/relu/caffe"); -MNNTestSuiteRegister(ReLUTensorflowTest, "op/relu/tensorflow"); +MNNTestSuiteRegister(ReluTest, "op/relu"); diff --git a/test/op/ReductionTest.cpp b/test/op/ReductionTest.cpp index 6ef542f5..55157b7b 100644 --- a/test/op/ReductionTest.cpp +++ b/test/op/ReductionTest.cpp @@ -5,170 +5,118 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" - -using namespace MNN; - -static Interpreter *create(ReductionType op, std::vector dims, bool kd, int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto idims = fbb.CreateVector(std::vector({b, h, w, c})); - InputBuilder ib(fbb); - ib.add_dims(idims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rdims = fbb.CreateVector(dims); - auto rpb = ReductionParamBuilder(fbb); - rpb.add_operation(op); - rpb.add_dType(DataType_DT_FLOAT); - rpb.add_dim(rdims); - rpb.add_coeff(0.f); - rpb.add_keepDims(kd); - auto reduction = rpb.Finish(); - - auto name = fbb.CreateString("reduction"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_Reduction); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_ReductionParam); - builder.add_main(flatbuffers::Offset(reduction.o)); - vec.push_back(builder.Finish()); - } - - BlobBuilder builder(fbb); - builder.add_dataType(DataType_DT_FLOAT); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class ReductionTest : public MNNTestCase { +using namespace MNN::Express; +class ReduceSumTest : public MNNTestCase { public: - virtual ~ReductionTest() = default; + virtual ~ReduceSumTest() = default; virtual bool run() { - ReductionType ops[] = {ReductionType_SUM, ReductionType_MEAN, ReductionType_MAXIMUM, ReductionType_MINIMUM, - ReductionType_PROD}; - - for (int i = 0; i < sizeof(ops) / sizeof(ReductionType); i++) { - ReductionType op = ops[i]; - - for (int d = 1; d <= 0b1111; d++) { - for (int kd = 0; kd <= 1; kd++) { - for (int b = 3; b <= 3; b++) { - for (int c = 5; c <= 5; c++) { - for (int h = 7; h <= 7; h++) { - for (int w = 9; w <= 9; w++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - std::vector dims; - if (d & 0b0001) - dims.push_back(0); - if (d & 0b0010) - 
dims.push_back(1); - if (d & 0b0100) - dims.push_back(2); - if (d & 0b1000) - dims.push_back(3); - - auto net = create(op, dims, kd, b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - } - - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } - } + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _ReduceSum(input); + const std::vector expectedOutput = {4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0.01)) { + MNN_ERROR("ReduceSumTest test failed!\n"); + return false; } return true; } }; -MNNTestSuiteRegister(ReductionTest, "op/reduction"); +class ReduceMeanTest : public MNNTestCase { +public: + virtual ~ReduceMeanTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _ReduceMean(input); + const std::vector expectedOutput = {1.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0.01)) { + MNN_ERROR("ReduceMeanTest test failed!\n"); + return false; + } + return true; + } +}; +class ReduceMaxTest : public MNNTestCase { +public: + virtual ~ReduceMaxTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _ReduceMax(input); + const std::vector expectedOutput = {4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0.01)) { + MNN_ERROR("ReduceMaxTest test failed!\n"); + return false; + } + return true; + } +}; +class ReduceMinTest : public MNNTestCase { +public: + virtual ~ReduceMinTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _ReduceMin(input); + const std::vector expectedOutput = {-2.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0.01)) { + MNN_ERROR("ReduceMinTest test failed!\n"); + return false; + } + return 
true; + } +}; +class ReduceProdTest : public MNNTestCase { +public: + virtual ~ReduceProdTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _ReduceProd(input); + const std::vector expectedOutput = {24.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 1, 0.01)) { + MNN_ERROR("ReduceProdTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(ReduceSumTest, "op/reduction/reduce_sum"); +MNNTestSuiteRegister(ReduceMeanTest, "op/reduction/reduce_mean"); +MNNTestSuiteRegister(ReduceMaxTest, "op/reduction/reduce_max"); +MNNTestSuiteRegister(ReduceMinTest, "op/reduction/reduce_min"); +MNNTestSuiteRegister(ReduceProdTest, "op/reduction/reduce_prod"); diff --git a/test/op/ReshapeTest.cpp b/test/op/ReshapeTest.cpp index 588b6dc0..be59b540 100644 --- a/test/op/ReshapeTest.cpp +++ b/test/op/ReshapeTest.cpp @@ -5,648 +5,117 @@ // Created by MNN on 2019/01/15. // Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" - -using namespace MNN; - -static Interpreter *create(MNN_DATA_FORMAT fmt, std::vector inputs, std::vector outputs, bool dynamic, - bool tensorflow) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(inputs); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(DataType_DT_FLOAT); - ib.add_dformat(tensorflow ? MNN_DATA_FORMAT_NHWC : MNN_DATA_FORMAT_NC4HW4); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - if (dynamic) { - auto dims = fbb.CreateVector(std::vector({(int)outputs.size()})); - auto data = fbb.CreateVector(outputs); - BlobBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dataType(DataType_DT_INT32); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_int32s(flatbuffers::Offset>(data.o)); - auto input = ib.Finish(); - auto name = fbb.CreateString("shape"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(outputs); - auto rb = ReshapeBuilder(fbb); - rb.add_dims(dims); - rb.add_dimType(fmt); - auto reshape = rb.Finish(); - auto name = fbb.CreateString("reshape"); - auto iv = fbb.CreateVector(dynamic ? std::vector({0, 1}) : std::vector({0})); - auto ov = fbb.CreateVector(dynamic ? 
std::vector({2}) : std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Reshape); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Reshape); - builder.add_main(flatbuffers::Offset(reshape.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = dynamic ? fbb.CreateVectorOfStrings({"input", "shape", "output"}) - : fbb.CreateVectorOfStrings({"input", "output"}); - if (tensorflow) { - BlobBuilder fb(fbb); - fb.add_dataType(DataType_DT_FLOAT); - fb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto flt = fb.Finish(); - BlobBuilder qb(fbb); - qb.add_dataType(DataType_DT_INT32); - qb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto qnt = qb.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - if (dynamic) { - TensorDescribeBuilder tdb1(fbb); - tdb1.add_index(1); - tdb1.add_blob(flatbuffers::Offset(qnt.o)); - desc.push_back(tdb1.Finish()); - - TensorDescribeBuilder tdb2(fbb); - tdb2.add_index(2); - tdb2.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb2.Finish()); - } else { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - } else { - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - } - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class ReshapeCaffe4Test : public MNNTestCase { +using namespace MNN::Express; +class ReshapeNCHWTest : public MNNTestCase { public: - virtual ~ReshapeCaffe4Test() = default; + virtual ~ReshapeNCHWTest() = default; virtual bool run() { - for (int f = MNN_DATA_FORMAT_NCHW; f <= MNN_DATA_FORMAT_NHWC; f++) { - for (int d = 0; d <= 1; d++) { - for (int i = 0; i < 24; i++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, c, h, w}; - std::vector rest = inputs; - std::vector outputs; - - auto index = 0; - index = i / 6; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 3 - index = (i % 6) / 2; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 2 - index = i % 2; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 1 - outputs.push_back(rest[0]); - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - auto host = 
net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } + auto input = _Input({4}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + const int shape_data[] = {1, 4, 1, 1}; + auto shape = _Const(shape_data,{4}, NCHW, halide_type_of()); + auto output = _Reshape(input, shape ); + const std::vector expectedOutput = {-1.0, -2.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("ReshapeNCHWTest test failed!\n"); + return false; + } + auto gotDim = output->getInfo()->dim; + if (!checkVector(gotDim.data(), shape_data, 4, 0)) { + MNN_ERROR("ReshapeNCHWTest test failed!\n"); + return false; + } + auto format = output->getInfo()->order; + if(NCHW != format) { + MNN_ERROR("ReshapeNCHWTest test failed!\n"); + return false; + } + return true; + } +}; +class ReshapeNHWCTest : public MNNTestCase { +public: + virtual ~ReshapeNHWCTest() = default; + virtual bool run() { + auto input = _Input({4}, NHWC); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + const int shape_data[] = {1, 1, 1, 4}; + auto shape = _Const(shape_data,{4}, NCHW, halide_type_of()); + auto output = _Reshape(input, shape ); + const std::vector expectedOutput = {-1.0, -2.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.01)) { + MNN_ERROR("ReshapeNHWCTest test failed!\n"); + return false; + } + auto gotDim = output->getInfo()->dim; + if (!checkVector(gotDim.data(), shape_data, 4, 0)) { + MNN_ERROR("ReshapeNHWCTest test failed!\n"); + return false; + } + auto format = output->getInfo()->order; + if(NHWC != format) { + MNN_ERROR("ReshapeNHWCTest test failed!\n"); + return false; } return true; } }; - -class ReshapeTensorflow4Test : public MNNTestCase { +class ReshapeNC4HW4Test : public MNNTestCase { public: - virtual ~ReshapeTensorflow4Test() = default; + virtual ~ReshapeNC4HW4Test() = default; virtual bool run() { - for (int f = MNN_DATA_FORMAT_NCHW; f <= MNN_DATA_FORMAT_NHWC; f++) { - for (int d = 0; d <= 1; d++) { - for (int i = 0; i < 24; i++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, h, w, c}; - std::vector rest = inputs; - std::vector outputs; - - auto index = 0; - index = i / 6; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 3 - index = (i % 6) / 2; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 2 - index = i % 2; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 1 - outputs.push_back(rest[0]); - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new 
Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host<float>()[j] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } + auto input = _Input({1, 1, 1, 64}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, + 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, inpudata, 64 * sizeof(float)); + input->unMap(); + input = _Convert(input, NC4HW4); + auto output = _Reshape(input, {1, 4, 4, 4}, NCHW); + output = _Convert(output, NCHW); + const std::vector<float> expectedOutput = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, + 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, + 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0}; + auto gotOutput = output->readMap<float>(); + if (!checkVector<float>(gotOutput, expectedOutput.data(), 64, 0.01)) { + MNN_ERROR("ReshapeNC4HW4Test test failed!\n"); + return false; + } + const std::vector<int> expectedDim = {1, 4, 4, 4}; + auto gotDim = output->getInfo()->dim; + if (!checkVector<int>(gotDim.data(), expectedDim.data(), 4, 0)) { + MNN_ERROR("ReshapeNC4HW4Test test failed!\n"); + return false; + } + auto format = output->getInfo()->order; + if(NCHW != format) { + MNN_ERROR("ReshapeNC4HW4Test test failed!\n"); + return false; } return true; } }; - -class ReshapeCaffe3Test : public MNNTestCase { -public: - virtual ~ReshapeCaffe3Test() = default; - virtual bool run() { - int f = MNN_DATA_FORMAT_NCHW; - for (int d = 0; d <= 1; d++) { - for (int i = 0; i < 24; i++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector<int> inputs = {b, c, h, w}; - std::vector<int> rest = inputs; - std::vector<int> outputs; - - auto index = 0; - index = i / 6; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 3 - index = (i % 6) / 2; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 2 - outputs.push_back(rest[1] * rest[0]); - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; -
input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - return true; - } -}; - -class ReshapeTensorflow3Test : public MNNTestCase { -public: - virtual ~ReshapeTensorflow3Test() = default; - virtual bool run() { - for (int f = MNN_DATA_FORMAT_NCHW; f <= MNN_DATA_FORMAT_NHWC; f++) { - for (int d = 0; d <= 1; d++) { - for (int i = 0; i < 24; i++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, h, w, c}; - std::vector rest = inputs; - std::vector outputs; - - auto index = 0; - index = i / 6; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 3 - index = (i % 6) / 2; - outputs.push_back(rest[index]); - rest.erase(rest.begin() + index); // 0 ~ 2 - outputs.push_back(rest[1] * rest[0]); - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - return true; - } -}; - -class ReshapeCaffe2Test : public MNNTestCase { -public: - virtual ~ReshapeCaffe2Test() = default; - virtual bool run() { - int f = MNN_DATA_FORMAT_NCHW; - for (int d = 0; d <= 1; d++) { - for (int i = 0; i < 24; i++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, c, h, w}; - std::vector rest = inputs; - std::vector outputs; - - auto index0 = i / 6, index1 = (i % 6) / 2; - int tmp = rest[index0]; - rest.erase(rest.begin() + index0); - outputs.push_back(tmp * rest[index1]); - rest.erase(rest.begin() + index1); - outputs.push_back(rest[1] * rest[0]); - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - 
TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - return true; - } -}; - -class ReshapeTensorflow2Test : public MNNTestCase { -public: - virtual ~ReshapeTensorflow2Test() = default; - virtual bool run() { - for (int f = MNN_DATA_FORMAT_NCHW; f <= MNN_DATA_FORMAT_NHWC; f++) { - for (int d = 0; d <= 1; d++) { - for (int i = 0; i < 24; i++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, h, w, c}; - std::vector rest = inputs; - std::vector outputs; - - auto index0 = i / 6, index1 = (i % 6) / 2; - int tmp = rest[index0]; - rest.erase(rest.begin() + index0); - outputs.push_back(tmp * rest[index1]); - rest.erase(rest.begin() + index1); - outputs.push_back(rest[1] * rest[0]); - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int j = 0; j < b * c * h * w; j++) { - input->host()[j] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - return true; - } -}; - -class ReshapeCaffe1Test : public MNNTestCase { -public: - virtual ~ReshapeCaffe1Test() = default; - virtual bool run() { - for (int f = MNN_DATA_FORMAT_NCHW; f <= MNN_DATA_FORMAT_NHWC; f++) { - for (int d = 0; d <= 1; d++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, c, h, w}; - std::vector outputs = {b * c * h * w}; - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, false); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, 
host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - return true; - } -}; - -class ReshapeTensorflow1Test : public MNNTestCase { -public: - virtual ~ReshapeTensorflow1Test() = default; - virtual bool run() { - for (int f = MNN_DATA_FORMAT_NCHW; f <= MNN_DATA_FORMAT_NHWC; f++) { - for (int d = 0; d <= 1; d++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - int b = 3, c = 5, h = 7, w = 9; - std::vector inputs = {b, h, w, c}; - std::vector outputs = {b * c * h * w}; - - // nets - auto net = create((MNN_DATA_FORMAT)f, inputs, outputs, d, true); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - return true; - } -}; -MNNTestSuiteRegister(ReshapeCaffe4Test, "op/reshape/caffe_4"); -MNNTestSuiteRegister(ReshapeTensorflow4Test, "op/reshape/tensorflow_4"); -MNNTestSuiteRegister(ReshapeCaffe3Test, "op/reshape/caffe_3"); -MNNTestSuiteRegister(ReshapeTensorflow3Test, "op/reshape/tensorflow_3"); -MNNTestSuiteRegister(ReshapeCaffe2Test, "op/reshape/caffe_2"); -MNNTestSuiteRegister(ReshapeTensorflow2Test, "op/reshape/tensorflow_2"); -MNNTestSuiteRegister(ReshapeCaffe1Test, "op/reshape/caffe_1"); -MNNTestSuiteRegister(ReshapeTensorflow1Test, "op/reshape/tensorflow_1"); +MNNTestSuiteRegister(ReshapeNCHWTest, "op/reshape/nchw"); +MNNTestSuiteRegister(ReshapeNHWCTest, "op/reshape/nhwc"); +MNNTestSuiteRegister(ReshapeNC4HW4Test, "op/reshape/nc4hw4"); diff --git a/test/op/ResizeTest.cpp b/test/op/ResizeTest.cpp index 3b951ed8..55dd6054 100644 --- a/test/op/ResizeTest.cpp +++ b/test/op/ResizeTest.cpp @@ -5,124 +5,36 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" - -using namespace MNN; - -static Interpreter *create(int type, float ws, float hs, int w, int h, int c, int b) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({b, c, h, w})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto rb = ResizeBuilder(fbb); - rb.add_xScale(ws); - rb.add_yScale(hs); - auto resize = rb.Finish(); - auto name = fbb.CreateString("resize"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Resize); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Resize); - builder.add_main(flatbuffers::Offset(resize.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class ResizeTest : public MNNTestCase { public: virtual ~ResizeTest() = default; virtual bool run() { - for (int b = 1; b <= 2; b++) { - for (int c = 1; c <= 8; c++) { - for (int w = 1; w <= 8; w *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (float ws = 1; ws <= w; ws *= 2) { - for (float hs = 1; hs <= h; hs *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(3, ws, hs, w, h, c, b); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - } + auto input = _Input({1, 2, 2, 1}, NHWC); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 
4.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + input = _Convert(input, NC4HW4); + auto output = _Resize(input, 2.0, 2.0); + output = _Convert(output, NHWC); + const std::vector<float> expectedOutput = {-1.0, -1.25, -1.75, -2.0, 0.0, -0.125, -0.375, -0.5, 2, 2.125, 2.375, 2.5, 3.0, 3.25, 3.75, 4.0}; + auto gotOutput = output->readMap<float>(); + if (!checkVector<float>(gotOutput, expectedOutput.data(), 16, 0.01)) { + MNN_ERROR("ResizeTest test failed!\n"); + return false; + } + const std::vector<int> expectedDim = {1, 4, 4, 1}; + auto gotDim = output->getInfo()->dim; + if (!checkVector<int>(gotDim.data(), expectedDim.data(), 4, 0)) { + MNN_ERROR("ResizeTest test failed!\n"); + return false; } return true; } diff --git a/test/op/ShapeTest.cpp b/test/op/ShapeTest.cpp new file mode 100644 index 00000000..26398f34 --- /dev/null +++ b/test/op/ShapeTest.cpp @@ -0,0 +1,35 @@ +// +// ShapeTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/26. +// Copyright © 2018, Alibaba Group Holding Limited +// +#include <MNN/expr/Expr.hpp> +#include <MNN/expr/ExprCreator.hpp> +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class ShapeTest : public MNNTestCase { +public: + virtual ~ShapeTest() = default; + virtual bool run() { + auto input = _Input({1, 1, 1, 4}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Shape(input); + const std::vector<int> expectedOutput = {1, 1, 1, 4}; + auto gotOutput = output->readMap<int>(); + if (!checkVector<int>(gotOutput, expectedOutput.data(), 4, 0)) { + MNN_ERROR("ShapeTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(ShapeTest, "op/shape"); diff --git a/test/op/SoftmaxTest.cpp b/test/op/SoftmaxTest.cpp index fa995bf1..ac6f1fb4 100644 --- a/test/op/SoftmaxTest.cpp +++ b/test/op/SoftmaxTest.cpp @@ -5,228 +5,31 @@ // Created by MNN on 2019/01/15.
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int axis, std::vector shape) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(shape); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto ab = AxisBuilder(fbb); - ab.add_axis(axis); - auto softmax = ab.Finish(); - auto name = fbb.CreateString("softmax"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Softmax); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Axis); - builder.add_main(flatbuffers::Offset(softmax.o)); - vec.push_back(builder.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "output"}); - NetBuilder builder(fbb); - builder.add_oplists(ops); - builder.add_tensorName(names); - fbb.Finish(builder.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - -class SoftmaxDim4Test : public MNNTestCase { +using namespace MNN::Express; +class SoftmaxTest : public MNNTestCase { public: - virtual ~SoftmaxDim4Test() = default; + virtual ~SoftmaxTest() = default; virtual bool run() { - for (int axis = 0; axis <= 3; axis++) { - for (int b = 1; b <= 1; b *= 2) { // 1 - for (int c = 1; c <= 8; c *= 2) { - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(axis, {b, c, h, w}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = c; - input->buffer().dim[2].extent = h; - input->buffer().dim[3].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } + auto input = _Input({2,2}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr 
= input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Softmax(input); + const std::vector expectedOutput = {0.7310586 , 0.26894143, 0.26894143, 0.7310586}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.0001)) { + MNN_ERROR("SoftmaxTest test failed!\n"); + return false; } return true; } }; - -class SoftmaxDim3Test : public MNNTestCase { -public: - virtual ~SoftmaxDim3Test() = default; - virtual bool run() { - for (int axis = 0; axis <= 2; axis++) { - for (int c = 1; c <= 1; c *= 2) { // 1 - for (int h = 1; h <= 8; h *= 2) { - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(axis, {c, h, w}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(3); - { - input->buffer().dim[0].extent = c; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - } - return true; - } -}; - -class SoftmaxDim2Test : public MNNTestCase { -public: - virtual ~SoftmaxDim2Test() = default; - virtual bool run() { - for (int axis = 0; axis <= 1; axis++) { - for (int h = 1; h <= 1; h *= 2) { // 1 - for (int w = 1; w <= 8; w *= 2) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // nets - auto net = create(axis, {h, w}); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(2); - { - input->buffer().dim[0].extent = h; - input->buffer().dim[1].extent = w; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } - } - return true; - } -}; -MNNTestSuiteRegister(SoftmaxDim4Test, "op/softmax/dim4"); -MNNTestSuiteRegister(SoftmaxDim3Test, "op/softmax/dim3"); -MNNTestSuiteRegister(SoftmaxDim2Test, "op/softmax/dim2"); +MNNTestSuiteRegister(SoftmaxTest, "op/softmax"); diff --git a/test/op/SoftplusTest.cpp b/test/op/SoftplusTest.cpp new file mode 100644 index 00000000..f173bd9c --- /dev/null +++ b/test/op/SoftplusTest.cpp @@ -0,0 +1,35 @@ +// +// SoftplusTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/26. 
+// Copyright © 2018, Alibaba Group Holding Limited +// +#include <MNN/expr/Expr.hpp> +#include <MNN/expr/ExprCreator.hpp> +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class SoftplusTest : public MNNTestCase { +public: + virtual ~SoftplusTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Softplus(input); + const std::vector<float> expectedOutput = {0.31326166, 0.12692805, 3.0485873 , 4.01815}; + auto gotOutput = output->readMap<float>(); + if (!checkVector<float>(gotOutput, expectedOutput.data(), 4, 0.0001)) { + MNN_ERROR("SoftplusTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(SoftplusTest, "op/softplus"); diff --git a/test/op/SoftsignTest.cpp b/test/op/SoftsignTest.cpp new file mode 100644 index 00000000..76cc7e98 --- /dev/null +++ b/test/op/SoftsignTest.cpp @@ -0,0 +1,35 @@ +// +// SoftsignTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/26. +// Copyright © 2018, Alibaba Group Holding Limited +// +#include <MNN/expr/Expr.hpp> +#include <MNN/expr/ExprCreator.hpp> +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class SoftsignTest : public MNNTestCase { +public: + virtual ~SoftsignTest() = default; + virtual bool run() { + auto input = _Input({4,}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Softsign(input); + const std::vector<float> expectedOutput = {-0.5 , -0.6666667, 0.75 , 0.8}; + auto gotOutput = output->readMap<float>(); + if (!checkVector<float>(gotOutput, expectedOutput.data(), 4, 0.0001)) { + MNN_ERROR("SoftsignTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(SoftsignTest, "op/softsign"); diff --git a/test/op/SplitTest.cpp b/test/op/SplitTest.cpp new file mode 100644 index 00000000..86268c09 --- /dev/null +++ b/test/op/SplitTest.cpp @@ -0,0 +1,52 @@ +// +// SplitTest.cpp +// MNNTests +// +// Created by MNN on 2019/12/26.
+// Copyright © 2018, Alibaba Group Holding Limited +// +#include <MNN/expr/Expr.hpp> +#include <MNN/expr/ExprCreator.hpp> +#include "MNNTestSuite.h" +#include "TestUtils.h" + +using namespace MNN::Express; +class SplitTest : public MNNTestCase { +public: + virtual ~SplitTest() = default; + virtual bool run() { + auto input = _Input({2,4}, NCHW); + input->setName("input"); + // set input data + const float input_data[] = {1.0 , 2.0, 5.0, 6.0, 3.0, 4.0, 7.0, 8.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, input_data, 8 * sizeof(float)); + auto outputs = _Split(input, {1, 3}, 1); + const std::vector<float> expectedOutput0 = {1.0 , 3.0}; + auto gotOutput0 = outputs[0]->readMap<float>(); + if (!checkVector<float>(gotOutput0, expectedOutput0.data(), 2, 0.0001)) { + MNN_ERROR("SplitTest test failed!\n"); + return false; + } + const std::vector<int> expectedDim0 = {2, 1}; + auto gotDim0 = outputs[0]->getInfo()->dim; + if (!checkVector<int>(gotDim0.data(), expectedDim0.data(), 2, 0)) { + MNN_ERROR("SplitTest test failed!\n"); + return false; + } + const std::vector<float> expectedOutput1 = {2.0 , 5.0, 6.0, 4.0, 7.0, 8.0}; + auto gotOutput1 = outputs[1]->readMap<float>(); + if (!checkVector<float>(gotOutput1, expectedOutput1.data(), 6, 0.0001)) { + MNN_ERROR("SplitTest test failed!\n"); + return false; + } + const std::vector<int> expectedDim1 = {2, 3}; + auto gotDim1 = outputs[1]->getInfo()->dim; + if (!checkVector<int>(gotDim1.data(), expectedDim1.data(), 2, 0)) { + MNN_ERROR("SplitTest test failed!\n"); + return false; + } + return true; + } +}; +MNNTestSuiteRegister(SplitTest, "op/split"); diff --git a/test/op/SqueezeTest.cpp b/test/op/SqueezeTest.cpp index 3db72783..a615951c 100644 --- a/test/op/SqueezeTest.cpp +++ b/test/op/SqueezeTest.cpp @@ -2,165 +2,38 @@ // SqueezeTest.cpp // MNNTests // -// Created by MNN on 2019/01/15. +// Created by MNN on 2019/12/26.
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "core/Session.hpp" -#include "TFQuantizeOp_generated.h" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(DataType type, std::vector squeeze, int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - std::vector> names; - - { - auto dims = fbb.CreateVector(std::vector({b, h, w, c})); - InputBuilder ib(fbb); - ib.add_dims(dims); - ib.add_dtype(type); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(squeeze); - auto spb = SqueezeParamBuilder(fbb); - spb.add_squeezeDims(dims); - auto sp = spb.Finish(); - auto name = fbb.CreateString("squeeze"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({1})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Squeeze); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_SqueezeParam); - builder.add_main(flatbuffers::Offset(sp.o)); - vec.push_back(builder.Finish()); - } - - BlobBuilder builder(fbb); - builder.add_dataType(type); - builder.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto blob = builder.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(blob.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto namesv = fbb.CreateVectorOfStrings({"input", "output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(namesv); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class SqueezeTest : public MNNTestCase { public: virtual ~SqueezeTest() = default; virtual bool run() { - DataType types[] = { - DataType_DT_INT32, DataType_DT_FLOAT, - }; - - for (int t = 0; t < sizeof(types) / sizeof(DataType); t++) { - DataType type = types[t]; - int b = 3, c = 5, h = 7, w = 9; - { - for (int mask = 0b0001; mask <= 0b1110; mask++) { - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - std::vector squeeze; - for (int j = 0; j < 4; j++) { - if (mask & (1 << j)) - squeeze.push_back(j); - } - - // nets - auto net = create(type, squeeze, b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input/output - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->setType(type); - input->buffer().dim[0].extent = b; - 
input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - - if (type == DataType_DT_FLOAT) { - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255 / 255.f; - } - } else if (type == DataType_DT_INT32) { - for (int i = 0; i < b * c * h * w; i++) { - input->host()[i] = rand() % 255; - } - } - auto host = net->getSessionInput(CPU, NULL); - auto device = net->getSessionInput(GPU, NULL); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); - } - } + auto input = _Input({1, 1, 1, 4}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + auto output = _Squeeze(input); + const std::vector expectedOutput = {-1.0, -2.0, 3.0, 4.0}; + auto gotOutput = output->readMap(); + if (!checkVector(gotOutput, expectedOutput.data(), 4, 0.0001)) { + MNN_ERROR("SqueezeTest test failed!\n"); + return false; + } + const std::vector expectedDim = {4}; + auto gotDim = output->getInfo()->dim; + if (!checkVector(gotDim.data(), expectedDim.data(), 1, 0)) { + MNN_ERROR("SqueezeTest test failed!\n"); + return false; } return true; } diff --git a/test/op/TileTest.cpp b/test/op/TileTest.cpp index 95a4f293..6400b0e9 100644 --- a/test/op/TileTest.cpp +++ b/test/op/TileTest.cpp @@ -5,161 +5,32 @@ // Created by MNN on 2019/01/15. 
// Copyright © 2018, Alibaba Group Holding Limited // - -#include +#include +#include #include "MNNTestSuite.h" -#include "MNN_generated.h" -#include "core/Session.hpp" -#include "core/TensorUtils.hpp" #include "TestUtils.h" -using namespace MNN; - -static Interpreter *create(int b, int c, int h, int w) { - flatbuffers::FlatBufferBuilder fbb; - std::vector> vec; - - { - auto dims = fbb.CreateVector(std::vector({b, h, w, c})); - InputBuilder ib(fbb); - ib.add_dims(dims); - auto input = ib.Finish(); - auto name = fbb.CreateString("input"); - auto iv = fbb.CreateVector(std::vector({0})); - auto ov = fbb.CreateVector(std::vector({0})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Input); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Input); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto dims = fbb.CreateVector(std::vector({4})); - auto data = fbb.CreateVector(std::vector({2, 4, 6, 8})); - BlobBuilder ib(fbb); - ib.add_dims(flatbuffers::Offset>(dims.o)); - ib.add_dataType(DataType_DT_INT32); - ib.add_dataFormat(MNN_DATA_FORMAT_NHWC); - ib.add_int32s(data); - auto input = ib.Finish(); - auto name = fbb.CreateString("multiply"); - auto iv = fbb.CreateVector(std::vector({})); - auto ov = fbb.CreateVector(std::vector({1})); - OpBuilder builder(fbb); - builder.add_type(OpType_Const); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - builder.add_main_type(OpParameter_Blob); - builder.add_main(flatbuffers::Offset(input.o)); - vec.push_back(builder.Finish()); - } - { - auto name = fbb.CreateString("tile"); - auto iv = fbb.CreateVector(std::vector({0, 1})); - auto ov = fbb.CreateVector(std::vector({2})); - - OpBuilder builder(fbb); - builder.add_type(OpType_Tile); - builder.add_name(name); - builder.add_inputIndexes(iv); - builder.add_outputIndexes(ov); - vec.push_back(builder.Finish()); - } - - BlobBuilder fb(fbb); - fb.add_dataType(DataType_DT_FLOAT); - fb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto flt = fb.Finish(); - BlobBuilder qb(fbb); - qb.add_dataType(DataType_DT_INT32); - qb.add_dataFormat(MNN_DATA_FORMAT_NHWC); - auto itg = qb.Finish(); - - std::vector> desc; - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(0); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(1); - tdb.add_blob(flatbuffers::Offset(itg.o)); - desc.push_back(tdb.Finish()); - } - { - TensorDescribeBuilder tdb(fbb); - tdb.add_index(2); - tdb.add_blob(flatbuffers::Offset(flt.o)); - desc.push_back(tdb.Finish()); - } - - auto ops = fbb.CreateVector(vec); - auto names = fbb.CreateVectorOfStrings({"input", "multiply", "output"}); - auto extras = fbb.CreateVector(desc); - NetBuilder net(fbb); - net.add_oplists(ops); - net.add_tensorName(names); - net.add_extraTensorDescribe(extras); - net.add_sourceType(NetSource_TENSORFLOW); - fbb.Finish(net.Finish()); - return Interpreter::createFromBuffer((const char *)fbb.GetBufferPointer(), fbb.GetSize()); -} - -static Tensor *infer(const Interpreter *net, Session *session) { - net->runSession(session); - return net->getSessionOutputAll(session).begin()->second; -} - +using namespace MNN::Express; class TileTest : public MNNTestCase { public: virtual ~TileTest() = default; virtual bool run() { - int b = 3, c = 5, h = 7, w = 9; - dispatch([&](MNNForwardType backend) -> void { - if (backend == MNN_FORWARD_CPU) - return; - // 
nets - auto net = create(b, c, h, w); - auto CPU = createSession(net, MNN_FORWARD_CPU); - auto GPU = createSession(net, backend); - if (!CPU || !GPU) { - delete net; - return; - } - - // input - auto input = new Tensor(4, Tensor::TENSORFLOW); - { - input->buffer().dim[0].extent = b; - input->buffer().dim[1].extent = h; - input->buffer().dim[2].extent = w; - input->buffer().dim[3].extent = c; - TensorUtils::setLinearLayout(input); - input->buffer().host = (uint8_t *)malloc(input->size()); - for (int i = 0; i < b * c * h * w; i++) { - input->host<float>()[i] = rand() % 255 / 255.f; - } - auto host = net->getSessionInput(CPU, "input"); - auto device = net->getSessionInput(GPU, "input"); - net->getBackend(CPU, host)->onCopyBuffer(input, host); - net->getBackend(GPU, device)->onCopyBuffer(input, device); - } - - // infer - assert(TensorUtils::compareTensors(infer(net, GPU), infer(net, CPU), 0.01)); - - // clean up - free(input->buffer().host); - delete input; - delete net; - }); + auto input = _Input({2,2}, NCHW); + input->setName("input_tensor"); + // set input data + const float inpudata[] = {-1.0, -2.0, 3.0, 4.0}; + auto inputPtr = input->writeMap<float>(); + memcpy(inputPtr, inpudata, 4 * sizeof(float)); + input->unMap(); + const int mul_data [] = {2, 2}; + auto mul = _Const(mul_data, {2}, NCHW, halide_type_of<int>()); + auto output = _Tile(input,mul); + const std::vector<float> expectedOutput = {-1.0, -2.0, -1.0, -2.0, 3.0, 4.0, 3.0, 4.0, -1.0, -2.0, -1.0, -2.0, 3.0, 4.0, 3.0, 4.0}; + auto gotOutput = output->readMap<float>(); + if (!checkVector<float>(gotOutput, expectedOutput.data(), 16, 0.0001)) { + MNN_ERROR("TileTest test failed!\n"); + return false; + } return true; } }; diff --git a/test/speed/BinarySpeedTest.cpp b/test/speed/BinarySpeedTest.cpp index 428baa6c..239be7e5 100644 --- a/test/speed/BinarySpeedTest.cpp +++ b/test/speed/BinarySpeedTest.cpp @@ -20,6 +20,32 @@ using namespace MNN::Express; #define TIME 100 class BinarySpeedTest : public MNNTestCase { public: + void SubScalarTest() { + auto input0 = _Input({WIDTH, HEIGHT}, NCHW); + auto input1 = _Input({}, NCHW); + auto output = input0 - input1; + { + AUTOTIME; + for (int i=0; i<TIME; ++i) { + input0->writeMap<float>(); + input1->writeMap<float>(); + output->readMap<float>(); + } + } + } + void AddScalarTest() { + auto input0 = _Input({}, NCHW); + auto input1 = _Input({WIDTH, HEIGHT}, NCHW); + auto output = input0 + input1; + { + AUTOTIME; + for (int i=0; i<TIME; ++i) { + input0->writeMap<float>(); + input1->writeMap<float>(); + output->readMap<float>(); + } + } + } void SubTest() { auto input0 = _Input({WIDTH, HEIGHT}); auto input1 = _Input({WIDTH, HEIGHT}); @@ -83,6 +109,8 @@ public: } SubTest(); AddTest(); + SubScalarTest(); + AddScalarTest(); return true; } }; diff --git a/tools/converter/CMakeLists.txt b/tools/converter/CMakeLists.txt index 0574a56a..1f765365 100644 --- a/tools/converter/CMakeLists.txt +++ b/tools/converter/CMakeLists.txt @@ -19,30 +19,32 @@ IF(MNN_BUILD_CONVERTER) include(${CMAKE_CURRENT_LIST_DIR}/source/optimizer/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/source/tflite/CMakeLists.txt) - file(GLOB COMMON_SRC ${CMAKE_CURRENT_LIST_DIR}/source/common/*.cpp ${CMAKE_SOURCE_DIR}/3rd_party/flatbuffers/src/util.cpp) + file(GLOB COMMON_SRC ${CMAKE_CURRENT_LIST_DIR}/source/common/*.cpp) add_executable(MNNDump2Json ${CMAKE_CURRENT_LIST_DIR}/source/MNNDump2Json.cpp) add_dependencies(MNNDump2Json MNN_SCHEMA_GEN) add_executable(MNNConvert ${CMAKE_CURRENT_LIST_DIR}/source/MNNConverter.cpp ${CMAKE_CURRENT_LIST_DIR}/source/cli.cpp ${CMAKE_CURRENT_LIST_DIR}/source/config.cpp - ${SCHEMA_TARGETS} - ) + ) IF(MNN_BUILD_SHARED_LIBS) -
add_library(MNNConvertDeps SHARED ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${PROJECT_SOURCE_DIR}/cmake/dummy.cpp) - target_link_libraries(MNNConvertDeps PRIVATE ${MNN_DEPS} ${Protobuf_LIBRARIES}) + add_library(MNNConvertDeps SHARED ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${CMAKE_SOURCE_DIR}/3rd_party/flatbuffers/src/util.cpp) + add_dependencies(MNNConvertDeps MNN) ELSE() - add_library(MNNConvertDeps STATIC ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${PROJECT_SOURCE_DIR}/cmake/dummy.cpp) - target_link_libraries(MNNConvertDeps INTERFACE ${MNN_DEPS} ${Protobuf_LIBRARIES}) + add_library(MNNConvertDeps STATIC ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${CMAKE_SOURCE_DIR}/3rd_party/flatbuffers/src/util.cpp) + ENDIF() + target_link_libraries(MNNConvertDeps PUBLIC ${MNN_DEPS} ${Protobuf_LIBRARIES}) + IF(MNN_PORTABLE_BUILD) + # protobuf::libprotobuf doesn't declare proper dependency on ZLIB + target_link_libraries(MNNConvertDeps PUBLIC z) ENDIF() - add_dependencies(MNNConvertDeps ${MNN_DEPS}) - target_link_libraries(MNNConvert ${MNN_DEPS} ${Protobuf_LIBRARIES}) - IF(NOT MNN_BUILD_SHARED_LIBS) if(APPLE) target_link_libraries(MNNConvert -Wl,-all_load MNNConvertDeps -Wl,-noall_load) elseif (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") target_link_libraries(MNNConvert -Wl,--whole-archive MNNConvertDeps -Wl,--no-whole-archive) + ELSE() + target_link_libraries(MNNConvert MNNConvertDeps) endif() ELSE() target_link_libraries(MNNConvert MNNConvertDeps) diff --git a/tools/converter/include/logkit.h b/tools/converter/include/logkit.h index 7c93f6a8..21b13f98 100644 --- a/tools/converter/include/logkit.h +++ b/tools/converter/include/logkit.h @@ -1,6 +1,6 @@ // // logkit.h -// MNN +// MNNConverter // // Created by MNN on 2019/01/31. // Copyright © 2018, Alibaba Group Holding Limited @@ -17,6 +17,18 @@ #include #include #include +/*! + * \brief exception class that will be thrown by + * default logger if DMLC_LOG_FATAL_THROW == 1 + */ +struct Error : public std::runtime_error { + /*! 
+ * \brief constructor + * \param s the error message + */ + explicit Error(const std::string& s) : std::runtime_error(s) { + } +}; #if defined(_MSC_VER) #pragma warning(disable : 4722) @@ -139,7 +151,7 @@ public: LogMessage(const char* file, int line) : log_stream_(std::cout) { #ifdef NDEBUG log_stream_ << "[" << pretty_date_.HumanDate() << "] " - << "@ " << line << ": "; + << ":" << line << ": "; #else log_stream_ << "[" << pretty_date_.HumanDate() << "] " << file << ":" << line << ": "; #endif @@ -170,8 +182,9 @@ public: #else ~LogMessageFatal() noexcept(false) { #endif - std::cout << log_stream_.str(); + std::cout << log_stream_.str()< #endif +#include #include "config.hpp" #include "logkit.h" @@ -53,7 +54,9 @@ cxxopts::Options Cli::initializeMNNConvertArgs(modelConfig &modelPath, int argc, } if (result.count("version")) { - std::cout << "\tVersion:" << ProjectConfig::version << std::endl; + std::cout << "\tVersion:" << ProjectConfig::version << std::endl + << "\tURL:" << MNN_REPOSITORY << std::endl + << "\tRevision:" << MNN_REVISION << std::endl; exit(EXIT_SUCCESS); } diff --git a/tools/converter/source/config.cpp b/tools/converter/source/config.cpp index e339de39..212824dc 100644 --- a/tools/converter/source/config.cpp +++ b/tools/converter/source/config.cpp @@ -7,6 +7,7 @@ // #include "config.hpp" +#include const std::string ProjectConfig::version =MNN_VERSION; ProjectConfig *ProjectConfig::m_pConfig = nullptr; std::mutex ProjectConfig::m_mutex; diff --git a/tools/converter/source/onnx/ArgMaxOnnx.cpp b/tools/converter/source/onnx/ArgMaxOnnx.cpp new file mode 100644 index 00000000..463abc3b --- /dev/null +++ b/tools/converter/source/onnx/ArgMaxOnnx.cpp @@ -0,0 +1,52 @@ +// +// ArgMax.cpp +// MNNConverter +// +// Created by MNN on 2020/01/07. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "onnxOpConverter.hpp" +#include + +DECLARE_OP_CONVERTER(ArgMaxOnnx); + +MNN::OpType ArgMaxOnnx::opType(){ + return MNN::OpType_ArgMax; +} + +MNN::OpParameter ArgMaxOnnx::type(){ + return MNN::OpParameter_ArgMax; +} + +void ArgMaxOnnx::run(MNN::OpT *dstOp, const onnx::NodeProto *onnxNode, std::vector initializers){ + auto axisT = new MNN::ArgMaxT; + int axis = 0; + int keepdims = 1; + int selectLastIndex = 0; // Boolean value. Default to False. + + for (int i = 0; i < onnxNode->attribute_size(); ++i) { + const auto& attributeProto = onnxNode->attribute(i); + const auto& attributeName = attributeProto.name(); + + if (attributeName == "axis") { + axis = attributeProto.i(); + } + if (attributeName == "keepdims") { + keepdims = attributeProto.i(); + } + if (attributeName == "select_last_index") { + // Ignored for now. MNN argmax implementation does not support this yet. 
+ selectLastIndex = attributeProto.i(); + } + } + if (keepdims == 1) { + MNN_ERROR("ONNX ArgMax with keepdims == true is currently not supported.\n"); + } + axisT->axis = axis; + axisT->topK = 1; + axisT->outMaxVal = 0; + dstOp->main.value = axisT; +} + +REGISTER_CONVERTER(ArgMaxOnnx, ArgMax); diff --git a/tools/converter/source/optimizer/PostConverter.cpp b/tools/converter/source/optimizer/PostConverter.cpp index fc0fabee..a36588bd 100644 --- a/tools/converter/source/optimizer/PostConverter.cpp +++ b/tools/converter/source/optimizer/PostConverter.cpp @@ -118,8 +118,8 @@ std::unique_ptr optimizeNet(std::unique_ptr& originNet, bo printedInputOutput = true; MNN_PRINT("The Model Has Control / Extra Op, Please Compile the Code of model.cpp\n"); std::ofstream code("model.cpp"); - code << "#include \"Expr.hpp\"\n"; - code << "#include \"ExprCreator.hpp\"\n"; + code << "#include \n"; + code << "#include \n"; code << "using namespace MNN::Express;\n"; code << "void extraCall(std::map& varMap) {\n"; program->emit(code); diff --git a/tools/converter/source/optimizer/Program.cpp b/tools/converter/source/optimizer/Program.cpp index 1d736780..99d7ad53 100644 --- a/tools/converter/source/optimizer/Program.cpp +++ b/tools/converter/source/optimizer/Program.cpp @@ -333,8 +333,8 @@ int main(int argc, const char* argv[]) { { std::ofstream output("model.cpp"); std::ofstream outputUtils("Utils.hpp"); - output << "#include \"Expr.hpp\"\n"; - output << "#include \"ExprCreator.hpp\"\n"; + output << "#include \n"; + output << "#include \n"; output << "using namespace MNN::Express;\n"; output << "int main() {\n"; output << "auto varMap = Variable::loadMap(\"support.mnn\");\n"; diff --git a/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp b/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp index 8472f5b2..7759428a 100644 --- a/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp +++ b/tools/converter/source/optimizer/merge/ConvBiasAdd.cpp @@ -25,6 +25,9 @@ static auto gRegister = []() { } auto inputs = expr->inputs(); auto inputExpr = inputs[0]->expr().first; + if (nullptr == inputExpr->get()) { + return false; + } if (inputExpr->get()->main_type() != OpParameter_Convolution2D || inputExpr->outputs().size() != 1) { return false; } diff --git a/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp b/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp index e63827d3..f797e8e8 100644 --- a/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp +++ b/tools/converter/source/optimizer/onnxextra/OnnxConvolutionMerge.cpp @@ -228,7 +228,7 @@ public: common->relu = false; common->group = group; common->outputCount = co; - common->inputCount = group == 1 ? 
ci : group; // conv set inputCount to be ci, dw to be group
+    common->inputCount = ci * group; // conv set inputCount to be ci, dw to be group
     common->kernelX    = kw;
     common->kernelY    = kh;
     common->dilateX    = dilation_w;
diff --git a/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp b/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp
index 9ea12f18..06c015e9 100644
--- a/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp
+++ b/tools/converter/source/optimizer/onnxextra/OnnxUpsample.cpp
@@ -24,6 +24,7 @@ public:
         auto extraParam    = op->main_as_Extra();
         const int attrSize = extraParam->attr()->size();
         std::string interpMode;
+        std::string coordMode = ""; // detect align_corner attribute
         for (int i = 0; i < attrSize; ++i) {
             auto attr       = extraParam->attr()->GetAs(i);
             const auto& key = attr->key()->str();
@@ -33,6 +34,8 @@
                 scalesSize = attr->list()->f()->size();
                 scales.resize(scalesSize);
                 memcmp(scales.data(), attr->list()->f()->data(), sizeof(float) * scalesSize);
+            } else if (key == "coordinate_transformation_mode") {
+                coordMode = attr->s()->str();
             }
         }
@@ -73,7 +76,9 @@
         } else {
             MNN_ERROR("MNN Not support Upsample when scale size = %d\n", scalesSize);
         }
-
+        interpParam->alignCorners = (coordMode == "align_corners");
+        interpParam->halfPixelCenters = (interpParam->alignCorners == false);
+
         // 1:near 2: bilinear 3: cubic
         if (interpMode == "nearest") {
             interpParam->resizeType = 1;
@@ -125,6 +130,7 @@
             MNN_ERROR("Unsupported Upsample mode! ==> %s\n", resizeMode.c_str());
         }
         resizeParam->alignCorners = (coordMode == "align_corners");
+        resizeParam->halfPixelCenters = (resizeParam->alignCorners == false);
         auto sizes = inputs[3];
diff --git a/tools/converter/source/optimizer/postconvert/TransformGroupConvolution.cpp b/tools/converter/source/optimizer/postconvert/TransformGroupConvolution.cpp
index 8d7be159..e487e1a3 100644
--- a/tools/converter/source/optimizer/postconvert/TransformGroupConvolution.cpp
+++ b/tools/converter/source/optimizer/postconvert/TransformGroupConvolution.cpp
@@ -141,6 +141,7 @@ public:
             newConvolutionT->common->padX  = common->padX;
             newConvolutionT->common->padY  = common->padY;
             newConvolutionT->common->relu  = common->relu;
+            newConvolutionT->common->relu6 = common->relu6;
             int startWeight = partWeightSize * i;
             int startBias   = partBiasSize * i;
diff --git a/tools/converter/source/tensorflow/ResizeBilinearTf.cpp b/tools/converter/source/tensorflow/ResizeBilinearTf.cpp
index 3905b860..fdc958c9 100644
--- a/tools/converter/source/tensorflow/ResizeBilinearTf.cpp
+++ b/tools/converter/source/tensorflow/ResizeBilinearTf.cpp
@@ -54,6 +54,11 @@ void InterpTf::run(MNN::OpT *dstOp, TmpNode *srcNode) {
     if (find_attr_value(srcNode->tfNode, "align_corners", value)) {
         interpParam->alignCorners = value.b();
     }
+
+    interpParam->halfPixelCenters = false; // default false
+    if (find_attr_value(srcNode->tfNode, "half_pixel_centers", value)) {
+        interpParam->halfPixelCenters = value.b();
+    }
     // TODO default
     interpParam->widthScale = 1.0;
diff --git a/tools/converter/source/tflite/OneHotTflite.cpp b/tools/converter/source/tflite/OneHotTflite.cpp
new file mode 100644
index 00000000..0e6db908
--- /dev/null
+++ b/tools/converter/source/tflite/OneHotTflite.cpp
@@ -0,0 +1,33 @@
+//
+// OneHotTflite.cpp
+// MNNConverter
+//
+// Created by MNN on 2020/01/02.
+// Copyright © 2019, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" +using namespace tflite; + +DECLARE_OP_COVERTER(OneHotTflite); + +MNN::OpType OneHotTflite::opType(bool quantizedModel) { + return MNN::OpType_OneHot; +} +MNN::OpParameter OneHotTflite::type(bool quantizedModel) { + return MNN::OpParameter_OneHotParam; +} + +void OneHotTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + auto ohParam = new MNN::OneHotParamT; + auto opt=tfliteOp->builtin_options.AsOneHotOptions(); + ohParam->axis = opt->axis; + dstOp->main.value = ohParam; + +} + +REGISTER_CONVERTER(OneHotTflite, BuiltinOperator_ONE_HOT); diff --git a/tools/converter/source/tflite/PadTflite.cpp b/tools/converter/source/tflite/PadTflite.cpp index 9b17722d..43fb73a8 100644 --- a/tools/converter/source/tflite/PadTflite.cpp +++ b/tools/converter/source/tflite/PadTflite.cpp @@ -22,7 +22,38 @@ void PadTflite::run(MNN::OpT* dstOp, const std::unique_ptr& t const std::vector>& tfliteTensors, const std::vector>& tfliteModelBuffer, const std::vector>& tfliteOpSet, bool quantizedModel) { + auto padparm = new MNN::PadParamT; + switch(tfliteOp->opcode_index){ + case BuiltinOperator_PADV2: + case BuiltinOperator_PAD:{ + padparm->mode = MNN::PadValueMode_CONSTANT; + break; + } + case BuiltinOperator_MIRROR_PAD:{ + auto opt=tfliteOp->builtin_options.AsMirrorPadOptions(); + switch(opt->mode){ + case MirrorPadMode_REFLECT:{ + padparm->mode = MNN::PadValueMode_REFLECT; + break; + } + case MirrorPadMode_SYMMETRIC:{ + padparm->mode = MNN::PadValueMode_SYMMETRIC; + break; + } + default:{ + DCHECK(false) << "Unknown Pad Value Mode!"; + } + } + break; + } + default:{ + DCHECK(false) << "Unknown Pad Operator"; + } + } + dstOp->main.value = padparm; } REGISTER_CONVERTER(PadTflite, BuiltinOperator_PAD); +REGISTER_CONVERTER(PadTflite, BuiltinOperator_PADV2); +REGISTER_CONVERTER(PadTflite,BuiltinOperator_MIRROR_PAD); diff --git a/tools/converter/source/tflite/ReductionTflite.cpp b/tools/converter/source/tflite/ReductionTflite.cpp index d8fe3ec7..b5e2df96 100644 --- a/tools/converter/source/tflite/ReductionTflite.cpp +++ b/tools/converter/source/tflite/ReductionTflite.cpp @@ -54,6 +54,10 @@ void ReductionTflite::run(MNN::OpT* dstOp, const std::unique_ptroperation=MNN::ReductionType_PROD; break; } + case tflite::BuiltinOperator_MEAN:{ + param->operation=MNN::ReductionType_MEAN; + break; + } default:{ LOG(ERROR) << "MNN Converter Not " "Supported!!! Reduction Op: " @@ -67,3 +71,4 @@ REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_MAX); REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_MIN); REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_ANY); REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_REDUCE_PROD); +REGISTER_CONVERTER(ReductionTflite,BuiltinOperator_MEAN); diff --git a/tools/converter/source/tflite/ShapeTflite.cpp b/tools/converter/source/tflite/ShapeTflite.cpp new file mode 100644 index 00000000..69cfe93f --- /dev/null +++ b/tools/converter/source/tflite/ShapeTflite.cpp @@ -0,0 +1,29 @@ +// +// ShapeTflite.cpp +// MNNConverter +// +// Created by MNN on 2020/01/02. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(ShapeTflite); +MNN::OpType ShapeTflite::opType(bool quantizedModel) { + return MNN::OpType_Shape; +} +MNN::OpParameter ShapeTflite::type(bool quantizedModel) { + return MNN::OpParameter_NONE; +} + +void ShapeTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + dstOp->main.value = nullptr; +} + + +using namespace tflite; +REGISTER_CONVERTER(ShapeTflite, BuiltinOperator_SHAPE); diff --git a/tools/converter/source/tflite/SpaceToDepthTflite.cpp b/tools/converter/source/tflite/SpaceToDepthTflite.cpp new file mode 100644 index 00000000..692c6313 --- /dev/null +++ b/tools/converter/source/tflite/SpaceToDepthTflite.cpp @@ -0,0 +1,33 @@ +// +// SpaceToDepthTflite.cpp +// MNNConverter +// +// Created by MNN on 2020/01/02. +// Copyright © 2019, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" +using namespace tflite; + +DECLARE_OP_COVERTER(SpaceToDepthTflite); + +MNN::OpType SpaceToDepthTflite::opType(bool quantizedModel) { + return MNN::OpType_SpaceToDepth; +} +MNN::OpParameter SpaceToDepthTflite::type(bool quantizedModel) { + return MNN::OpParameter_DepthSpaceParam; +} + +void SpaceToDepthTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel) { + auto spaceToDepthParam = new MNN::DepthSpaceParamT; + auto opt=tfliteOp->builtin_options.AsSpaceToDepthOptions(); + spaceToDepthParam->blockSize = opt->block_size; + dstOp->main.value = spaceToDepthParam; + +} + +REGISTER_CONVERTER(SpaceToDepthTflite, BuiltinOperator_SPACE_TO_DEPTH); diff --git a/tools/converter/source/tflite/TanHTflite.cpp b/tools/converter/source/tflite/TanHTflite.cpp new file mode 100644 index 00000000..5ac64784 --- /dev/null +++ b/tools/converter/source/tflite/TanHTflite.cpp @@ -0,0 +1,29 @@ +// +// TanHTflite.cpp +// MNNConverter +// +// Created by MNN on 2020/01/02. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(TanHTflite); +MNN::OpType TanHTflite::opType(bool quantizedModel) { + return MNN::OpType_TanH; +} +MNN::OpParameter TanHTflite::type(bool quantizedModel) { + return MNN::OpParameter_NONE; +} + +void TanHTflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + dstOp->main.value = nullptr; +} + + +using namespace tflite; +REGISTER_CONVERTER(TanHTflite, BuiltinOperator_TANH); diff --git a/tools/converter/source/tflite/TopKV2Tflite.cpp b/tools/converter/source/tflite/TopKV2Tflite.cpp new file mode 100644 index 00000000..76690e1d --- /dev/null +++ b/tools/converter/source/tflite/TopKV2Tflite.cpp @@ -0,0 +1,33 @@ +// +// TopKV2Tflite.cpp +// MNNConverter +// +// Created by MNN on 2020/01/02. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "liteOpConverter.hpp" + +DECLARE_OP_COVERTER(TopKV2Tflite); +MNN::OpType TopKV2Tflite::opType(bool quantizedModel) { + return MNN::OpType_TopKV2; +} +MNN::OpParameter TopKV2Tflite::type(bool quantizedModel) { + return MNN::OpParameter_TopKV2; +} + +void TopKV2Tflite::run(MNN::OpT* dstOp, const std::unique_ptr& tfliteOp, + const std::vector>& tfliteTensors, + const std::vector>& tfliteModelBuffer, + const std::vector>& tfliteOpSet, bool quantizedModel){ + + auto topkv2Param = new MNN::TopKV2T; + topkv2Param->sorted = false; + topkv2Param->T = MNN::DataType_DT_FLOAT; + dstOp->main.value = topkv2Param; +} + + +using namespace tflite; +REGISTER_CONVERTER(TopKV2Tflite, BuiltinOperator_TOPK_V2); diff --git a/tools/cpp/CMakeLists.txt b/tools/cpp/CMakeLists.txt index 15e780ea..af929bce 100644 --- a/tools/cpp/CMakeLists.txt +++ b/tools/cpp/CMakeLists.txt @@ -1,26 +1,36 @@ +set(MNN_CPP_TOOLS "") + add_executable(MNNV2Basic.out ${CMAKE_CURRENT_LIST_DIR}/MNNV2Basic.cpp ${CMAKE_CURRENT_LIST_DIR}/revertMNNModel.cpp) target_link_libraries(MNNV2Basic.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS MNNV2Basic.out) add_executable(mobilenetTest.out ${CMAKE_CURRENT_LIST_DIR}/mobilenetTest.cpp ) target_link_libraries(mobilenetTest.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS mobilenetTest.out) add_executable(backendTest.out ${CMAKE_CURRENT_LIST_DIR}/backendTest.cpp) target_link_libraries(backendTest.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS backendTest.out) add_executable(testModel.out ${CMAKE_CURRENT_LIST_DIR}/testModel.cpp) target_link_libraries(testModel.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS testModel.out) add_executable(testModelWithDescrisbe.out ${CMAKE_CURRENT_LIST_DIR}/testModelWithDescrisbe.cpp ${CMAKE_CURRENT_LIST_DIR}/Config.cpp) target_link_libraries(testModelWithDescrisbe.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS testModelWithDescrisbe.out) add_executable(getPerformance.out ${CMAKE_CURRENT_LIST_DIR}/getPerformance.cpp) target_link_libraries(getPerformance.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS getPerformance.out) add_executable(checkInvalidValue.out ${CMAKE_CURRENT_LIST_DIR}/checkInvalidValue.cpp) target_link_libraries(checkInvalidValue.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS checkInvalidValue.out) add_executable(timeProfile.out ${CMAKE_CURRENT_LIST_DIR}/timeProfile.cpp ${CMAKE_CURRENT_LIST_DIR}/revertMNNModel.cpp ${CMAKE_CURRENT_LIST_DIR}/Profiler.cpp) target_link_libraries(timeProfile.out ${MNN_DEPS}) +list(APPEND MNN_CPP_TOOLS timeProfile.out) if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") add_executable(checkDir.out ${CMAKE_CURRENT_LIST_DIR}/checkDir.cpp) @@ -32,3 +42,10 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") add_executable(winogradGenerateCL.out ${CMAKE_CURRENT_LIST_DIR}/winogradGenerateCL.cpp) target_link_libraries(winogradGenerateCL.out ${MNN_DEPS}) endif() + +if (MSVC OR WIN32) + foreach(TARGET ${MNN_CPP_TOOLS}) + target_compile_definitions(${TARGET} PRIVATE "_CRT_SECURE_NO_WARNINGS") + target_compile_options(${TARGET} PRIVATE "/wd4244" "/wd4305" "/wd4129") + endforeach() +endif() diff --git a/tools/train/CMakeLists.txt b/tools/train/CMakeLists.txt index f69b3bff..9bab1be1 100644 --- a/tools/train/CMakeLists.txt +++ b/tools/train/CMakeLists.txt @@ -5,14 +5,16 @@ IF(MNN_BUILD_TRAIN) include_directories(${CMAKE_CURRENT_LIST_DIR}/source/module) include_directories(${CMAKE_CURRENT_LIST_DIR}/source/transformer) include_directories(${CMAKE_CURRENT_LIST_DIR}/source/data) + 
include_directories(${CMAKE_CURRENT_LIST_DIR}/source/models)
     file(GLOB GRAD ${CMAKE_CURRENT_LIST_DIR}/source/grad/*)
     file(GLOB TRANSFORMER ${CMAKE_CURRENT_LIST_DIR}/source/transformer/*)
     file(GLOB MODULES ${CMAKE_CURRENT_LIST_DIR}/source/module/*)
     file(GLOB PARAMETER ${CMAKE_CURRENT_LIST_DIR}/source/parameters/*)
     file(GLOB OPTIMIZER ${CMAKE_CURRENT_LIST_DIR}/source/optimizer/*)
     file(GLOB DATALOADER ${CMAKE_CURRENT_LIST_DIR}/source/data/*)
+    file(GLOB MODELS ${CMAKE_CURRENT_LIST_DIR}/source/models/*)
-    add_library(MNNTrain SHARED ${GRAD} ${BASIC_INCLUDE} ${PARAMETER} ${OPTIMIZER} ${MODULES} ${DATALOADER} ${TRANSFORMER})
+    add_library(MNNTrain SHARED ${GRAD} ${BASIC_INCLUDE} ${PARAMETER} ${OPTIMIZER} ${MODULES} ${DATALOADER} ${TRANSFORMER} ${MODELS})
     target_link_libraries(MNNTrain ${MNN_DEPS})
     IF(CMAKE_BUILD_TYPE MATCHES Release)
         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
@@ -22,14 +24,15 @@ IF(MNN_BUILD_TRAIN)
     target_link_libraries(transformer.out MNNTrain)
     add_executable(train.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/train.cpp ${SCHEMA} ${BASIC_INCLUDE})
-    target_link_libraries(train.out MNN)
+    target_link_libraries(train.out ${MNN_DEPS})
     add_executable(rawDataTransform.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/rawDataTransform.cpp ${SCHEMA} ${BASIC_INCLUDE})
+    target_link_libraries(rawDataTransform.out ${MNN_DEPS})
+    include_directories(${CMAKE_SOURCE_DIR}/3rd_party/imageHelper/)
-    include_directories(../../3rd_party/imageHelper/)
     add_executable(dataTransformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/dataTransformer.cpp ${SCHEMA} ${BASIC_INCLUDE})
-    target_link_libraries(dataTransformer.out MNN)
+    target_link_libraries(dataTransformer.out ${MNN_DEPS})
     option(MNN_USE_OPENCV "Use opencv" OFF)
diff --git a/tools/train/source/data/DataLoader.hpp b/tools/train/source/data/DataLoader.hpp
index 4da67f06..53cfcc6e 100644
--- a/tools/train/source/data/DataLoader.hpp
+++ b/tools/train/source/data/DataLoader.hpp
@@ -16,12 +16,12 @@
 #include "DataLoaderConfig.hpp"
 #include "Dataset.hpp"
 #include "Example.hpp"
+#include "LambdaTransform.hpp"
 #include "RandomSampler.hpp"
 #include "Sampler.hpp"
+#include "StackTransform.hpp"
 #include "Transform.hpp"
 #include "TransformDataset.hpp"
-#include "StackTransform.hpp"
-#include "LambdaTransform.hpp"
 namespace MNN {
 namespace Train {
@@ -43,6 +43,13 @@ public:
         }
     }
+    /*
+    When using the Windows v141 toolset to compile a class holding a vector of non-copyable elements (std::thread, for example),
+    the copy constructor (or assignment operator) must be deleted explicitly, otherwise compilation will fail.
+    */
+    DataLoader(const DataLoader&) = delete;
+    DataLoader& operator = (const DataLoader&) = delete;
+
     virtual ~DataLoader() {
         join();
     };
@@ -80,4 +87,4 @@ private:
 } // namespace Train
 } // namespace MNN
-#endif // DataLoader_hpp
\ No newline at end of file
+#endif // DataLoader_hpp
diff --git a/tools/train/source/data/ImageDataset.cpp b/tools/train/source/data/ImageDataset.cpp
new file mode 100644
index 00000000..49ef7990
--- /dev/null
+++ b/tools/train/source/data/ImageDataset.cpp
@@ -0,0 +1,155 @@
+//
+// ImageDataset.cpp
+// MNN
+//
+// Created by MNN on 2019/12/30.
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "ImageDataset.hpp" +#include +#include +#include +#include +#include +#define STB_IMAGE_IMPLEMENTATION +#include "MNN/ImageProcess.hpp" +#include "MNN/MNNDefine.h" +#include "stb_image.h" + +using namespace std; +using namespace MNN::CV; + +// behave like python split +vector split(const string sourceStr, string splitChar = " ") { + vector result; + int pos = 0; + int start = 0; + + while ((pos = sourceStr.find(splitChar, start)) != string::npos) { + result.emplace_back(sourceStr.substr(start, pos - start)); + start = pos + splitChar.size(); + } + + if (start < sourceStr.size()) { + result.emplace_back(sourceStr.substr(start)); + } + + return result; +} + +ImageDataset::ImageDataset(const std::string pathToImages, const std::string pathToImageTxt, ImageConfig cfg, + bool readAllToMemory) { + mReadAllToMemory = readAllToMemory; + mConfig = cfg; + + ImageProcess::Config config; + config.sourceFormat = ImageFormat::RGBA; + config.filterType = MNN::CV::BILINEAR; + + switch (cfg.destFormat) { + case DestImageFormat::GRAY: + config.destFormat = ImageFormat::GRAY; + break; + case DestImageFormat::RGB: + config.destFormat = ImageFormat::RGB; + break; + case DestImageFormat::BGR: + config.destFormat = ImageFormat::BGR; + break; + default: + MNN_PRINT("not supported dest format\n"); + MNN_ASSERT(false); + break; + } + mProcess.reset(ImageProcess::create(config)); + + getAllDataAndLabelsFromTxt(pathToImages, pathToImageTxt); + + if (mReadAllToMemory) { + for (int i = 0; i < mAllTxtLines.size(); i++) { + auto dataLabelsPair = getDataAndLabelsFrom(mAllTxtLines[i]); + mDataAndLabels.emplace_back(dataLabelsPair); + } + } +} + +Example ImageDataset::get(size_t index) { + if (mReadAllToMemory) { + return {{mDataAndLabels[index].first}, {mDataAndLabels[index].second}}; + } else { + auto dataAndLabels = getDataAndLabelsFrom(mAllTxtLines[index]); + return {{dataAndLabels.first}, {dataAndLabels.second}}; + } +} + +size_t ImageDataset::size() { + return mAllTxtLines.size(); +} + +void ImageDataset::getAllDataAndLabelsFromTxt(const std::string pathToImages, std::string pathToImageTxt) { + std::ifstream txtFile(pathToImageTxt); + if (!txtFile.is_open()) { + MNN_PRINT("%s: file not found\n", pathToImageTxt.c_str()); + MNN_ASSERT(false); + } + string line; + while (getline(txtFile, line)) { + vector splitStr; + splitStr = split(line, " "); + if (splitStr.size() != 2) { + MNN_PRINT("%s: file format error\n", pathToImageTxt.c_str()); + MNN_ASSERT(false); + } + std::pair > dataPair; + dataPair.first = pathToImages + splitStr[0]; + vector labels; + labels = split(splitStr[1], ","); + for (int i = 0; i < labels.size(); i++) { + dataPair.second.emplace_back(atoi(labels[i].c_str())); + } + mAllTxtLines.emplace_back(dataPair); + } + txtFile.close(); +} + +std::pair ImageDataset::getDataAndLabelsFrom(std::pair > dataAndLabels) { + int originalWidth, originalHeight, comp; + string imageName = dataAndLabels.first; + auto bitmap32bits = stbi_load(imageName.c_str(), &originalWidth, &originalHeight, &comp, 4); + if (bitmap32bits == nullptr) { + MNN_PRINT("can not open image: %s\n", imageName.c_str()); + MNN_ASSERT(false); + } + MNN::CV::Matrix trans; + // choose resize or crop + // resize method + int oh, ow, bpp; + if (mConfig.resizeHeight > 0 && mConfig.resizeWidth > 0) { + trans.setScale((float)(originalWidth - 1) / (float)(mConfig.resizeWidth - 1), + (float)(originalHeight - 1) / (float)(mConfig.resizeHeight - 1)); + oh = mConfig.resizeHeight; + ow = 
mConfig.resizeWidth; + } else { + trans.setScale(1.0f, 1.0f); + oh = originalHeight; + ow = originalWidth; + } + bpp = mConfig.destFormat == DestImageFormat::GRAY ? 1 : 3; + mProcess->setMatrix(trans); + + auto data = _Input({oh, ow, bpp}, NHWC, halide_type_of()); + auto txtLabels = dataAndLabels.second; + auto labels = _Input({int(txtLabels.size())}, NHWC, halide_type_of()); + + mProcess->convert(bitmap32bits, originalWidth, originalHeight, 0, data->writeMap(), ow, oh, bpp, ow * bpp, + halide_type_of()); + + auto labelsDataPtr = labels->writeMap(); + for (int j = 0; j < txtLabels.size(); j++) { + labelsDataPtr[j] = txtLabels[j]; + } + stbi_image_free(bitmap32bits); + + return std::make_pair(data, labels); +} diff --git a/tools/train/source/data/ImageDataset.hpp b/tools/train/source/data/ImageDataset.hpp new file mode 100644 index 00000000..57804c9b --- /dev/null +++ b/tools/train/source/data/ImageDataset.hpp @@ -0,0 +1,71 @@ +// +// ImageDataset.hpp +// MNN +// +// Created by MNN on 2019/12/30. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef ImageDataset_hpp +#define ImageDataset_hpp + +#include +#include +#include +#include "Dataset.hpp" +#include "Example.hpp" +#include "MNN/ImageProcess.hpp" + +using namespace MNN; +using namespace MNN::Train; + +// +// the ImageDataset read stored images as input data. +// use 'pathToImages' and a txt file to construct a ImageDataset. +// the txt file should use format as below: +// image1.jpg label1,label2,... +// image2.jpg label3,label4,... +// ... +// the ImageDataset would read images from: +// pathToImages/image1.jpg +// pathToImages/image2.jpg +// ... +// +class MNN_PUBLIC ImageDataset : public Dataset { +public: + enum DestImageFormat { + GRAY, + RGB, + BGR, + }; + + struct ImageConfig { + ImageConfig(DestImageFormat destFmt = DestImageFormat::GRAY, int resizeH = 0, int resizeW = 0) { + destFormat = destFmt; + resizeHeight = resizeH; + resizeWidth = resizeW; + } + DestImageFormat destFormat; + int resizeHeight; + int resizeWidth; + }; + + explicit ImageDataset(const std::string pathToImages, const std::string pathToImageTxt, + ImageConfig cfg = ImageConfig(), bool readAllToMemory = false); + + Example get(size_t index) override; + + size_t size() override; + +private: + bool mReadAllToMemory; + std::vector > > mAllTxtLines; + std::vector > mDataAndLabels; + std::shared_ptr mProcess = nullptr; + ImageConfig mConfig; + + void getAllDataAndLabelsFromTxt(const std::string pathToImages, std::string pathToImageTxt); + std::pair getDataAndLabelsFrom(std::pair > dataAndLabels); +}; + +#endif // ImageDataset_hpp diff --git a/tools/train/source/data/MnistDataset.cpp b/tools/train/source/data/MnistDataset.cpp index 9f7e1ef0..e1efe95e 100644 --- a/tools/train/source/data/MnistDataset.cpp +++ b/tools/train/source/data/MnistDataset.cpp @@ -7,9 +7,9 @@ // #include "MnistDataset.hpp" +#include #include #include -#include // referenced from pytorch C++ frontend mnist.cpp // https://github.com/pytorch/pytorch/blob/master/torch/csrc/api/src/data/datasets/mnist.cpp diff --git a/tools/train/source/demo/ImageDatasetDemo.cpp b/tools/train/source/demo/ImageDatasetDemo.cpp new file mode 100644 index 00000000..ed7f73f3 --- /dev/null +++ b/tools/train/source/demo/ImageDatasetDemo.cpp @@ -0,0 +1,105 @@ +// +// ImageDatasetDemo.cpp +// MNN +// +// Created by MNN on 2019/11/20. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include "DataLoader.hpp" +#include "DemoUnit.hpp" +#include "ImageDataset.hpp" +#include "MNN_generated.h" + +#ifdef MNN_USE_OPENCV +#include // use opencv to show pictures +using namespace cv; +#endif + +using namespace std; + +/* + * this is an demo for how to use the ImageDataset and DataLoader + */ + +class ImageDatasetDemo : public DemoUnit { +public: + // this function is an example to use the lambda transform + // here we use lambda transform to normalize data from 0~255 to 0~1 + static Example func(Example example) { + // // an easier way to do this + auto cast = _Cast(example.data[0], halide_type_of()); + example.data[0] = _Multiply(cast, _Const(1.0f / 255.0f)); + return example; + } + + virtual int run(int argc, const char* argv[]) override { + if (argc != 3) { + cout << "usage: ./runTrainDemo.out ImageDatasetDemo path/to/images/ path/to/image/txt\n" << endl; + + cout << "the ImageDataset read stored images as input data.\n" + "use 'pathToImages' and a txt file to construct a ImageDataset.\n" + "the txt file should use format as below:\n" + " image1.jpg label1,label2,...\n" + " image2.jpg label3,label4,...\n" + " ...\n" + "the ImageDataset would read images from:\n" + " pathToImages/image1.jpg\n" + " pathToImages/image2.jpg\n" + " ...\n" + << endl; + + return 0; + } + + std::string pathToImages = argv[1]; + std::string pathToImageTxt = argv[2]; + + // total image num + const size_t datasetSize = 20; + + auto converImagesToFormat = ImageDataset::DestImageFormat::BGR; + int resizeHeight = 224; + int resizeWidth = 224; + auto config = ImageDataset::ImageConfig(converImagesToFormat, resizeHeight, resizeWidth); + bool readAllImagesToMemory = false; + + auto dataset = std::make_shared(pathToImages, pathToImageTxt, config, readAllImagesToMemory); + + // the lambda transform for one example, we also can do it in batch + auto transform = std::make_shared(func); + + // // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] + // auto transform = std::make_shared(); + + const int batchSize = 1; + const int numWorkers = 1; + + auto dataLoader = DataLoader::makeDataLoader(dataset, {transform}, batchSize, false, numWorkers); + + const size_t iterations = datasetSize / batchSize; + + for (int i = 0; i < iterations; i++) { + auto trainData = dataLoader->next(); + + auto data = trainData[0].data[0]->readMap(); + auto label = trainData[0].target[0]->readMap(); + + cout << "index: " << i << " label: " << int(label[0]) << endl; + +#ifdef MNN_USE_OPENCV + // only show the first picture in the batch + Mat image = Mat(resizeHeight, resizeWidth, CV_32FC(3), (void*)data); + imshow("image", image); + + waitKey(-1); +#endif + } + // this will reset the sampler's internal state + dataLoader->reset(); + return 0; + } +}; + +DemoUnitSetRegister(ImageDatasetDemo, "ImageDatasetDemo"); diff --git a/tools/train/source/demo/MnistUtils.cpp b/tools/train/source/demo/MnistUtils.cpp new file mode 100644 index 00000000..4e3115e3 --- /dev/null +++ b/tools/train/source/demo/MnistUtils.cpp @@ -0,0 +1,123 @@ +// +// MnistUtils.cpp +// MNN +// +// Created by MNN on 2020/01/08. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "MnistUtils.hpp" +#include +#include +#include +#include +#include +#include "DataLoader.hpp" +#include "DemoUnit.hpp" +#include "MnistDataset.hpp" +#include "NN.hpp" +#include "SGD.hpp" +#define MNN_OPEN_TIME_TRACE +#include +#include "ADAM.hpp" +#include "LearningRateScheduler.hpp" +#include "Loss.hpp" +#include "RandomGenerator.hpp" +#include "Transformer.hpp" +using namespace MNN; +using namespace MNN::Express; +using namespace MNN::Train; + +void MnistUtils::train(std::shared_ptr model, std::string root) { + auto exe = Executor::getGlobalExecutor(); + BackendConfig config; + exe->setGlobalExecutorConfig(MNN_FORWARD_CPU, config, 2); + std::shared_ptr sgd(new SGD); + sgd->append(model->parameters()); + sgd->setMomentum(0.9f); + // sgd->setMomentum2(0.99f); + sgd->setWeightDecay(0.0005f); + + auto dataset = std::make_shared(root, MnistDataset::Mode::TRAIN); + // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] + auto transform = std::make_shared(); + + const size_t batchSize = 64; + const size_t numWorkers = 4; + bool shuffle = true; + + auto dataLoader = DataLoader::makeDataLoader(dataset, {transform}, batchSize, shuffle, numWorkers); + + const size_t iterations = dataset->size() / batchSize; + + auto testDataset = std::make_shared(root, MnistDataset::Mode::TEST); + const size_t testBatchSize = 20; + const size_t testNumWorkers = 1; + shuffle = false; + + auto testDataLoader = DataLoader::makeDataLoader(testDataset, {transform}, testBatchSize, shuffle, testNumWorkers); + + const size_t testIterations = testDataset->size() / testBatchSize; + + for (int epoch = 0; epoch < 50; ++epoch) { + model->clearCache(); + exe->gc(Executor::FULL); + exe->resetProfile(); + { + AUTOTIME; + dataLoader->reset(); + model->setIsTraining(true); + for (int i = 0; i < iterations; i++) { + // AUTOTIME; + auto trainData = dataLoader->next(); + auto example = trainData[0]; + auto cast = _Cast(example.data[0]); + example.data[0] = cast * _Const(1.0f / 255.0f); + + // Compute One-Hot + auto newTarget = _OneHot(_Cast(example.target[0]), _Scalar(10), _Scalar(1.0f), + _Scalar(0.0f)); + + auto predict = model->forward(example.data[0]); + auto loss = _CrossEntropy(predict, newTarget); + float rate = LrScheduler::inv(0.01, epoch * iterations + i, 0.0001, 0.75); + sgd->setLearningRate(rate); + if ((epoch * iterations + i) % 100 == 0) { + std::cout << "train iteration: " << epoch * iterations + i; + std::cout << " loss: " << loss->readMap()[0]; + std::cout << " lr: " << rate << std::endl; + } + sgd->step(loss); + if (i == iterations - 1) { + model->setIsTraining(false); + auto forwardInput = _Input({1, 1, 28, 28}, NCHW); + forwardInput->setName("data"); + predict = model->forward(forwardInput); + predict->setName("prob"); + Variable::save({predict}, "temp.mnist.mnn"); + } + } + } + + int correct = 0; + testDataLoader->reset(); + model->setIsTraining(false); + for (int i = 0; i < testIterations; i++) { + exe->gc(Executor::PART); + if ((i + 1) % 100 == 0) { + std::cout << "test iteration: " << (i + 1) << std::endl; + } + auto data = testDataLoader->next(); + auto example = data[0]; + auto cast = _Cast(example.data[0]); + example.data[0] = cast * _Const(1.0f / 255.0f); + auto predict = model->forward(example.data[0]); + predict = _ArgMax(predict, 1); + auto accu = _Cast(_Equal(predict, _Cast(example.target[0]))).sum({}); + correct += accu->readMap()[0]; + } + auto accu = (float)correct / (float)testDataset->size(); + std::cout << "epoch: " << epoch 
<< " accuracy: " << accu << std::endl; + exe->dumpProfile(); + } +} diff --git a/tools/train/source/demo/MnistUtils.hpp b/tools/train/source/demo/MnistUtils.hpp new file mode 100644 index 00000000..5a231b7d --- /dev/null +++ b/tools/train/source/demo/MnistUtils.hpp @@ -0,0 +1,16 @@ +// +// MnistUtils.hpp +// MNN +// +// Created by MNN on 2020/01/08. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MnistUtils_hpp +#define MnistUtils_hpp +#include "Module.hpp" +class MnistUtils { +public: + static void train(std::shared_ptr model, std::string root); +}; +#endif diff --git a/tools/train/source/demo/dataLoaderDemo.cpp b/tools/train/source/demo/dataLoaderDemo.cpp index 6e8abc48..de98af15 100644 --- a/tools/train/source/demo/dataLoaderDemo.cpp +++ b/tools/train/source/demo/dataLoaderDemo.cpp @@ -8,14 +8,9 @@ #include #include "DataLoader.hpp" -#include "DataLoaderConfig.hpp" #include "DemoUnit.hpp" -#include "LambdaTransform.hpp" #include "MNN_generated.h" #include "MnistDataset.hpp" -#include "RandomSampler.hpp" -#include "StackTransform.hpp" -#include "TransformDataset.hpp" #ifdef MNN_USE_OPENCV #include // use opencv to show pictures @@ -88,18 +83,19 @@ public: auto label = trainData[0].target[0]->readMap(); cout << "index: " << i << " train label: " << int(label[0]) << endl; - - // // only show the first picture in the batch - // imshow("train", Mat(28, 28, CV_32FC1, (void*)data)); - +#ifdef MNN_USE_OPENCV + // only show the first picture in the batch + imshow("train", Mat(28, 28, CV_32FC1, (void*)data)); +#endif data = testData[0].data[0]->readMap(); label = testData[0].target[0]->readMap(); cout << "index: " << i << " test label: " << int(label[0]) << endl; - - // // only show the first picture in the batch - // imshow("test", Mat(28, 28, CV_32FC1, (void*)data)); - // waitKey(-1); +#ifdef MNN_USE_OPENCV + // only show the first picture in the batch + imshow("test", Mat(28, 28, CV_32FC1, (void*)data)); + waitKey(-1); +#endif } // this will reset the sampler's internal state, not necessary here trainDataLoader->reset(); diff --git a/tools/train/source/demo/dataLoaderTest.cpp b/tools/train/source/demo/dataLoaderTest.cpp index 0aa11037..02c4806e 100644 --- a/tools/train/source/demo/dataLoaderTest.cpp +++ b/tools/train/source/demo/dataLoaderTest.cpp @@ -7,18 +7,13 @@ // #include +#include #include #include #include -#include #include "DataLoader.hpp" -#include "DataLoaderConfig.hpp" #include "DemoUnit.hpp" -#include "LambdaTransform.hpp" #include "MnistDataset.hpp" -#include "RandomSampler.hpp" -#include "StackTransform.hpp" -#include "TransformDataset.hpp" using namespace std; diff --git a/tools/train/source/demo/mnistTrain.cpp b/tools/train/source/demo/mnistTrain.cpp index f8d65a10..4697fb62 100644 --- a/tools/train/source/demo/mnistTrain.cpp +++ b/tools/train/source/demo/mnistTrain.cpp @@ -10,22 +10,21 @@ #include #include #include +#include #include -#include "DataLoader.hpp" -#include "MnistDataset.hpp" #include "DemoUnit.hpp" +#include "Mnist.hpp" +#include "MnistUtils.hpp" #include "NN.hpp" -#include "SGD.hpp" #define MNN_OPEN_TIME_TRACE #include -#include "ADAM.hpp" -#include "LearningRateScheduler.hpp" -#include "Loss.hpp" #include "RandomGenerator.hpp" #include "Transformer.hpp" using namespace MNN::Train; using namespace MNN::Express; +using namespace MNN::Train::Model; + class MnistV2 : public Module { public: MnistV2() { @@ -34,6 +33,7 @@ public: convOption.channel = {1, 10}; convOption.depthwise = false; conv1 = NN::Conv(convOption); + bn = NN::BatchNorm(10); 
convOption.reset(); convOption.kernelSize = {5, 5}; convOption.channel = {10, 10}; @@ -41,12 +41,13 @@ public: conv2 = NN::Conv(convOption); ip1 = NN::Linear(160, 100); ip2 = NN::Linear(100, 10); - registerModel({conv1, conv2, ip1, ip2}); + registerModel({conv1, bn, conv2, ip1, ip2}); } virtual std::vector onForward(const std::vector& inputs) override { VARP x = inputs[0]; x = conv1->forward(x); + x = bn->forward(x); x = _MaxPool(x, {2, 2}, {2, 2}); x = conv2->forward(x); x = _MaxPool(x, {2, 2}, {2, 2}); @@ -59,26 +60,32 @@ public: return {x}; } std::shared_ptr conv1; + std::shared_ptr bn; std::shared_ptr conv2; std::shared_ptr ip1; std::shared_ptr ip2; }; -class Mnist : public Module { +class MnistInt8 : public Module { public: - Mnist() { + MnistInt8(int bits) { + AUTOTIME; NN::ConvOption convOption; convOption.kernelSize = {5, 5}; convOption.channel = {1, 20}; - conv1 = NN::Conv(convOption); + conv1 = NN::ConvInt8(convOption, bits); convOption.reset(); convOption.kernelSize = {5, 5}; convOption.channel = {20, 50}; - conv2 = NN::Conv(convOption); - ip1 = NN::Linear(800, 500); - ip2 = NN::Linear(500, 10); + conv2 = NN::ConvInt8(convOption, bits); + convOption.reset(); + convOption.kernelSize = {1, 1}; + convOption.channel = {800, 500}; + ip1 = NN::ConvInt8(convOption, bits); + convOption.kernelSize = {1, 1}; + convOption.channel = {500, 10}; + ip2 = NN::ConvInt8(convOption, bits); dropout = NN::Dropout(0.5); registerModel({conv1, conv2, ip1, ip2, dropout}); - AUTOTIME; } virtual std::vector onForward(const std::vector& inputs) override { @@ -88,11 +95,14 @@ public: x = conv2->forward(x); x = _MaxPool(x, {2, 2}, {2, 2}); x = _Convert(x, NCHW); - x = _Reshape(x, {0, -1}); + x = _Reshape(x, {0, -1, 1, 1}); x = ip1->forward(x); x = _Relu(x); + x = _Convert(x, NCHW); x = dropout->forward(x); x = ip2->forward(x); + x = _Convert(x, NCHW); + x = _Reshape(x, {0, -1}); x = _Softmax(x, 1); return {x}; } @@ -104,90 +114,36 @@ public: }; static void train(std::shared_ptr model, std::string root) { - auto exe = Executor::getGlobalExecutor(); - BackendConfig config; - exe->setGlobalExecutorConfig(MNN_FORWARD_CPU, config, 2); - std::shared_ptr sgd(new SGD); - sgd->append(model->parameters()); - sgd->setMomentum(0.9f); - // sgd->setMomentum2(0.99f); - sgd->setWeightDecay(0.0005f); - - auto dataset = std::make_shared(root, MnistDataset::Mode::TRAIN); - // the stack transform, stack [1, 28, 28] to [n, 1, 28, 28] - auto transform = std::make_shared(); - - const size_t batchSize = 64; - const size_t numWorkers = 4; - bool shuffle = true; - - auto dataLoader = DataLoader::makeDataLoader(dataset, {transform}, batchSize, shuffle, numWorkers); - - const size_t iterations = dataset->size() / batchSize; - - auto testDataset = std::make_shared(root, MnistDataset::Mode::TEST); - const size_t testBatchSize = 20; - const size_t testNumWorkers = 1; - shuffle = false; - - auto testDataLoader = DataLoader::makeDataLoader(testDataset, {transform}, testBatchSize, shuffle, testNumWorkers); - - const size_t testIterations = testDataset->size() / testBatchSize; - - for (int epoch = 0; epoch < 50; ++epoch) { - exe->gc(); - int correct = 0; - testDataLoader->reset(); - model->setIsTraining(false); - for (int i = 0; i < testIterations; i++) { - if ((i + 1) % 100 == 0) { - std::cout << "test iteration: " << (i + 1) << std::endl; - } - auto data = testDataLoader->next(); - auto example = data[0]; - auto cast = _Cast(example.data[0]); - example.data[0] = cast * _Const(1.0f / 255.0f); - auto predict = model->forward(example.data[0]); 
- predict = _ArgMax(predict, 1); - auto accu = _Cast(_Equal(predict, _Cast(example.target[0]))).sum({}); - correct += accu->readMap()[0]; - } - auto accu = (float)correct / (float)testDataset->size(); - std::cout << "epoch: " << epoch << " accuracy: " << accu << std::endl; - - dataLoader->reset(); - AUTOTIME; - model->setIsTraining(true); - for (int i = 0; i < iterations; i++) { - // AUTOTIME; - auto trainData = dataLoader->next(); - auto example = trainData[0]; - auto cast = _Cast(example.data[0]); - example.data[0] = cast * _Const(1.0f / 255.0f); - - // Compute One-Hot - auto newTarget = _OneHot(_Cast(example.target[0]), _Scalar(10), _Scalar(1.0f), - _Scalar(0.0f)); - - auto predict = model->forward(example.data[0]); - auto loss = _CrossEntropy(predict, newTarget); - float rate = LrScheduler::inv(0.01, epoch * iterations + i, 0.0001, 0.75); - sgd->setLearningRate(rate); - if ((epoch * iterations + i) % 100 == 0) { - std::cout << "train iteration: " << epoch * iterations + i; - std::cout << " loss: " << loss->readMap()[0]; - std::cout << " lr: " << rate << std::endl; - } - sgd->step(loss); - if (i == iterations - 1) { - model->setIsTraining(false); - predict = model->forward(_Input({1, 1, 28, 28}, NCHW)); - Variable::save({predict}, "temp.mnist.mnn"); - } - } - } + MnistUtils::train(model, root); } +class MnistInt8Train : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + if (argc < 2) { + std::cout << "usage: ./runTrainDemo.out MnistInt8Train /path/to/unzipped/mnist/data/ quantbits" + << std::endl; + return 0; + } + // global random number generator, should invoke before construct the model and dataset + RandomGenerator::generator(17); + + std::string root = argv[1]; + int bits = 8; + if (argc >= 3) { + std::istringstream is(argv[2]); + is >> bits; + } + if (1 > bits || bits > 8) { + MNN_ERROR("bits must be 2-8, use 8 default\n"); + bits = 8; + } + std::shared_ptr model(new MnistInt8(bits)); + train(model, root); + return 0; + } +}; + class MnistTrain : public DemoUnit { public: virtual int run(int argc, const char* argv[]) override { @@ -198,10 +154,6 @@ public: // global random number generator, should invoke before construct the model and dataset RandomGenerator::generator(17); - auto exe = Executor::getGlobalExecutor(); - BackendConfig config; - exe->setGlobalExecutorConfig(MNN_FORWARD_CPU, config, 2); - std::string root = argv[1]; std::shared_ptr model(new Mnist); if (argc >= 3) { @@ -212,6 +164,47 @@ public: } }; +class PostTrainModule : public Module { +public: + PostTrainModule(const char* fileName) { + auto varMap = Variable::loadMap(fileName); + auto input = Variable::getInputAndOutput(varMap).first.begin()->second; + auto lastVar = varMap["pool6"]; + + NN::ConvOption option; + option.channel = {1024, 10}; + mLastConv = NN::Conv(option); + + mFix = Module::transform({input}, {lastVar}); + + // Only train last parameter + registerModel({mLastConv}); + } + virtual std::vector onForward(const std::vector& inputs) override { + auto pool = mFix->forward(_Interp({_Convert(inputs[0], NC4HW4)}, 2.0f, 2.0f, 0, 0, 1, true)); + auto result = _Softmax(_Reshape(_Convert(mLastConv->forward(pool), NCHW), {0, -1})); + return {result}; + } + std::shared_ptr mFix; + std::shared_ptr mLastConv; +}; + +class PostTrainMobilenet : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + if (argc < 3) { + std::cout + << "usage: ./runTrainDemo.out PostTrainMobilenet /path/to/mobilenet /path/to/unzipped/mnist/data/ " + << std::endl; + 
return 0; + } + std::string root = argv[2]; + std::shared_ptr model(new PostTrainModule(argv[1])); + train(model, root); + return 0; + } +}; + class PostTrain : public DemoUnit { public: virtual int run(int argc, const char* argv[]) override { @@ -221,7 +214,8 @@ public: return 0; } std::string root = argv[2]; - auto varMap = Variable::loadMap(argv[1]); + + auto varMap = Variable::loadMap(argv[1]); if (varMap.empty()) { MNN_ERROR("Can not load model %s\n", argv[1]); return 0; @@ -238,4 +232,6 @@ public: }; DemoUnitSetRegister(MnistTrain, "MnistTrain"); +DemoUnitSetRegister(MnistInt8Train, "MnistInt8Train"); DemoUnitSetRegister(PostTrain, "PostTrain"); +DemoUnitSetRegister(PostTrainMobilenet, "PostTrainMobilenet"); diff --git a/tools/train/source/demo/nnGradTest.cpp b/tools/train/source/demo/nnGradTest.cpp index 452dac53..b4e7b1d9 100644 --- a/tools/train/source/demo/nnGradTest.cpp +++ b/tools/train/source/demo/nnGradTest.cpp @@ -6,10 +6,11 @@ // Copyright © 2018, Alibaba Group Holding Limited // +#include +#include "ADAM.hpp" #include "DemoUnit.hpp" #include "NN.hpp" #include "SGD.hpp" -#include using namespace MNN::Express; using namespace MNN::Train; #include @@ -192,7 +193,7 @@ public: weightTarget2 = _Const(targetVecs.data(), {1, oc, kh, kw}, NCHW); } - std::shared_ptr sgd(new SGD); + std::shared_ptr sgd(new ADAM); sgd->setLearningRate(0.01f); sgd->append(convModule->parameters()); std::vector randomInputs(1 * ic * ih * iw); @@ -282,7 +283,7 @@ public: } auto weightTarget = _Const(targetVecs.data(), {b, l, h}, NCHW); auto weightOrigin = _Const(0.0f, {b, l, h}, NCHW); - std::shared_ptr sgd(new SGD); + std::shared_ptr sgd(new ADAM); sgd->setLearningRate(0.01f); sgd->append({weightOrigin}); std::vector randomInputs(b * e * l); diff --git a/tools/train/source/demo/quanMnist.cpp b/tools/train/source/demo/quanMnist.cpp new file mode 100644 index 00000000..928eab0e --- /dev/null +++ b/tools/train/source/demo/quanMnist.cpp @@ -0,0 +1,119 @@ +// +// quanMnist.cpp +// MNN +// +// Created by MNN on 2020/01/08. 
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include +#include +#include +#include +#include +#include "DemoUnit.hpp" +#include "FixModule.hpp" +#include "NN.hpp" +#include "PipelineModule.hpp" +#define MNN_OPEN_TIME_TRACE +#include +#include +#include "MNN_generated.h" +#include "MnistUtils.hpp" +#include "RandomGenerator.hpp" +#include "Transformer.hpp" + +using namespace MNN; +using namespace MNN::Express; +using namespace MNN::Train; + +class QuanMnist : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + if (argc < 3) { + MNN_PRINT("usage: ./runTrainDemo.out QuanMnist /path/to/mnistModel /path/to/unzipped/mnist/data/ [bits]\n"); + return 0; + } + std::string root = argv[2]; + auto varMap = Variable::loadMap(argv[1]); + if (varMap.empty()) { + MNN_ERROR("Can not load model %s\n", argv[1]); + return 0; + } + int bits = 8; + if (argc > 3) { + std::istringstream is(argv[3]); + is >> bits; + } + if (1 > bits || bits > 8) { + MNN_ERROR("bits must be 2-8, use 8 default\n"); + bits = 8; + } + FUNC_PRINT(bits); + auto inputOutputs = Variable::getInputAndOutput(varMap); + auto inputs = Variable::mapToSequence(inputOutputs.first); + auto outputs = Variable::mapToSequence(inputOutputs.second); + std::function, std::shared_ptr>(EXPRP)> transformFunction = + [bits](EXPRP source) { + if (source->get() == nullptr) { + return std::make_pair(std::vector{}, std::shared_ptr(nullptr)); + } + auto convExtracted = NN::Utils::ExtractConvolution(source); + if (std::get<1>(convExtracted) == nullptr) { + return std::make_pair(std::vector{}, std::shared_ptr(nullptr)); + } + std::shared_ptr module(NN::ConvInt8(std::get<0>(convExtracted), std::get<1>(convExtracted), + std::get<2>(convExtracted), std::get<3>(convExtracted), + bits)); + return std::make_pair(std::vector{0}, module); + }; + Transformer::turnModelToTrainable(Transformer::TrainConfig())->onExecute(outputs); + std::shared_ptr model(new PipelineModule(inputs, outputs, transformFunction)); + + MnistUtils::train(model, root); + return 0; + } +}; +DemoUnitSetRegister(QuanMnist, "QuanMnist"); + +class OctaveMnist : public DemoUnit { +public: + virtual int run(int argc, const char* argv[]) override { + if (argc < 3) { + MNN_PRINT("usage: ./runTrainDemo.out OctaveMnist /path/to/mnistModel /path/to/unzipped/mnist/data/ \n"); + return 0; + } + std::string root = argv[2]; + auto varMap = Variable::loadMap(argv[1]); + if (varMap.empty()) { + MNN_ERROR("Can not load model %s\n", argv[1]); + return 0; + } + auto inputOutputs = Variable::getInputAndOutput(varMap); + auto inputs = Variable::mapToSequence(inputOutputs.first); + auto outputs = Variable::mapToSequence(inputOutputs.second); + std::function, std::shared_ptr>(EXPRP)> transformFunction = + [](EXPRP source) { + if (source->get() == nullptr) { + return std::make_pair(std::vector{}, std::shared_ptr(nullptr)); + } + auto convExtracted = NN::Utils::ExtractConvolution(source); + if (std::get<1>(convExtracted) == nullptr) { + return std::make_pair(std::vector{}, std::shared_ptr(nullptr)); + } + if (std::get<0>(convExtracted).channel[0] <= 4 || std::get<0>(convExtracted).channel[1] <= 4) { + return std::make_pair(std::vector{}, std::shared_ptr(nullptr)); + } + std::shared_ptr module(NN::ConvOctave(std::get<0>(convExtracted), std::get<1>(convExtracted), + std::get<2>(convExtracted), std::get<3>(convExtracted), + 0.5f, 0.5f)); + return std::make_pair(std::vector{0}, module); + }; + Transformer::turnModelToTrainable(Transformer::TrainConfig())->onExecute(outputs); + 
std::shared_ptr model(new PipelineModule(inputs, outputs, transformFunction)); + + MnistUtils::train(model, root); + return 0; + } +}; +DemoUnitSetRegister(OctaveMnist, "OctaveMnist"); diff --git a/tools/train/source/grad/BatchNormGrad.cpp b/tools/train/source/grad/BatchNormGrad.cpp deleted file mode 100644 index 1a729f99..00000000 --- a/tools/train/source/grad/BatchNormGrad.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// -// BatchNormGrad.cpp -// MNN -// -// Created by MNN on 2019/11/07. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "BatchNormGrad.hpp" -#include -#include -#include "core/Macro.h" - -using namespace std; -using namespace MNN; -using namespace MNN::Express; - -class BatchNormGrad : public OpGrad { -public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& outputs, - const std::vector& backDiff) override { - // input, scale, bias, running_mean, running_variance, epsilon, momentum, is_training - // scale and bias are learnable - std::shared_ptr forwardOp(expr->get()->UnPack()); - std::vector res; - auto inputs = expr->inputs(); - res.resize(inputs.size()); // only back propgate to input, scale, bias - - auto input = inputs[0]; - auto scale = inputs[1]; - auto bias = inputs[2]; - auto output = outputs[0]; - auto normalizedData = outputs[3]; // (input - sample_mean) / sqrt(sample_variance + epsilon) - auto rSampleStd = outputs[4]; // rsqrt(sample_variance + epsilon) - - MNN_ASSERT(scale->getInfo()->dim.size() == 1); - // reshape in order to use broadcast - auto factor = _Reshape(_Multiply(scale, rSampleStd), {1, scale->getInfo()->dim[0], 1, 1}, NCHW); - res[0] = _Multiply(backDiff[0], factor); - res[0]->setName(forwardOp->name + "_BN_Input_Grad"); - - res[1] = _ReduceSum(_Multiply(backDiff[0], normalizedData), {0, 2, 3}, false); - res[1]->setName(forwardOp->name + "_BN_Scale_Grad"); - - res[2] = _ReduceSum(backDiff[0], {0, 2, 3}, false); - res[2]->setName(forwardOp->name + "_BN_Bias_Grad"); - - return res; - } -}; -static const auto gRegister = []() { - static BatchNormGrad _c; - OpGrad::insert(OpType_BatchNorm, &_c); - return true; -}(); diff --git a/tools/train/source/grad/BinaryGrad.cpp b/tools/train/source/grad/BinaryGrad.cpp index e1538aa1..3bbcf7f4 100644 --- a/tools/train/source/grad/BinaryGrad.cpp +++ b/tools/train/source/grad/BinaryGrad.cpp @@ -13,7 +13,7 @@ using namespace MNN; using namespace MNN::Express; class EltwiseGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector res; auto inputs = expr->inputs(); @@ -55,7 +55,7 @@ public: } case MNN::EltwiseType_MAXIMUM: { for (int i = 0; i < inputs.size(); ++i) { - auto mask = _Sign(inputs[i] - output[i]) + _Const(1.0f, {}, NCHW); + auto mask = _Sign(inputs[i] - Variable::create(expr, i)) + _Const(1.0f, {}, NCHW); res[i] = mask * outputDiff; } break; @@ -68,13 +68,17 @@ public: }; class BinaryGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector res; auto inputs = expr->inputs(); res.resize(inputs.size()); auto op = expr->get(); auto outputDiff = backwardOutput[0]; + std::vector output(expr->outputSize()); + for (int i = 0; i < expr->outputSize(); ++i) { + output[i] = Variable::create(expr, i); + } switch (op->main_as_BinaryOp()->opType()) { case BinaryOpOperation_ADD: { 
res[0] = outputDiff; @@ -145,7 +149,7 @@ public: reduceDims.clear(); auto diff = (int)backShape->dim.size() - (int)inputShape->dim.size(); for (int j = 0; j < inputShape->dim.size(); j++) { - if (backShape->dim[j+diff] > 1 && inputShape->dim[j] == 1) { + if (backShape->dim[j + diff] > 1 && inputShape->dim[j] == 1) { reduceDims.emplace_back(j); } } diff --git a/tools/train/source/grad/ConcatGrad.cpp b/tools/train/source/grad/ConcatGrad.cpp index 0f013181..09fe384a 100644 --- a/tools/train/source/grad/ConcatGrad.cpp +++ b/tools/train/source/grad/ConcatGrad.cpp @@ -14,7 +14,7 @@ using namespace MNN::Express; class ConcatGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector res(expr->inputs().size()); if (!expr->requireInfo()) { diff --git a/tools/train/source/grad/ConvGrad.cpp b/tools/train/source/grad/ConvGrad.cpp index d6965c3a..1d79009d 100644 --- a/tools/train/source/grad/ConvGrad.cpp +++ b/tools/train/source/grad/ConvGrad.cpp @@ -14,7 +14,7 @@ using namespace MNN; class ConvGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { auto inputs = expr->inputs(); if (inputs.size() == 1) { @@ -69,7 +69,7 @@ public: class DeconvGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { auto inputs = expr->inputs(); if (inputs.size() == 1) { diff --git a/tools/train/source/grad/InterpGrad.cpp b/tools/train/source/grad/InterpGrad.cpp index bedf497f..79bcc39a 100644 --- a/tools/train/source/grad/InterpGrad.cpp +++ b/tools/train/source/grad/InterpGrad.cpp @@ -13,7 +13,7 @@ using namespace MNN::Express; class InterpGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { auto op = expr->get(); // FIXME, the grad may be compute a little error diff --git a/tools/train/source/grad/MatMulGrad.cpp b/tools/train/source/grad/MatMulGrad.cpp index 8bcd577a..3a655c5c 100644 --- a/tools/train/source/grad/MatMulGrad.cpp +++ b/tools/train/source/grad/MatMulGrad.cpp @@ -15,7 +15,7 @@ public: BatchMatMulGrad() { mType = LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector res; auto inputs = expr->inputs(); @@ -78,7 +78,7 @@ public: MatMulGrad() { mType = LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector res; auto inputs = expr->inputs(); diff --git a/tools/train/source/grad/OpGrad.cpp b/tools/train/source/grad/OpGrad.cpp index b761670d..d7604406 100644 --- a/tools/train/source/grad/OpGrad.cpp +++ b/tools/train/source/grad/OpGrad.cpp @@ -52,11 +52,7 @@ std::map OpGrad::grad(VARP loss, const std::setname().c_str(), expr->get()->type()); continue; } - std::vector outputs(expr->outputSize()); - for (int i = 0; i < expr->outputSize(); ++i) { - outputs[i] = Variable::create(expr, i); - } - auto inputGrad = grad->onGrad(expr, outputs, 
backwardMap[expr]); + auto inputGrad = grad->onGrad(expr, backwardMap[expr]); auto empty = true; for (auto grad : inputGrad) { if (nullptr != grad) { @@ -65,7 +61,7 @@ std::map OpGrad::grad(VARP loss, const std::setname().c_str(), expr->get()->type()); + // MNN_PRINT("Can't grad for %s, %d\n", expr->name().c_str(), expr->get()->type()); continue; } MNN_ASSERT(inputGrad.size() <= inputs.size()); @@ -99,6 +95,7 @@ std::map OpGrad::grad(VARP loss, const std::setexpr().second]; } } + // MNN_PRINT("Grad: %d <- %d\n", grads.size(), parameters.size()); return grads; } diff --git a/tools/train/source/grad/OpGrad.hpp b/tools/train/source/grad/OpGrad.hpp index c9049b0c..2072caa7 100644 --- a/tools/train/source/grad/OpGrad.hpp +++ b/tools/train/source/grad/OpGrad.hpp @@ -27,7 +27,7 @@ public: return mType; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) = 0; static OpGrad* get(int type); diff --git a/tools/train/source/grad/PermuteGrad.cpp b/tools/train/source/grad/PermuteGrad.cpp index 0886c70f..f08e7a49 100644 --- a/tools/train/source/grad/PermuteGrad.cpp +++ b/tools/train/source/grad/PermuteGrad.cpp @@ -13,7 +13,7 @@ using namespace MNN::Express; class TransposeGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { auto shapeInfo = expr->inputs()[1]->getInfo(); auto shape = expr->inputs()[1]->readMap(); @@ -40,7 +40,7 @@ public: class PermuteGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { MNN_ASSERT(expr->inputs().size() == 1); auto op = expr->get(); diff --git a/tools/train/source/grad/PoolGrad.cpp b/tools/train/source/grad/PoolGrad.cpp index 8cc3f2dd..3a2fff4e 100644 --- a/tools/train/source/grad/PoolGrad.cpp +++ b/tools/train/source/grad/PoolGrad.cpp @@ -18,7 +18,7 @@ public: mType = SEMI_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result(1, nullptr); auto outputDiff = backwardOutput[0]; @@ -29,7 +29,8 @@ public: newOp->main.type = OpParameter_Pool; newOp->main.value = copyP; - result[0] = Variable::create(Expr::create(std::move(newOp), {expr->inputs()[0], output[0], outputDiff})); + result[0] = Variable::create( + Expr::create(std::move(newOp), {expr->inputs()[0], Variable::create(expr, 0), outputDiff})); result[0]->setName(expr->name() + "_Grad"); return result; } diff --git a/tools/train/source/grad/ReduceGrad.cpp b/tools/train/source/grad/ReduceGrad.cpp index c638aea4..efaa9eac 100644 --- a/tools/train/source/grad/ReduceGrad.cpp +++ b/tools/train/source/grad/ReduceGrad.cpp @@ -13,7 +13,7 @@ using namespace MNN::Express; class ReduceGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result; auto inputs = expr->inputs(); @@ -73,7 +73,7 @@ public: }; class FillGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { return 
{backwardOutput[0].sum({})}; } diff --git a/tools/train/source/grad/ReluGrad.cpp b/tools/train/source/grad/ReluGrad.cpp index 3a10abdf..16fbca2f 100644 --- a/tools/train/source/grad/ReluGrad.cpp +++ b/tools/train/source/grad/ReluGrad.cpp @@ -16,7 +16,7 @@ public: ReluGrad() { mType = SEMI_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result(1, nullptr); @@ -37,7 +37,7 @@ public: Relu6Grad() { mType = SEMI_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result{nullptr}; diff --git a/tools/train/source/grad/ReshapeGrad.cpp b/tools/train/source/grad/ReshapeGrad.cpp index de4baf23..3a0d25f9 100644 --- a/tools/train/source/grad/ReshapeGrad.cpp +++ b/tools/train/source/grad/ReshapeGrad.cpp @@ -14,7 +14,7 @@ using namespace MNN::Express; class ReshapeGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { auto inputs = expr->inputs(); std::vector result(inputs.size(), nullptr); diff --git a/tools/train/source/grad/SelectGrad.cpp b/tools/train/source/grad/SelectGrad.cpp index 42972dc8..b3a724ac 100644 --- a/tools/train/source/grad/SelectGrad.cpp +++ b/tools/train/source/grad/SelectGrad.cpp @@ -17,7 +17,7 @@ public: SelectGrad() { mType = SEMI_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { auto inputs = expr->inputs(); std::vector result(inputs.size(), nullptr); diff --git a/tools/train/source/grad/SliceGrad.cpp b/tools/train/source/grad/SliceGrad.cpp index ed17f647..6cc16841 100644 --- a/tools/train/source/grad/SliceGrad.cpp +++ b/tools/train/source/grad/SliceGrad.cpp @@ -13,7 +13,7 @@ using namespace MNN::Express; class SliceGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { MNN_ASSERT(expr->inputs().size() == 1); auto slice = expr->get()->main_as_Slice(); @@ -21,7 +21,7 @@ public: std::vector res{nullptr}; std::vector validBackward(backwardOutput.size()); for (int i = 0; i < backwardOutput.size(); ++i) { - auto origin = output[i]; + auto origin = Variable::create(expr, i); if (nullptr != backwardOutput[i]) { validBackward[i] = backwardOutput[i]; continue; diff --git a/tools/train/source/grad/SoftmaxGrad.cpp b/tools/train/source/grad/SoftmaxGrad.cpp index f7163988..6fa7e271 100644 --- a/tools/train/source/grad/SoftmaxGrad.cpp +++ b/tools/train/source/grad/SoftmaxGrad.cpp @@ -16,7 +16,7 @@ public: SoftmaxGrad() { mType = NO_LINEAR; } - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result(1, nullptr); unique_ptr newOp(new OpT); @@ -24,7 +24,8 @@ public: newOp->main.type = OpParameter_Axis; newOp->main.value = new AxisT; newOp->main.AsAxis()->axis = expr->get()->main_as_Axis()->axis(); - result[0] = Express::Variable::create(Express::Expr::create(std::move(newOp), {output[0], backwardOutput[0]})); + result[0] = Express::Variable::create( + 
Express::Expr::create(std::move(newOp), {Express::Variable::create(expr, 0), backwardOutput[0]})); result[0]->setName(expr->name() + "_Grad"); return result; } diff --git a/tools/train/source/grad/TensorConvertGrad.cpp b/tools/train/source/grad/TensorConvertGrad.cpp index e0599044..9bfea71c 100644 --- a/tools/train/source/grad/TensorConvertGrad.cpp +++ b/tools/train/source/grad/TensorConvertGrad.cpp @@ -13,7 +13,7 @@ using namespace MNN::Express; class TensorConvertGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result{nullptr}; auto originInput = expr->inputs()[0]; diff --git a/tools/train/source/grad/UnaryGrad.cpp b/tools/train/source/grad/UnaryGrad.cpp index 200bbd09..c346f840 100644 --- a/tools/train/source/grad/UnaryGrad.cpp +++ b/tools/train/source/grad/UnaryGrad.cpp @@ -14,12 +14,13 @@ using namespace MNN::Express; class UnaryGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::unique_ptr forwardOp(expr->get()->UnPack()); auto outputDiff = backwardOutput[0]; auto input = expr->inputs()[0]; std::vector res(1, nullptr); + std::vector output{Variable::create(expr, 0)}; switch (forwardOp->main.AsUnaryOp()->opType) { case MNN::UnaryOpOperation_LOG1P: { @@ -58,7 +59,6 @@ public: break; } default: - MNN_ASSERT(false); return res; } @@ -68,10 +68,11 @@ public: }; class SigmoidGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result(1, nullptr); auto outputDiff = backwardOutput[0]; + std::vector output{Variable::create(expr, 0)}; // y = (1/(1+e(-x))) , dy = y(1-y) * dx = (y*y - y)*dx auto mul = _Multiply(output[0], output[0]); @@ -85,9 +86,11 @@ public: class TanhGrad : public OpGrad { public: - virtual std::vector onGrad(Express::EXPRP expr, const std::vector& output, + virtual std::vector onGrad(Express::EXPRP expr, const std::vector& backwardOutput) override { std::vector result{nullptr}; + std::vector output{Variable::create(expr, 0)}; + auto outputDiff = backwardOutput[0]; // d tanh(x) = (1-tanh(x)^2)dx result[0] = (_Const(1.0f, {}, NCHW) - _Square(output[0])) * outputDiff; diff --git a/tools/train/source/grad/ZeroGrad.cpp b/tools/train/source/grad/ZeroGrad.cpp new file mode 100644 index 00000000..e77f1866 --- /dev/null +++ b/tools/train/source/grad/ZeroGrad.cpp @@ -0,0 +1,30 @@ +// +// ZeroGrad.cpp +// MNN +// +// Created by MNN on 2019/04/22. 
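The hunks above all make the same change: onGrad no longer receives the forward outputs, so an implementation that needs them recreates them from the expression. A minimal sketch of the new two-argument form; the class name and the elided math are illustrative, only the signature and the Variable::create pattern come from the patch:

#include "OpGrad.hpp"
class ExampleGrad : public OpGrad {
public:
    virtual std::vector<Express::VARP> onGrad(Express::EXPRP expr,
                                              const std::vector<Express::VARP>& backwardOutput) override {
        auto y  = Express::Variable::create(expr, 0); // forward output, rebuilt instead of passed in
        auto dy = backwardOutput[0];                  // gradient arriving from the op's consumer
        // ... combine y, dy and expr->inputs() as the specific operator requires ...
        return {dy}; // placeholder: a real implementation returns one gradient per input
    }
};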
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "ReluGrad.hpp" +#include "core/Macro.h" +using namespace std; +using namespace MNN; + +class ZeroGrad : public OpGrad { +public: + ZeroGrad() { + mType = LINEAR; + } + virtual std::vector onGrad(Express::EXPRP expr, + const std::vector& backwardOutput) override { + std::vector result(1, nullptr); + result[0] = backwardOutput[0]; + return result; + } +}; +static const auto gRegister = []() { + static ZeroGrad _c; + OpGrad::insert(OpType_ZeroGrad, &_c); + return true; +}(); diff --git a/tools/train/source/models/Mnist.cpp b/tools/train/source/models/Mnist.cpp new file mode 100644 index 00000000..5f61dd89 --- /dev/null +++ b/tools/train/source/models/Mnist.cpp @@ -0,0 +1,49 @@ +// +// Mnist.cpp +// MNN +// +// Created by MNN on 2020/01/10. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "Mnist.hpp" + +namespace MNN { +namespace Train { +namespace Model { + +Mnist::Mnist() { + NN::ConvOption convOption; + convOption.kernelSize = {5, 5}; + convOption.channel = {1, 20}; + conv1 = NN::Conv(convOption); + convOption.reset(); + convOption.kernelSize = {5, 5}; + convOption.channel = {20, 50}; + conv2 = NN::Conv(convOption); + ip1 = NN::Linear(800, 500); + ip2 = NN::Linear(500, 10); + dropout = NN::Dropout(0.5); + registerModel({conv1, conv2, ip1, ip2, dropout}); +} + +std::vector Mnist::onForward(const std::vector& inputs) { + using namespace Express; + VARP x = inputs[0]; + x = conv1->forward(x); + x = _MaxPool(x, {2, 2}, {2, 2}); + x = conv2->forward(x); + x = _MaxPool(x, {2, 2}, {2, 2}); + x = _Convert(x, NCHW); + x = _Reshape(x, {0, -1}); + x = ip1->forward(x); + x = _Relu(x); + x = dropout->forward(x); + x = ip2->forward(x); + x = _Softmax(x, 1); + return {x}; +} + +} // namespace Model +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/models/Mnist.hpp b/tools/train/source/models/Mnist.hpp new file mode 100644 index 00000000..c98f9e5f --- /dev/null +++ b/tools/train/source/models/Mnist.hpp @@ -0,0 +1,36 @@ +// +// Mnist.hpp +// MNN +// +// Created by MNN on 2020/01/10. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MnistModels_hpp +#define MnistModels_hpp + +#include "Module.hpp" +#include "NN.hpp" + +namespace MNN { +namespace Train { +namespace Model { + +class MNN_PUBLIC Mnist : public Module { +public: + Mnist(); + + virtual std::vector onForward(const std::vector& inputs) override; + + std::shared_ptr conv1; + std::shared_ptr conv2; + std::shared_ptr ip1; + std::shared_ptr ip2; + std::shared_ptr dropout; +}; + +} // namespace Model +} // namespace Train +} // namespace MNN + +#endif // MnistModels_hpp diff --git a/tools/train/source/models/MobilenetUtils.cpp b/tools/train/source/models/MobilenetUtils.cpp new file mode 100644 index 00000000..18d1f0f2 --- /dev/null +++ b/tools/train/source/models/MobilenetUtils.cpp @@ -0,0 +1,33 @@ +// +// MobilenetUtils.cpp +// MNN +// +// Created by MNN on 2020/01/10. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "MobilenetUtils.hpp" +#include + +namespace MNN { +namespace Train { +namespace Model { + +// https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py +int makeDivisible(int v, int divisor, int minValue) { + if (minValue == 0) { + minValue = divisor; + } + int newV = std::max(minValue, int(v + divisor / 2) / divisor * divisor); + + // Make sure that round down does not go down by more than 10%. 
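// A few worked values for the rule above (illustrative only, not part of the patch; divisor = 8, minValue defaulting to the divisor):
//   makeDivisible(24) -> max(8, (24 + 4) / 8 * 8) = 24                        (already a multiple of 8)
//   makeDivisible(17) -> max(8, (17 + 4) / 8 * 8) = 16, and 16 >= 0.9 * 17, so 16 is kept
//   makeDivisible(10) -> max(8, (10 + 4) / 8 * 8) = 8,  but 8 < 0.9 * 10,   so it is bumped to 16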
+ if (newV < 0.9 * v) { + newV += divisor; + } + + return newV; +} + +} // namespace Model +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/models/MobilenetUtils.hpp b/tools/train/source/models/MobilenetUtils.hpp new file mode 100644 index 00000000..2702f8d7 --- /dev/null +++ b/tools/train/source/models/MobilenetUtils.hpp @@ -0,0 +1,23 @@ +// +// MobilenetUtils.hpp +// MNN +// +// Created by MNN on 2020/01/10. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MobilenetUtils_hpp +#define MobilenetUtils_hpp + +namespace MNN { +namespace Train { +namespace Model { + +// https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py +int makeDivisible(int v, int divisor = 8, int minValue = 0); + +} // namespace Model +} // namespace Train +} // namespace MNN + +#endif // MobilenetUtils_hpp diff --git a/tools/train/source/models/MobilenetV1.cpp b/tools/train/source/models/MobilenetV1.cpp new file mode 100644 index 00000000..0c060334 --- /dev/null +++ b/tools/train/source/models/MobilenetV1.cpp @@ -0,0 +1,125 @@ +// +// MobilenetV1.cpp +// MNN +// +// Created by MNN on 2020/01/08. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "MobilenetV1.hpp" + +namespace MNN { +namespace Train { +namespace Model { + +_ConvBlock::_ConvBlock(std::vector inputOutputChannels, int stride) { + int inputChannels = inputOutputChannels[0], outputChannels = inputOutputChannels[1]; + + NN::ConvOption convOption; + convOption.kernelSize = {3, 3}; + convOption.channel = {inputChannels, inputChannels}; + convOption.padMode = Express::SAME; + convOption.stride = {stride, stride}; + convOption.depthwise = true; + conv3x3 = NN::Conv(convOption, false, std::shared_ptr(Initializer::MSRA())); + + bn1 = NN::BatchNorm(inputChannels); + + convOption.reset(); + convOption.kernelSize = {1, 1}; + convOption.channel = {inputChannels, outputChannels}; + convOption.padMode = Express::SAME; + convOption.stride = {1, 1}; + convOption.depthwise = false; + conv1x1 = NN::Conv(convOption, false, std::shared_ptr(Initializer::MSRA())); + + bn2 = NN::BatchNorm(outputChannels); + + registerModel({conv3x3, bn1, conv1x1, bn2}); +} + +std::vector _ConvBlock::onForward(const std::vector &inputs) { + using namespace Express; + VARP x = inputs[0]; + + x = conv3x3->forward(x); + x = bn1->forward(x); + x = _Relu6(x); + x = conv1x1->forward(x); + x = bn2->forward(x); + x = _Relu6(x); + + return {x}; +} + +MobilenetV1::MobilenetV1(int numClasses, float widthMult, int divisor) { + NN::ConvOption convOption; + convOption.kernelSize = {3, 3}; + int outputChannels = makeDivisible(32 * widthMult, divisor); + convOption.channel = {3, outputChannels}; + convOption.padMode = Express::SAME; + convOption.stride = {2, 2}; + conv1 = NN::Conv(convOption, false, std::shared_ptr(Initializer::MSRA())); + + bn1 = NN::BatchNorm(outputChannels); + + std::vector > convSettings; + convSettings.push_back({64, 1}); + convSettings.push_back({128, 2}); + convSettings.push_back({256, 2}); + convSettings.push_back({512, 6}); + convSettings.push_back({1024, 2}); + + int inputChannels = outputChannels; + for (int i = 0; i < convSettings.size(); i++) { + auto setting = convSettings[i]; + outputChannels = setting[0]; + int times = setting[1]; + outputChannels = makeDivisible(outputChannels * widthMult, divisor); + + for (int j = 0; j < times; j++) { + int stride = 1; + if (times > 1 && j == 0) { + stride = 2; + } + + convBlocks.emplace_back(ConvBlock({inputChannels, outputChannels}, stride)); + 
inputChannels = outputChannels; + } + } + + dropout = NN::Dropout(0.1); + fc = NN::Linear(1024, numClasses, true, std::shared_ptr(Initializer::MSRA())); + + registerModel({conv1, bn1, dropout, fc}); + registerModel(convBlocks); +} + +std::vector MobilenetV1::onForward(const std::vector &inputs) { + using namespace Express; + VARP x = inputs[0]; + + x = conv1->forward(x); + x = bn1->forward(x); + x = _Relu6(x); + + for (int i = 0; i < convBlocks.size(); i++) { + x = convBlocks[i]->forward(x); + } + + // global avg pooling + x = _AvePool(x, {-1, -1}); + + x = _Convert(x, NCHW); + x = _Reshape(x, {0, -1}); + + x = dropout->forward(x); + x = fc->forward(x); + + x = _Softmax(x, 1); + return {x}; +} + +} // namespace Model +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/models/MobilenetV1.hpp b/tools/train/source/models/MobilenetV1.hpp new file mode 100644 index 00000000..cb16e98a --- /dev/null +++ b/tools/train/source/models/MobilenetV1.hpp @@ -0,0 +1,55 @@ +// +// MobilenetV1.hpp +// MNN +// +// Created by MNN on 2020/01/08. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MobilenetV1_hpp +#define MobilenetV1_hpp + +#include +#include "Initializer.hpp" +#include "MobilenetUtils.hpp" +#include "Module.hpp" +#include "NN.hpp" + +namespace MNN { +namespace Train { +namespace Model { + +class _ConvBlock : public Module { +public: + _ConvBlock(std::vector inputOutputChannels, int stride); + + virtual std::vector onForward(const std::vector &inputs) override; + + std::shared_ptr conv3x3; + std::shared_ptr bn1; + std::shared_ptr conv1x1; + std::shared_ptr bn2; +}; + +std::shared_ptr ConvBlock(std::vector inputOutputChannels, int stride) { + return std::shared_ptr(new _ConvBlock(inputOutputChannels, stride)); +} + +class MNN_PUBLIC MobilenetV1 : public Module { +public: + MobilenetV1(int numClasses = 1000, float widthMult = 1.0f, int divisor = 8); + + virtual std::vector onForward(const std::vector &inputs) override; + + std::shared_ptr conv1; + std::shared_ptr bn1; + std::vector > convBlocks; + std::shared_ptr dropout; + std::shared_ptr fc; +}; + +} // namespace Model +} // namespace Train +} // namespace MNN + +#endif // MobilenetV1_hpp diff --git a/tools/train/source/models/MobilenetV2.cpp b/tools/train/source/models/MobilenetV2.cpp new file mode 100644 index 00000000..c2a18b90 --- /dev/null +++ b/tools/train/source/models/MobilenetV2.cpp @@ -0,0 +1,158 @@ +// +// MobilenetV2.cpp +// MNN +// +// Created by MNN on 2020/01/08. 
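For orientation, a minimal sketch of driving a model built this way; the input shape and batch size are illustrative assumptions, while MobilenetV1's constructor and Module::forward (visible in the Module.cpp hunk below) come from the sources:

#include "MobilenetV1.hpp"
using namespace MNN::Express;
using namespace MNN::Train::Model;

std::shared_ptr<MobilenetV1> net(new MobilenetV1(/*numClasses*/ 1000, /*widthMult*/ 1.0f));
VARP image  = _Input({1, 3, 224, 224}, NC4HW4); // one 3-channel 224x224 image in MNN's packed layout
VARP logits = net->forward(image);              // Module::forward wraps onForward({input})[0]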
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "MobilenetV2.hpp" + +namespace MNN { +namespace Train { +namespace Model { + +_ConvBnRelu::_ConvBnRelu(std::vector inputOutputChannels, int kernelSize, int stride, bool depthwise) { + int inputChannels = inputOutputChannels[0], outputChannels = inputOutputChannels[1]; + + NN::ConvOption convOption; + convOption.kernelSize = {kernelSize, kernelSize}; + convOption.channel = {inputChannels, outputChannels}; + convOption.padMode = Express::SAME; + convOption.stride = {stride, stride}; + convOption.depthwise = depthwise; + conv = NN::Conv(convOption, false, std::shared_ptr(Initializer::MSRA())); + + bn = NN::BatchNorm(outputChannels); + + registerModel({conv, bn}); +} + +std::vector _ConvBnRelu::onForward(const std::vector &inputs) { + using namespace Express; + VARP x = inputs[0]; + + x = conv->forward(x); + x = bn->forward(x); + x = _Relu6(x); + + return {x}; +} + +_BottleNeck::_BottleNeck(std::vector inputOutputChannels, int stride, int expandRatio) { + int inputChannels = inputOutputChannels[0], outputChannels = inputOutputChannels[1]; + int expandChannels = inputChannels * expandRatio; + + if (stride == 1 && inputChannels == outputChannels) { + useShortcut = true; + } + + if (expandRatio != 1) { + layers.emplace_back(ConvBnRelu({inputChannels, expandChannels}, 1)); + } + + layers.emplace_back(ConvBnRelu({expandChannels, expandChannels}, 3, stride, true)); + + NN::ConvOption convOption; + convOption.kernelSize = {1, 1}; + convOption.channel = {expandChannels, outputChannels}; + convOption.padMode = Express::SAME; + convOption.stride = {1, 1}; + convOption.depthwise = false; + layers.emplace_back(NN::Conv(convOption, false, std::shared_ptr(Initializer::MSRA()))); + + layers.emplace_back(NN::BatchNorm(outputChannels)); + + registerModel(layers); +} + +std::vector _BottleNeck::onForward(const std::vector &inputs) { + using namespace Express; + VARP x = inputs[0]; + + for (int i = 0; i < layers.size(); i++) { + x = layers[i]->forward(x); + } + + if (useShortcut) { + x = x + inputs[0]; + } + + return {x}; +} + +MobilenetV2::MobilenetV2(int numClasses, float widthMult, int divisor) { + int inputChannels = 32; + int lastChannels = 1280; + + std::vector > invertedResidualSetting; + invertedResidualSetting.push_back({1, 16, 1, 1}); + invertedResidualSetting.push_back({6, 24, 2, 2}); + invertedResidualSetting.push_back({6, 32, 3, 2}); + invertedResidualSetting.push_back({6, 64, 4, 2}); + invertedResidualSetting.push_back({6, 96, 3, 1}); + invertedResidualSetting.push_back({6, 160, 3, 2}); + invertedResidualSetting.push_back({6, 320, 1, 1}); + + inputChannels = makeDivisible(inputChannels * widthMult, divisor); + lastChannels = makeDivisible(lastChannels * std::max(1.0f, widthMult), divisor); + + firstConv = ConvBnRelu({3, inputChannels}, 3, 2); + + for (int i = 0; i < invertedResidualSetting.size(); i++) { + std::vector setting = invertedResidualSetting[i]; + int t = setting[0]; + int c = setting[1]; + int n = setting[2]; + int s = setting[3]; + + int outputChannels = makeDivisible(c * widthMult, divisor); + + for (int j = 0; j < n; j++) { + int stride = 1; + if (j == 0) { + stride = s; + } + + bottleNeckBlocks.emplace_back(BottleNeck({inputChannels, outputChannels}, stride, t)); + inputChannels = outputChannels; + } + } + + lastConv = ConvBnRelu({inputChannels, lastChannels}, 1); + + dropout = NN::Dropout(0.1); + fc = NN::Linear(lastChannels, numClasses, true, std::shared_ptr(Initializer::MSRA())); + + registerModel({firstConv, 
lastConv, dropout, fc}); + registerModel(bottleNeckBlocks); +} + +std::vector MobilenetV2::onForward(const std::vector &inputs) { + using namespace Express; + VARP x = inputs[0]; + + x = firstConv->forward(x); + + for (int i = 0; i < bottleNeckBlocks.size(); i++) { + x = bottleNeckBlocks[i]->forward(x); + } + + x = lastConv->forward(x); + + // global avg pooling + x = _AvePool(x, {-1, -1}); + + x = _Convert(x, NCHW); + x = _Reshape(x, {0, -1}); + + x = dropout->forward(x); + x = fc->forward(x); + + x = _Softmax(x, 1); + return {x}; +} + +} // namespace Model +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/models/MobilenetV2.hpp b/tools/train/source/models/MobilenetV2.hpp new file mode 100644 index 00000000..5fc446ce --- /dev/null +++ b/tools/train/source/models/MobilenetV2.hpp @@ -0,0 +1,68 @@ +// +// MobilenetV2.hpp +// MNN +// +// Created by MNN on 2020/01/08. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef MobilenetV2_hpp +#define MobilenetV2_hpp + +#include +#include +#include "MobilenetUtils.hpp" +#include "Module.hpp" +#include "NN.hpp" + +namespace MNN { +namespace Train { +namespace Model { + +class _ConvBnRelu : public Module { +public: + _ConvBnRelu(std::vector inputOutputChannels, int kernelSize = 3, int stride = 1, bool depthwise = false); + + virtual std::vector onForward(const std::vector &inputs) override; + + std::shared_ptr conv; + std::shared_ptr bn; +}; + +std::shared_ptr ConvBnRelu(std::vector inputOutputChannels, int kernelSize = 3, int stride = 1, + bool depthwise = false) { + return std::shared_ptr(new _ConvBnRelu(inputOutputChannels, kernelSize, stride, depthwise)); +} + +class _BottleNeck : public Module { +public: + _BottleNeck(std::vector inputOutputChannels, int stride, int expandRatio); + + virtual std::vector onForward(const std::vector &inputs) override; + + std::vector > layers; + bool useShortcut = false; +}; + +std::shared_ptr BottleNeck(std::vector inputOutputChannels, int stride, int expandRatio) { + return std::shared_ptr(new _BottleNeck(inputOutputChannels, stride, expandRatio)); +} + +class MobilenetV2 : public Module { +public: + MobilenetV2(int numClasses = 1000, float widthMult = 1.0f, int divisor = 8); + + virtual std::vector onForward(const std::vector &inputs) override; + + std::shared_ptr firstConv; + std::vector > bottleNeckBlocks; + std::shared_ptr lastConv; + std::shared_ptr dropout; + std::shared_ptr fc; +}; + +} // namespace Model +} // namespace Train +} // namespace MNN + +#endif // MobilenetV2_hpp diff --git a/tools/train/source/module/Module.cpp b/tools/train/source/module/Module.cpp index a4125bff..f18b4386 100644 --- a/tools/train/source/module/Module.cpp +++ b/tools/train/source/module/Module.cpp @@ -12,6 +12,7 @@ using namespace MNN::Express; namespace MNN { namespace Train { + Express::VARP Module::forward(Express::VARP input) { return this->onForward({input})[0]; } @@ -83,5 +84,12 @@ std::shared_ptr Module::transform(const std::vector& inpu std::shared_ptr m(new FixModule(newOutputs, parameters, inputsPair)); return m; } +void Module::clearCache() { + for (auto c : mChildren) { + c->clearCache(); + } + this->onClearCache(); +} + } // namespace Train } // namespace MNN diff --git a/tools/train/source/module/Module.hpp b/tools/train/source/module/Module.hpp index 80c378c3..aa3c0b06 100644 --- a/tools/train/source/module/Module.hpp +++ b/tools/train/source/module/Module.hpp @@ -24,9 +24,13 @@ public: static std::shared_ptr transform(const std::vector& inputs, const std::vector& outputs); 
+ void clearCache(); + protected: void registerModel(const std::vector>& children); void addParameter(Express::VARP parameter); + virtual void onClearCache() { + } private: void _collectParameters(std::set& result) const; diff --git a/tools/train/source/module/NN.cpp b/tools/train/source/module/NN.cpp index a8a4796f..91a0ab75 100644 --- a/tools/train/source/module/NN.cpp +++ b/tools/train/source/module/NN.cpp @@ -10,7 +10,9 @@ #include "Distributions.hpp" #include "FixModule.hpp" #include "Initializer.hpp" +#include "MNN_generated.h" #include "RandomGenerator.hpp" +#include "core/Macro.h" using namespace MNN::Express; namespace MNN { @@ -47,7 +49,7 @@ class BatchNormModule : public Module { public: BatchNormModule(const int channels, const int dims = 4, const float m = 0.999, const float e = 1e-5) { mMomentum = m; - mEps = e; + mEps = e; mChannels = channels; MNN_ASSERT((dims == 2) || (dims == 4)); @@ -55,17 +57,17 @@ public: std::vector statShape; std::vector reductionDims; if (dims == 2) { - statShape = {channels}; + statShape = {channels}; mReductionDims = {0}; } if (dims == 4) { - statShape = {channels, 1 , 1}; + statShape = {channels, 1, 1}; mReductionDims = {0, 2, 3}; } - mScale = _Const(1.0f, statShape, NCHW); - mBias = _Const(0.0f, statShape, NCHW); - mRunningMean = _Const(0.0f, statShape, NCHW); + mScale = _Const(1.0f, statShape, NCHW); + mBias = _Const(0.0f, statShape, NCHW); + mRunningMean = _Const(0.0f, statShape, NCHW); mRunningVariance = _Const(0.0f, statShape, NCHW); addParameter(mScale); @@ -85,20 +87,25 @@ public: VARP outputData = nullptr; if (getIsTraining()) { - auto sampleMean = _ReduceMean(x, mReductionDims, true); // mean for each channel in the batch - auto sampleVar = _ReduceMean(_Square(_Subtract(x, sampleMean)), mReductionDims, true); // variance for each channel in the batch - auto rSampleStd = _Const(1.0f) / _Sqrt(sampleVar + _Const(mEps)); + Variable::prepareCompute({x}); + auto sampleMean = _ReduceMean(x, mReductionDims, true); // mean for each channel in the batch + auto sampleVar = _ReduceMean(_Square(_Subtract(x, sampleMean)), mReductionDims, + true); // variance for each channel in the batch + auto rSampleStd = _Const(1.0f) / _Sqrt(sampleVar + _Const(mEps)); auto normalizedData = _Subtract(x, sampleMean) * rSampleStd; - outputData = normalizedData * mScale + mBias; + outputData = normalizedData * mScale + mBias; mRunningMean = _Const(mMomentum) * mRunningMean + _Const(1 - mMomentum) * sampleMean; mRunningMean.fix(Express::VARP::CONST); mRunningVariance = _Const(mMomentum) * mRunningVariance + _Const(1 - mMomentum) * sampleVar; mRunningVariance.fix(Express::VARP::CONST); } else { - auto rStd = _Const(1.0f) / _Sqrt(mRunningVariance + _Const(mEps)); - auto normalizedData = _Subtract(x, mRunningMean) * rStd; - outputData = normalizedData * mScale + mBias; + auto rStd = _Const(1.0f) / _Sqrt(mRunningVariance + _Const(mEps)); + auto alpha = rStd * mScale; + auto beta = mBias - mRunningMean * rStd * mScale; + alpha.fix(VARP::CONST); + beta.fix(VARP::CONST); + outputData = x * alpha + beta; } if (dimFormat != NCHW) { @@ -109,11 +116,11 @@ public: } private: - float mMomentum = 0.999; - float mEps = 1e-5; - VARP mScale = nullptr; - VARP mBias = nullptr; - VARP mRunningMean = nullptr; + float mMomentum = 0.999; + float mEps = 1e-5; + VARP mScale = nullptr; + VARP mBias = nullptr; + VARP mRunningMean = nullptr; VARP mRunningVariance = nullptr; int mChannels; std::vector mReductionDims; @@ -227,5 +234,277 @@ std::shared_ptr NN::BatchNorm(const int channels, const int 
dims, const return std::shared_ptr(new BatchNormModule(channels, dims, m, e)); } +class ConvInt8Module : public Module { +public: + ConvInt8Module(const NN::ConvOption& option, VARP weight, VARP bias, int group, int bits) : mOption(option) { + MNN_ASSERT(bits <= 8 && bits > 1); + auto limit = (float)(1 << (bits - 1)) - 1.0f; + mWeight = weight; + mBias = bias; + mGroup = group; + if (nullptr != mBias) { + addParameter(mBias); + } + mLimitScale = _Scalar(1.0f / limit); + mClampValue = _Scalar(limit); + addParameter(mWeight); + } + + std::pair fakeQuantFeature(VARP x) { + auto originFormat = x->getInfo()->order; + auto tempX = x; + if (originFormat == NC4HW4) { + tempX = _Convert(tempX, NCHW); + } + auto originX = tempX; + auto scale = _Maximum(_ReduceMax(_Abs(tempX)), _Scalar(0.000000001f)) * mLimitScale; + scale.fix(VARP::CONST); + tempX = _Round(tempX * _Reciprocal(scale)) * scale; + tempX = _Convert(tempX + _ZeroGrad(originX), originFormat); + return std::make_pair(tempX, scale); + } + VARP clamp(VARP x) { + return _Maximum(_Minimum(x, mClampValue), _Negative(mClampValue)); + } + virtual std::vector onForward(const std::vector& inputs) override { + VARP res; + auto x = _Convert(inputs[0], NCHW); + Variable::prepareCompute({x}); + if (getIsTraining()) { + auto weightScale = _ReduceMax(_Abs(mWeight), {1, 2, 3}, true) * mLimitScale; + weightScale.fix(VARP::CONST); + // FUNC_PRINT_ALL(weightScale->readMap()[0], f); + auto weightTemp = _Round(mWeight * _Reciprocal(weightScale)) * weightScale; + weightTemp = weightTemp + _ZeroGrad(mWeight); + auto inputPair = fakeQuantFeature(x); + res = _Conv(weightTemp, mBias, _Convert(inputPair.first, NC4HW4), mOption.padMode, mOption.stride, + mOption.dilate, mGroup); + Variable::prepareCompute({res}); + auto outputPair = fakeQuantFeature(res); + res = outputPair.first; + mInputScale = inputPair.second; + mOutputScale = outputPair.second; + } else { + x = _Round(x * _Reciprocal(mInputScale)); + x = _Cast(clamp(x)); + std::vector weight; + std::vector bias; + std::vector scale; + { + auto weightScale = _ReduceMax(_Abs(mWeight), {1, 2, 3}, true) * mLimitScale; + auto quanWeight = _Cast(_Round(mWeight * _Reciprocal(weightScale))); + auto quanTemp = _Cast(_Round(mWeight * _Reciprocal(weightScale))); + auto convScale = mInputScale * _Reciprocal(mOutputScale) * weightScale; + auto quanBias = _Cast(mBias * _Reciprocal(mInputScale * weightScale)); + Variable::prepareCompute({quanBias, quanWeight, convScale}); + { + auto info = quanWeight->getInfo(); + weight.resize(info->size); + auto ptr = quanWeight->readMap(); + ::memcpy(weight.data(), ptr, weight.size() * sizeof(int8_t)); + } + { + auto info = quanBias->getInfo(); + bias.resize(info->size); + auto ptr = quanBias->readMap(); + ::memcpy(bias.data(), ptr, bias.size() * sizeof(int32_t)); + } + { + auto info = convScale->getInfo(); + scale.resize(info->size); + auto ptr = convScale->readMap(); + ::memcpy(scale.data(), ptr, scale.size() * sizeof(float)); + } + } + res = _Conv(std::move(weight), std::move(bias), std::move(scale), _Convert(x, NC4HW4), mOption.channel, + mOption.kernelSize, mOption.padMode, mOption.stride, mOption.dilate, mGroup, mOption.pads); + res = _Cast(_Convert(res, NCHW)) * mOutputScale; + res = _Convert(res, NC4HW4); + } + return {res}; + } + +private: + const NN::ConvOption mOption; + VARP mWeight; + VARP mBias; + int mGroup; + VARP mLimitScale; + VARP mInputScale; + VARP mOutputScale; + VARP mClampValue; +}; + +std::shared_ptr NN::ConvInt8(const ConvOption& option, int bits, bool hasBias, + 
std::shared_ptr weightInit, std::shared_ptr biasInit) { + auto tuple = _initParameters(option, hasBias, weightInit, biasInit); + return std::shared_ptr( + new ConvInt8Module(option, std::get<0>(tuple), std::get<1>(tuple), std::get<2>(tuple), bits)); +} +std::shared_ptr NN::ConvInt8(const ConvOption& option, VARP weight, VARP bias, int group, int bits) { + return std::shared_ptr(new ConvInt8Module(option, weight, bias, group, bits)); +} + +std::tuple NN::Utils::ExtractConvolution(EXPRP source) { + std::tuple _default; + if (source->get() == nullptr) { + return _default; + } + if (source->get()->type() != OpType_Convolution && source->get()->type() != OpType_ConvolutionDepthwise) { + return _default; + } + auto inputs = source->inputs(); + if (inputs.size() < 2) { + // TODO Support Extract Single Convolution + return _default; + } + auto conv2D = source->get()->main_as_Convolution2D(); + NN::ConvOption option; + option.kernelSize = {conv2D->common()->kernelX(), conv2D->common()->kernelY()}; + option.stride = {conv2D->common()->strideX(), conv2D->common()->strideY()}; + option.pads = {conv2D->common()->padX(), conv2D->common()->padY()}; + switch (conv2D->common()->padMode()) { + case MNN::PadMode_SAME: + option.padMode = SAME; + break; + case MNN::PadMode_VALID: + option.padMode = VALID; + break; + default: + break; + } + option.dilate = {conv2D->common()->dilateX(), conv2D->common()->dilateY()}; + option.depthwise = source->get()->type() == OpType_ConvolutionDepthwise; + option.channel = {conv2D->common()->inputCount(), conv2D->common()->outputCount()}; + int group = 1; + if (source->get()->type() == OpType_ConvolutionDepthwise) { + group = conv2D->common()->outputCount(); + } + VARP weight = inputs[1]; + VARP bias; + if (inputs.size() > 2) { + bias = inputs[2]; + } + return std::make_tuple(option, weight, bias, group); +} + +static int _clamp(int c, int maxValue, int minValue) { + if (c > maxValue) { + return maxValue; + } + if (c < minValue) { + return minValue; + } + return c; +} +class ConvOctaveModule : public Module { +public: + ConvOctaveModule(const NN::ConvOption& option, VARP weight, VARP bias, int group, float inFactor, float outFactor) + : mOption(option) { + auto inputCountC4 = UP_DIV(option.channel[0], 4); + auto outputCountC4 = UP_DIV(option.channel[1], 4); + MNN_ASSERT(inputCountC4 > 1 && outputCountC4 > 1); + MNN_ASSERT(nullptr != bias); + auto iC0 = (int)((float)inputCountC4 * inFactor); + iC0 = _clamp(iC0, inputCountC4 - 1, 1); + + auto oC0 = (int)((float)outputCountC4 * outFactor); + oC0 = _clamp(oC0, outputCountC4 - 1, 1); + + iC0 = iC0 * 4; + auto iC1 = option.channel[0] - iC0; + oC0 = oC0 * 4; + auto oC1 = option.channel[1] - oC0; + mSplitInput = {iC0, iC1}; + + MNN_PRINT("Octave: %d, %d -> %d - %d, %d-%d\n", option.channel[0], option.channel[1], iC0, iC1, oC0, oC1); + auto splitBias = _Split(bias * _Scalar(0.5f), {oC0, oC1}, 0); + mLBias = splitBias[0]; + mHBias = splitBias[1]; + mLBias.fix(VARP::CONST); + mHBias.fix(VARP::CONST); + + auto splitWeight = _Split(weight, {oC0, oC1}, 0); + auto lw = _Split(splitWeight[0], {iC0, iC1}, 1); + auto hw = _Split(splitWeight[1], {iC0, iC1}, 1); + mLLW = lw[0]; + mLHW = lw[1]; + mHLW = hw[0]; + mHHW = hw[1]; + + mLLW.fix(VARP::CONST); + mLHW.fix(VARP::CONST); + mHLW.fix(VARP::CONST); + mHHW.fix(VARP::CONST); + mGroup = group; + addParameter(mLBias); + addParameter(mHBias); + addParameter(mLLW); + addParameter(mLHW); + addParameter(mHHW); + addParameter(mHLW); + } + virtual std::vector onForward(const std::vector& inputs) override 
{ + auto input = _Convert(inputs[0], NC4HW4); + auto inputSplit = _Split(input, mSplitInput, 1); + auto XL = inputSplit[0]; + auto XH = inputSplit[1]; + if (input->getInfo()->dim[3] < 2) { + auto L2L = _Conv(mLLW, mLBias, XL, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto L2H = _Conv(mHLW, mHBias, XL, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto H2L = _Conv(mLHW, mLBias, XH, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto H2H = _Conv(mHHW, mHBias, XH, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto L = L2L + H2L; + auto H = H2H + L2H; + return {_Concat({L, H}, 1)}; + } + XL = _AvePool(XL, {2, 2}, {2, 2}); + auto info = XL->getInfo(); + auto L2L = _Conv(mLLW, mLBias, XL, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto L2H = _Conv(mHLW, mHBias, XL, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto H2L = + _Conv(mLHW, mLBias, _AvePool(XH, {2, 2}, {2, 2}), mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto H2H = _Conv(mHHW, mHBias, XH, mOption.padMode, mOption.stride, mOption.dilate, mGroup); + auto L = L2L + H2L; + auto H = H2H; + auto dstShape = H->getInfo()->dim; // NCHW + { H = H2H + _Interp({L2H}, 0.0f, 0.0f, dstShape[3], dstShape[2], 1, true); } + auto res = _Concat({_Interp({L}, 0.0f, 0.0f, dstShape[3], dstShape[2], 1, true), H}, 1); + info = res->getInfo(); + MNN_ASSERT(nullptr != info); + return {res}; + } + +private: + const NN::ConvOption mOption; + VARP mLLW; + VARP mLHW; + VARP mHLW; + VARP mHHW; + VARP mLBias; + VARP mHBias; + + std::vector mSplitInput; + + int mGroup; +}; +std::shared_ptr NN::Conv(const ConvOption& option, Express::VARP weight, Express::VARP bias, int group) { + VARP input = _Input({1, option.channel[0], 1, 1}, NC4HW4); + if (nullptr == weight) { + return nullptr; + } + if (nullptr != bias) { + auto tempOutput = _Conv(weight, bias, input, option.padMode, option.stride, option.dilate, group); + return std::shared_ptr(new FixModule({tempOutput}, {weight, bias}, {{input, NC4HW4}})); + } + auto tempOutput = _Conv(weight, nullptr, input, option.padMode, option.stride, option.dilate, group); + return std::shared_ptr(new FixModule({tempOutput}, {weight}, {{input, NC4HW4}})); +} + +std::shared_ptr NN::ConvOctave(const ConvOption& option, Express::VARP weight, Express::VARP bias, int group, + float inFactor, float outFactor) { + return std::shared_ptr(new ConvOctaveModule(option, weight, bias, group, inFactor, outFactor)); +} + } // namespace Train } // namespace MNN diff --git a/tools/train/source/module/NN.hpp b/tools/train/source/module/NN.hpp index b9b6bf9e..8ec33f92 100644 --- a/tools/train/source/module/NN.hpp +++ b/tools/train/source/module/NN.hpp @@ -17,7 +17,10 @@ class Initializer; class MNN_PUBLIC NN { public: - struct ConvOption { + /* Unlike enum in class, class in class need be dllimport or dllexport explcility. + Compiling in other system will not be affected. 
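Stepping back to the ConvInt8Module above: fakeQuantFeature plus the _ZeroGrad term reads as a straight-through estimator, assuming the ZeroGrad op contributes zero in the forward pass (its gradient, added earlier in this patch, passes the incoming gradient through unchanged). Condensed, with names reused from that class and x standing for a feature tensor:

auto scale = _Maximum(_ReduceMax(_Abs(x)), _Scalar(0.000000001f)) * mLimitScale; // per-tensor scale
auto xq    = _Round(x * _Reciprocal(scale)) * scale;                             // quantize-dequantize
auto out   = xq + _ZeroGrad(x); // forward: the rounded value; backward: gradient flows straight to x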
+ */ + struct MNN_PUBLIC ConvOption { Express::INTS kernelSize = {1, 1}; Express::INTS channel = {0, 0}; Express::INTS stride = {1, 1}; @@ -38,8 +41,23 @@ public: std::shared_ptr weightInit = nullptr, std::shared_ptr biasInit = nullptr); static std::shared_ptr Dropout(const float dropRatio); - static std::shared_ptr BatchNorm(const int channels, const int dims = 4, - const float m = 0.999, const float e = 1e-5); + static std::shared_ptr BatchNorm(const int channels, const int dims = 4, const float m = 0.999, + const float e = 1e-5); + + static std::shared_ptr ConvInt8(const ConvOption& option, int bits = 8, bool bias = true, + std::shared_ptr weightInit = nullptr, + std::shared_ptr biasInit = nullptr); + static std::shared_ptr ConvInt8(const ConvOption& option, Express::VARP weight, Express::VARP bias, + int group, int bits); + static std::shared_ptr ConvOctave(const ConvOption& option, Express::VARP weight, Express::VARP bias, + int group, float inFactor, float outFactor); + static std::shared_ptr Conv(const ConvOption& option, Express::VARP weight, Express::VARP bias, int group); + + class Utils { + public: + // ConvOption, Weight, Bias, Group + static std::tuple ExtractConvolution(Express::EXPRP expr); + }; }; } // namespace Train diff --git a/tools/train/source/module/PipelineModule.cpp b/tools/train/source/module/PipelineModule.cpp new file mode 100644 index 00000000..7936e0d5 --- /dev/null +++ b/tools/train/source/module/PipelineModule.cpp @@ -0,0 +1,138 @@ +// +// PipelineModule.cpp +// MNN +// +// Created by MNN on 2020/01/09. +// Copyright © 2018, Alibaba Group Holding Limited +// + +#include "PipelineModule.hpp" +using namespace MNN::Express; +namespace MNN { +namespace Train { +class ExprModule : public Module { +public: + ExprModule(EXPRP expr) { + MNN_ASSERT(expr->get() != nullptr); + mExpr = expr; + mInputs = expr->inputs(); + for (int i = 0; i < mInputs.size(); ++i) { + auto inputExpr = mInputs[i]->expr().first; + if (inputExpr->get() != nullptr) { + mInputs[i] = nullptr; + mInputIndexes.emplace_back(i); + continue; + } + switch (inputExpr->inputType()) { + case VARP::INPUT: + mInputs[i] = nullptr; + mInputIndexes.emplace_back(i); + break; + case VARP::CONST: + break; + case VARP::TRAINABLE: + addParameter(mInputs[i]); + break; + default: + break; + } + } + } + virtual std::vector onForward(const std::vector& inputs) override { + MNN_ASSERT(mInputIndexes.size() == inputs.size()); + std::vector tempInputs = mInputs; + for (int i = 0; i < inputs.size(); ++i) { + tempInputs[mInputIndexes[i]] = inputs[i]; + } + std::vector outputVars; + auto newExpr = Expr::create(mExpr->extra(), std::move(tempInputs), mExpr->outputSize()); + for (int i = 0; i < mExpr->outputSize(); ++i) { + outputVars.emplace_back(Variable::create(newExpr, i)); + } + return outputVars; + } + const std::vector& inputIndexes() const { + return mInputIndexes; + } + +private: + EXPRP mExpr; + std::vector mInputs; + std::vector mInputIndexes; +}; +PipelineModule::PipelineModule(std::vector inputs, std::vector outputs, Transformer& transformFunction) { + auto executeOrder = Variable::getExecuteOrder(outputs); + // Set Indexes + std::map indexes; + int currentIndexes = 0; + for (auto expr : executeOrder) { + indexes[expr] = currentIndexes; + currentIndexes += expr->outputSize(); + } + mInputIndexes.clear(); + mStack.resize(currentIndexes); + for (auto v : inputs) { + auto inputExpr = v->expr(); + mInputIndexes.emplace_back(indexes[inputExpr.first] + inputExpr.second); + } + + // Create All SubModule + for (auto expr : 
executeOrder) { + if (expr->get() == nullptr) { + continue; + } + auto moduleResult = transformFunction(expr); + if (moduleResult.second == nullptr) { + std::shared_ptr module(new ExprModule(expr)); + moduleResult.first = ((ExprModule*)module.get())->inputIndexes(); + moduleResult.second = module; + } + auto subInputs = expr->inputs(); + auto exprInputIndexes = moduleResult.first; + std::vector inputIndexes(exprInputIndexes.size()); + for (int i = 0; i < exprInputIndexes.size(); ++i) { + auto inputExpr = subInputs[exprInputIndexes[i]]->expr(); + inputIndexes[i] = indexes[inputExpr.first] + inputExpr.second; + } + + std::vector outputIndexes(expr->outputSize()); + for (int i = 0; i < outputIndexes.size(); ++i) { + outputIndexes[i] = indexes[expr] + i; + } + mSubModules.emplace_back(std::make_tuple(moduleResult.second, inputIndexes, outputIndexes)); + registerModel({moduleResult.second}); + } + mOutputIndexes.clear(); + for (auto output : outputs) { + auto outputExpr = output->expr(); + mOutputIndexes.emplace_back(indexes[outputExpr.first] + outputExpr.second); + } +} +std::vector PipelineModule::onForward(const std::vector& inputs) { + for (int i = 0; i < mInputIndexes.size(); ++i) { + mStack[mInputIndexes[i]] = inputs[i]; + } + for (auto& m : mSubModules) { + std::vector tempInputs(std::get<1>(m).size()); + for (int i = 0; i < tempInputs.size(); ++i) { + tempInputs[i] = mStack[std::get<1>(m)[i]]; + } + std::vector tempOutputs = std::get<0>(m)->onForward(tempInputs); + MNN_ASSERT(tempOutputs.size() == std::get<2>(m).size()); + for (int i = 0; i < tempOutputs.size(); ++i) { + mStack[std::get<2>(m)[i]] = tempOutputs[i]; + } + } + std::vector outputs(mOutputIndexes.size()); + for (int i = 0; i < mOutputIndexes.size(); ++i) { + outputs[i] = mStack[mOutputIndexes[i]]; + } + return outputs; +} +void PipelineModule::onClearCache() { + for (auto& v : mStack) { + v = nullptr; + } +} +} // namespace Train +} // namespace MNN diff --git a/tools/train/source/module/PipelineModule.hpp b/tools/train/source/module/PipelineModule.hpp new file mode 100644 index 00000000..b8029ffe --- /dev/null +++ b/tools/train/source/module/PipelineModule.hpp @@ -0,0 +1,32 @@ +// +// PipelineModule.hpp +// MNN +// +// Created by MNN on 2020/01/09. 
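The transformFunction hook above decides, per expression, whether a hand-written Module takes over; a null module makes PipelineModule fall back to wrapping the expression in ExprModule. A sketch of a pass-through transformer under that contract (the Transformer typedef's template arguments are reconstructed here, since they are not legible in this hunk):

PipelineModule::Transformer keepEverything = [](Express::EXPRP expr) {
    // first: indexes of expr's inputs the custom module would consume; second: the module itself.
    // Returning a null module keeps the expression as-is via ExprModule.
    return std::make_pair(std::vector<int>{}, std::shared_ptr<Module>(nullptr));
};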
+// Copyright © 2018, Alibaba Group Holding Limited +// + +#ifndef PipelineModule_hpp +#define PipelineModule_hpp +#include "Module.hpp" +namespace MNN { +namespace Train { + +class MNN_PUBLIC PipelineModule : public Module { +public: + typedef std::function, std::shared_ptr>(Express::EXPRP)> Transformer; + PipelineModule(std::vector inputs, std::vector outputs, + Transformer& transformFunction); + virtual std::vector onForward(const std::vector& inputs) override; + virtual void onClearCache() override; + +private: + std::vector, std::vector, std::vector>> mSubModules; + std::vector mStack; + std::vector mInputIndexes; + std::vector mOutputIndexes; +}; +} // namespace Train +} // namespace MNN + +#endif diff --git a/tools/train/source/optimizer/ADAM.cpp b/tools/train/source/optimizer/ADAM.cpp index dc171fd2..f3329516 100644 --- a/tools/train/source/optimizer/ADAM.cpp +++ b/tools/train/source/optimizer/ADAM.cpp @@ -22,25 +22,23 @@ void ADAM::setEps(float eps) { mEps = eps; } -void ADAM::append(const std::set& parameters) { +void ADAM::onAppend(const std::set& parameters) { for (auto p : parameters) { - mParameters.insert(p); mHistory[p] = _Const(0.0f, p->getInfo()->dim, p->getInfo()->order); mHistory2[p] = _Const(0.0f, p->getInfo()->dim, p->getInfo()->order); } } -void ADAM::remove(const std::set& parameters) { +void ADAM::onRemove(const std::set& parameters) { for (auto p : parameters) { - mParameters.erase(p); mHistory.erase(p); mHistory2.erase(p); } } -Express::VARP ADAM::computeUpdateValue(Express::VARP param, Express::VARP grad) { +Express::VARP ADAM::onComputeUpdateValue(Express::VARP param, Express::VARP grad) { auto lr = _Const(mLearningRate, {}, NCHW); - auto step = _Const(mStep, {}, NCHW); + auto step = _Const(currentStep(), {}, NCHW); auto beta1 = _Const(mMomentum, {}, NCHW); auto beta2 = _Const(mMomentum2, {}, NCHW); auto eps = _Const(mEps, {}, NCHW); diff --git a/tools/train/source/optimizer/ADAM.hpp b/tools/train/source/optimizer/ADAM.hpp index bb78114b..0d31928b 100644 --- a/tools/train/source/optimizer/ADAM.hpp +++ b/tools/train/source/optimizer/ADAM.hpp @@ -20,11 +20,11 @@ namespace Train { class MNN_PUBLIC ADAM : public SGD { public: - void append(const std::set& parameters); + void onAppend(const std::set& parameters) override; - void remove(const std::set& parameters); + void onRemove(const std::set& parameters) override; - Express::VARP computeUpdateValue(Express::VARP param, Express::VARP grad) override; + virtual Express::VARP onComputeUpdateValue(Express::VARP param, Express::VARP grad) override; void setMomentum2(float momentum2); diff --git a/tools/train/source/optimizer/LearningRateScheduler.cpp b/tools/train/source/optimizer/LearningRateScheduler.cpp index 2a472912..00068422 100644 --- a/tools/train/source/optimizer/LearningRateScheduler.cpp +++ b/tools/train/source/optimizer/LearningRateScheduler.cpp @@ -7,8 +7,8 @@ // #include "LearningRateScheduler.hpp" -#include #include +#include namespace MNN { namespace Train { diff --git a/tools/train/source/optimizer/ParameterOptimizer.cpp b/tools/train/source/optimizer/ParameterOptimizer.cpp index c2fe598a..c4147b8b 100644 --- a/tools/train/source/optimizer/ParameterOptimizer.cpp +++ b/tools/train/source/optimizer/ParameterOptimizer.cpp @@ -12,6 +12,7 @@ namespace MNN { namespace Train { bool ParameterOptimizer::step(Express::VARP loss) { + mStep++; auto res = this->onGetNextParameter(loss); for (auto iter : res) { iter.second.fix(Express::VARP::CONST); @@ -22,5 +23,29 @@ bool ParameterOptimizer::step(Express::VARP 
loss) { return !res.empty(); } +int ParameterOptimizer::currentStep() { + return mStep; +} + +void ParameterOptimizer::setCurrentStep(int step) { + mStep = step; +} + +void ParameterOptimizer::append(const std::set& parameters) { + for (auto p : parameters) { + mParameters.insert(p); + } + this->onAppend(parameters); +} +void ParameterOptimizer::remove(const std::set& parameters) { + for (auto p : parameters) { + mParameters.erase(p); + } + this->onRemove(parameters); +} +const std::set& ParameterOptimizer::parameters() const { + return mParameters; +} + } // namespace Train } // namespace MNN diff --git a/tools/train/source/optimizer/ParameterOptimizer.hpp b/tools/train/source/optimizer/ParameterOptimizer.hpp index 0259ed72..f1f799c6 100644 --- a/tools/train/source/optimizer/ParameterOptimizer.hpp +++ b/tools/train/source/optimizer/ParameterOptimizer.hpp @@ -9,7 +9,7 @@ #ifndef ParameterOptimizer_hpp #define ParameterOptimizer_hpp #include - +#include namespace MNN { namespace Train { @@ -18,7 +18,19 @@ public: ParameterOptimizer() = default; virtual ~ParameterOptimizer() = default; bool step(Express::VARP loss); + int currentStep(); + void setCurrentStep(int step); + void append(const std::set& parameters); + void remove(const std::set& parameters); + virtual std::map onGetNextParameter(Express::VARP loss) = 0; + const std::set& parameters() const; + +private: + virtual void onAppend(const std::set& parameters) = 0; + virtual void onRemove(const std::set& parameters) = 0; + std::set mParameters; + int mStep = 0; }; } // namespace Train diff --git a/tools/train/source/optimizer/SGD.cpp b/tools/train/source/optimizer/SGD.cpp index e177faff..97c77486 100644 --- a/tools/train/source/optimizer/SGD.cpp +++ b/tools/train/source/optimizer/SGD.cpp @@ -30,24 +30,22 @@ void SGD::setRegularizationMethod(RegularizationMethod method) { mRegularizationMethod = method; } -void SGD::append(const std::set& parameters) { +float SGD::currentLearningRate() { + return mLearningRate; +} + +void SGD::onAppend(const std::set& parameters) { for (auto p : parameters) { - mParameters.insert(p); mHistory[p] = _Const(0.0f, p->getInfo()->dim, p->getInfo()->order); } } -void SGD::remove(const std::set& parameters) { +void SGD::onRemove(const std::set& parameters) { for (auto p : parameters) { - mParameters.erase(p); mHistory.erase(p); } } -const std::set& SGD::parameters() const { - return mParameters; -} - Express::VARP SGD::regularizeParameters(Express::VARP param, Express::VARP grad) { VARP addWeightDecayGrad; if (mRegularizationMethod == L1) { @@ -60,7 +58,7 @@ Express::VARP SGD::regularizeParameters(Express::VARP param, Express::VARP grad) return addWeightDecayGrad; } -Express::VARP SGD::computeUpdateValue(Express::VARP param, Express::VARP grad) { +Express::VARP SGD::onComputeUpdateValue(Express::VARP param, Express::VARP grad) { auto lr = _Const(mLearningRate, {}, NCHW); mHistory[param] = lr * grad + _Const(mMomentum, {}, NCHW) * mHistory[param]; mHistory[param].fix(Express::VARP::CONST); @@ -69,16 +67,19 @@ Express::VARP SGD::computeUpdateValue(Express::VARP param, Express::VARP grad) { } std::map SGD::onGetNextParameter(Express::VARP loss) { - mStep++; - - auto grad = OpGrad::grad(loss, mParameters); + auto grad = OpGrad::grad(loss, parameters()); + std::vector prepareCompute; + for (auto& iter : grad) { + prepareCompute.emplace_back(iter.second); + } + Variable::prepareCompute(prepareCompute); for (auto& iter : grad) { // apply regularization auto addWeightDecayGrad = regularizeParameters(iter.first, 
iter.second); addWeightDecayGrad.fix(Express::VARP::CONST); // apply momentum, etc. - auto updateValue = computeUpdateValue(iter.first, addWeightDecayGrad); + auto updateValue = this->onComputeUpdateValue(iter.first, addWeightDecayGrad); // apply update auto newParameter = iter.first - updateValue; iter.second = newParameter; diff --git a/tools/train/source/optimizer/SGD.hpp b/tools/train/source/optimizer/SGD.hpp index 662ad020..b0ec90b4 100644 --- a/tools/train/source/optimizer/SGD.hpp +++ b/tools/train/source/optimizer/SGD.hpp @@ -27,9 +27,9 @@ public: virtual std::map onGetNextParameter(Express::VARP loss) override; - virtual Express::VARP regularizeParameters(Express::VARP param, Express::VARP grad); + Express::VARP regularizeParameters(Express::VARP param, Express::VARP grad); - virtual Express::VARP computeUpdateValue(Express::VARP param, Express::VARP grad); + virtual Express::VARP onComputeUpdateValue(Express::VARP param, Express::VARP grad); void setLearningRate(float rate); @@ -39,20 +39,18 @@ public: void setRegularizationMethod(RegularizationMethod method); - void append(const std::set& parameters); + float currentLearningRate(); - void remove(const std::set& parameters); + virtual void onAppend(const std::set& parameters) override; - const std::set& parameters() const; + virtual void onRemove(const std::set& parameters) override; protected: float mLearningRate = 0.001f; float mMomentum = 0; float mWeightDecay = 0; RegularizationMethod mRegularizationMethod = L2; - std::set mParameters; std::map mHistory; - int mStep = 0; // For Cache const Express::Expr* mLoss = nullptr; diff --git a/tools/train/source/transformer/ConvolutionConverter.cpp b/tools/train/source/transformer/ConvolutionConverter.cpp deleted file mode 100644 index 9b810af1..00000000 --- a/tools/train/source/transformer/ConvolutionConverter.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// -// ConvolutionConverter.cpp -// MNN -// -// Created by MNN on 2019/04/22. 
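Taken together, the SGD hunks above make one optimizer step read as follows; the momentum line and the final update are taken from the code, while the exact weight-decay term lives in regularizeParameters and depends on the L1/L2 setting:

g = grad + weight-decay term            // regularizeParameters(param, grad)
v = learningRate * g + momentum * v     // onComputeUpdateValue, accumulated in mHistory[param]
param <- param - v                      // onGetNextParameter: newParameter = iter.first - updateValue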
-// Copyright © 2018, Alibaba Group Holding Limited -// - -#include "ConvolutionConverter.hpp" -#include -#include "core/Macro.h" -using namespace MNN; - -OpConverter::ReductResult ConvolutionConverter::onReduct(int opIndex, MNN::OpT* op, MNN::NetT* net) { - OpConverter::ReductResult result; - if (op->inputIndexes.size() != 3) { - return result; - } - - auto conv2D = op->main.AsConvolution2D(); - auto conv2DCommon = conv2D->common.get(); - - auto relu = conv2DCommon->relu; - auto relu6 = conv2DCommon->relu6; - - // set output - auto outputIndex = op->outputIndexes[0]; - if (relu || relu6) { - auto& reluOp = net->oplists[opIndex + 1]; - outputIndex = reluOp->outputIndexes[0]; - } - - op->outputIndexes = {outputIndex}; - - // add weight - auto& weightOp = net->oplists[opIndex - 2]; - op->main.AsConvolution2D()->weight = std::move(weightOp->main.AsBlob()->float32s); - - // add bias - auto& biasOp = net->oplists[opIndex - 1]; - op->main.AsConvolution2D()->bias = std::move(biasOp->main.AsBlob()->float32s); - - // set input - op->inputIndexes = {op->inputIndexes[0]}; - - result.needDeleteOpIndexes.emplace_back(opIndex - 2); - result.needDeleteOpIndexes.emplace_back(opIndex - 1); - if (relu || relu6) { - result.needDeleteOpIndexes.emplace_back(opIndex + 1); - } - - return result; -} - -static const auto gRegister = []() { - static ConvolutionConverter _c; - OpConverter::insert(OpType_Convolution, &_c); - OpConverter::insert(OpType_ConvolutionDepthwise, &_c); - return true; -}(); diff --git a/tools/train/source/transformer/ConvolutionConverter.hpp b/tools/train/source/transformer/ConvolutionConverter.hpp deleted file mode 100644 index 61debd06..00000000 --- a/tools/train/source/transformer/ConvolutionConverter.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// -// ConvolutionConverter.hpp -// MNN -// -// Created by MNN on 2019/04/22. -// Copyright © 2018, Alibaba Group Holding Limited -// - -#ifndef ConvolutionConverter_hpp -#define ConvolutionConverter_hpp - -#include -#include "OpConverter.hpp" -class ConvolutionConverter : public OpConverter { -public: - virtual ReductResult onReduct(int opIndex, MNN::OpT* op, MNN::NetT* net) override; -}; - -#endif /* ConvolutionConverter_hpp */
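With the OpT-level ConvolutionConverter gone, the Express-level pieces added above can play the same role. One plausible composition for quantization-aware training; the wiring and the choice of input index are illustrative assumptions, only ExtractConvolution, ConvInt8 and the Transformer contract come from the hunks above:

PipelineModule::Transformer quantize = [](Express::EXPRP expr) {
    auto conv = NN::Utils::ExtractConvolution(expr);    // {ConvOption, weight, bias, group}
    std::shared_ptr<Module> module;
    std::vector<int> consumed;
    if (nullptr != std::get<1>(conv)) {                  // a convolution with a constant weight
        module   = NN::ConvInt8(std::get<0>(conv), std::get<1>(conv), std::get<2>(conv),
                                std::get<3>(conv), /*bits*/ 8);
        consumed = {0};                                  // assume the data tensor is input 0
    }
    return std::make_pair(consumed, module);             // null module -> keep the expr as-is
};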