Sync Internal CMake changes

This commit is contained in:
海境 2020-01-17 10:20:15 +08:00
parent 4d6c19f121
commit ed8b9f2a23
No known key found for this signature in database
GPG Key ID: DE9DE2D2FA0E073D
51 changed files with 24431 additions and 558 deletions

.gitignore (vendored), 76 changed lines
View File

@@ -63,6 +63,29 @@ obj/
*.iws *.iws
/out/ /out/
# User-specific configurations
.idea/caches/
.idea/libraries/
.idea/shelf/
.idea/workspace.xml
.idea/tasks.xml
.idea/.name
.idea/compiler.xml
.idea/copyright/profiles_settings.xml
.idea/encodings.xml
.idea/misc.xml
.idea/modules.xml
.idea/scopes/scope_settings.xml
.idea/dictionaries
.idea/vcs.xml
.idea/jsLibraryMappings.xml
.idea/datasources.xml
.idea/dataSources.ids
.idea/sqlDataSources.xml
.idea/dynamic.xml
.idea/uiDesigner.xml
.idea/assetWizardSettings.xml
# OS-specific files # OS-specific files
.DS_Store .DS_Store
.DS_Store? .DS_Store?
@@ -90,9 +113,14 @@ hs_err_pid*
## Plugin-specific files: ## Plugin-specific files:
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin # JIRA plugin
atlassian-ide-plugin.xml atlassian-ide-plugin.xml
# Mongo Explorer plugin
.idea/mongoSettings.xml
# Crashlytics plugin (for Android Studio and IntelliJ) # Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml com_crashlytics_export_strings.xml
@@ -282,7 +310,20 @@ build.mac/
### Projects ### Projects
*.podspec.json *.podspec.json
demo/android/.idea
demo/android/.idea/gradle.xml
demo/android/.idea/misc.xml
demo/android/.idea/runConfigurations.xml
demo/android/.idea/vcs.xml
demo/android/.idea/caches/build_file_checksums.ser
demo/android/app/libs/ demo/android/app/libs/
project/android/.idea/.name
project/android/.idea/gradle.xml
project/android/.idea/misc.xml
project/android/.idea/modules.xml
project/android/.idea/runConfigurations.xml
project/android/.idea/vcs.xml
project/android/.idea/caches/build_file_checksums.ser
### Temps ### Temps
3rd_party/flatbuffers/tmp 3rd_party/flatbuffers/tmp
@@ -292,27 +333,24 @@ schema/private
tools/converter/source/IR tools/converter/source/IR
benchmark/benchmark.txt benchmark/benchmark.txt
### Python MNN
pymnn/android/build/
pymnn/android/local.properties
pymnn/android/.idea
pymnn/android/.idea/.name
pymnn/android/.idea/gradle.xml
pymnn/android/.idea/misc.xml
pymnn/android/.idea/modules.xml
pymnn/android/.idea/runConfigurations.xml
pymnn/android/.idea/vcs.xml
pymnn/android/.idea/caches/build_file_checksums.ser
buildios buildios
build*/ build*/
source/backend/opencl/execution/cl/codegen/opencl_program.cc
source/backend/opencl/execution/cl/opencl_program.cc
# FIXME(haijing): MTL issues.....
# source/backend/metal/MetalOPRegister.mm
source/backend/opengl/AllShader.cpp
source/backend/opengl/AllShader.hpp
source/backend/vulkan/compiler/AllShader.cpp
project/ios/iOS_64
project/ios/iOS_32
project/ios/SIM_32
project/ios/SIM_64
project/ios/out/
project/ios/MNN_iOS64
project/ios/MNN_iOS32
project/ios/MNN_SIM_32
project/ios/MNN_SIM_64
.idea/
include/MNN/VCS.h include/MNN/VCS.h
schema/current/ .idea
project/ios/ios_64
project/ios/ios_32
project/ios/MNN.framework
pymnn_build/ pymnn_build/
macosbuild

View File

@@ -1,37 +0,0 @@
# Build Status
## macOS
Configuration | CI Status
--------------|:---------
macOS11.2 CPU_Metal | [![macOS11.2 CPU_Metal](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=MACOSCPUMETAL&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
macOS11.2 CPU | [![macOS11.2 CPU](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=MACOSCPU&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
## iOS
Configuration | CI Status
--------------|:---------
iOS CPU_Metal Xcode Project | [![iOS CPU_Metal Xcode](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=IOSCPUMETALXCODE&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
iOS CPU_Metal Xcode CMake | [![iOS CPU_Metal CMake](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=IOSCPUMETALCMAKE&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
## Android
Configuration | CI Status
--------------------------|:---------
AArch32 ThreadPool Vulkan | [![ARM32THREADPOOLVULKAN](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=ARM32THREADPOOLVULKAN&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
AArch32 OpenMP Vulkan | [![ARM32OMPVULKAN](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=ARM32OMPVULKAN&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
AArch64 ThreadPool Vulkan | [![ARM64THREADPOOLVULKAN](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=ARM64THREADPOOLVULKAN&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
AArch64 OpenMP Vulkan | [![ARM64OMPVULKAN](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=ARM64OMPVULKAN&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
## Linux
Configuration | CI Status
--------------|:---------
Ubuntu18.04 ThreadPool OpenCL Vulkan | [![Ubuntu18.04 ThreadPool OpenCL Vulkan](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=LINUXCLTHREADPOOLVULKAN&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
## Windows
Configuration | CI Status
--------------|:---------
Windows Server 2016 Ver.1803 32Bit CPU | [![WINX86](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=WINX86&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)
Windows Server 2016 Ver.1803 64Bit CPU| [![WINX64](http://badges.herokuapp.com/travis/alibaba/MNN?env=MNNCITARGET=WINX64&label=build&branch=master)](https://travis-ci.org/alibaba/MNN)

View File

@@ -10,7 +10,7 @@ if(NOT DEFINED MNN_VERSION_PATCH)
set(MNN_VERSION_PATCH 1) set(MNN_VERSION_PATCH 1)
endif() endif()
if(NOT DEFINED MNN_VERSION_BUILD) if(NOT DEFINED MNN_VERSION_BUILD)
set(MNN_VERSION_BUILD 8) set(MNN_VERSION_BUILD 5)
endif() endif()
if(NOT DEFINED MNN_VERSION_SUFFIX) if(NOT DEFINED MNN_VERSION_SUFFIX)
set(MNN_VERSION_SUFFIX git) set(MNN_VERSION_SUFFIX git)
@@ -25,14 +25,8 @@ add_definitions("-DMNN_VERSION_MINOR=${MNN_VERSION_MINOR}")
add_definitions("-DMNN_VERSION_PATCH=${MNN_VERSION_PATCH}") add_definitions("-DMNN_VERSION_PATCH=${MNN_VERSION_PATCH}")
# CMP0048 is related to letting CMake manage the package version for us # CMP0048 is related to letting CMake manage the package version for us
# CMP0079 is required for OpenMP
IF(POLICY CMP0048) cmake_policy(SET CMP0048 NEW)
cmake_policy(SET CMP0048 NEW)
ENDIF()
IF(POLICY CMP0079)
cmake_policy(SET CMP0079 NEW)
ENDIF()
project(MNN VERSION ${MNN_VERSION_MAJOR}.${MNN_VERSION_MINOR}.${MNN_VERSION_PATCH}.${MNN_VERSION_BUILD} LANGUAGES C CXX ASM) project(MNN VERSION ${MNN_VERSION_MAJOR}.${MNN_VERSION_MINOR}.${MNN_VERSION_PATCH}.${MNN_VERSION_BUILD} LANGUAGES C CXX ASM)
# compiler options # compiler options
set(CMAKE_C_STANDARD 99) set(CMAKE_C_STANDARD 99)
@@ -69,29 +63,39 @@ option(MNN_DEBUG_TENSOR_SIZE "Enable Tensor Size" OFF)
option(MNN_GPU_TRACE "Enable MNN Gpu Debug" OFF) option(MNN_GPU_TRACE "Enable MNN Gpu Debug" OFF)
option(MNN_PORTABLE_BUILD "Link the static version of third party libraries where possible to improve the portability of built executables" OFF) option(MNN_PORTABLE_BUILD "Link the static version of third party libraries where possible to improve the portability of built executables" OFF)
option(MNN_SEP_BUILD "Build MNN Backends and expression separately. Only works with MNN_BUILD_SHARED_LIBS=ON" ON) option(MNN_SEP_BUILD "Build MNN Backends and expression separately. Only works with MNN_BUILD_SHARED_LIBS=ON" ON)
option(MNN_AAPL_FMWK "Build MNN.framework instead of traditional .a/.dylib" OFF)
option(MNN_USE_SSE "Enable SSE Optimizations" ON)
option(MNN_USE_AVX "Enable AVX Optimizations" ON)
option(NATIVE_LIBRARY_OUTPUT "Native Library Path" OFF) option(NATIVE_LIBRARY_OUTPUT "Native Library Path" OFF)
option(NATIVE_INCLUDE_OUTPUT "Native Include Path" OFF) option(NATIVE_INCLUDE_OUTPUT "Native Include Path" OFF)
option(MNN_AAPL_FMWK "Build MNN.framework instead of traditional .a/.dylib" OFF)
set(MNN_SCHEMA_SUFFIX "private" CACHE STRING "MNN Schema Source Path Suffix")
set(MNN_SCHEMA_SUFFIX "default" CACHE STRING "MNN Schema Source Path Suffix")
IF(APPLE AND MNN_AAPL_FMWK AND MNN_SEP_BUILD) IF(APPLE AND MNN_AAPL_FMWK AND MNN_SEP_BUILD)
message(WARNING "MNN_SEP_BUILD AND MNN_AAPL_FMWK can't coexist. Turning off MNN_SEP_BUILD") message(WARNING "MNN_SEP_BUILD AND MNN_AAPL_FMWK can't coexist. Turning off MNN_SEP_BUILD")
SET(MNN_SEP_BUILD OFF) SET(MNN_SEP_BUILD OFF)
ENDIF() ENDIF()
IF(MSVC OR WIN32) IF((MSVC OR WIN32) AND MNN_SEP_BUILD)
message(WARNING "MNN_SEP_BUILD IS TROUBLESOME ON Windows. Forcing OFF...") message(WARNING "MNN_SEP_BUILD IS TROUBLESOME ON Windows. Forcing OFF...")
SET(MNN_SEP_BUILD OFF) SET(MNN_SEP_BUILD OFF)
ENDIF() ENDIF()
include(${CMAKE_CURRENT_LIST_DIR}/cmake/macros.cmake) include(${CMAKE_CURRENT_LIST_DIR}/cmake/macros.cmake)
# Import FlatBuffers and use standard way to generate schemas
IF(CMAKE_CROSSCOMPILING)
message(WARNING "Cross Compilation Detected. Third-Party tools like protobuf/flatbuffer are not built. You'll need to make sure they are available in your $PATH")
ELSE()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/3rd_party/flatbuffers ${CMAKE_CURRENT_BINARY_DIR}/flatbuffers-build EXCLUDE_FROM_ALL)
ENDIF()
FILE(GLOB MNN_SCHEMA_SRC ${CMAKE_CURRENT_LIST_DIR}/schema/${MNN_SCHEMA_SUFFIX}/*.fbs)
SET(SCHEMA_TARGETS "")
FOREACH(SCHEMA_SRC ${MNN_SCHEMA_SRC})
get_filename_component(SCHEMA_NAME "${SCHEMA_SRC}" NAME_WE)
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h" COMMAND flatc -c -b --gen-object-api --reflect-names ${SCHEMA_SRC} COMMENT "Generating ${SCHEMA_NAME} Schema in ${CMAKE_CURRENT_LIST_DIR}/schema/${MNN_SCHEMA_SUFFIX}" WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/schema/current/ DEPENDS ${SCHEMA_SRC})
ADD_CUSTOM_TARGET(MNN_SCHEMA_GEN_${SCHEMA_NAME} ALL DEPENDS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h")
IF(NOT CMAKE_CROSSCOMPILING)
add_dependencies(MNN_SCHEMA_GEN_${SCHEMA_NAME} flatc)
ENDIF()
LIST(APPEND SCHEMA_TARGETS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h")
ENDFOREACH()
add_custom_target(MNN_SCHEMA_GEN DEPENDS ${SCHEMA_TARGETS})
if (MNN_USE_THREAD_POOL) if (MNN_USE_THREAD_POOL)
set(MNN_OPENMP OFF) set(MNN_OPENMP OFF)
add_definitions(-DMNN_USE_THREAD_POOL) add_definitions(-DMNN_USE_THREAD_POOL)
@@ -115,15 +119,6 @@ if(MNN_GPU_TRACE)
add_definitions(-DMNN_GPU_FORCE_FINISH) add_definitions(-DMNN_GPU_FORCE_FINISH)
endif() endif()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(X86_64)|(x64)|(X64)|(amd64)|(AMD64)")
IF(MNN_USE_AVX)
add_definitions(-DMNN_USE_AVX)
ENDIF()
IF(MNN_USE_SSE)
add_definitions(-DMNN_USE_SSE)
ENDIF()
ENDIF()
# backend options # backend options
option(MNN_METAL "Enable Metal" OFF) option(MNN_METAL "Enable Metal" OFF)
option(MNN_OPENCL "Enable OpenCL" OFF) option(MNN_OPENCL "Enable OpenCL" OFF)
@@ -141,10 +136,10 @@ option(MNN_BUILD_BENCHMARK "Build benchmark or not" OFF)
option(MNN_BUILD_TEST "Build tests or not" OFF) option(MNN_BUILD_TEST "Build tests or not" OFF)
option(MNN_BUILD_FOR_ANDROID_COMMAND "Build from command" OFF) option(MNN_BUILD_FOR_ANDROID_COMMAND "Build from command" OFF)
set (MNN_HIDDEN FALSE) set (MNN_HIDDEN FALSE)
IF(CMAKE_BUILD_TYPE MATCHES Debug) IF(CMAKE_BUILD_TYPE MATCHES DEBUG)
ELSE() ELSE()
set(MNN_HIDDEN TRUE) set(MNN_HIDDEN TRUE)
ENDIF(CMAKE_BUILD_TYPE MATCHES Debug) ENDIF(CMAKE_BUILD_TYPE MATCHES DEBUG)
message(STATUS ">>>>>>>>>>>>>") message(STATUS ">>>>>>>>>>>>>")
@@ -178,27 +173,18 @@ if(WIN32)
endif() endif()
endif () endif ()
endforeach() endforeach()
elseif(CMAKE_SYSTEM_NAME MATCHES "^Android" OR (UNIX AND NOT APPLE)) elseif(CMAKE_SYSTEM_NAME MATCHES "^Android" OR CMAKE_SYSTEM_NAME MATCHES "^Linux")
add_definitions(-fPIC) add_definitions(-fPIC)
endif() endif()
if(CMAKE_SYSTEM_NAME MATCHES "^Android") if(CMAKE_SYSTEM_NAME MATCHES "^Android")
add_definitions(-DMNN_BUILD_FOR_ANDROID) add_definitions(-DMNN_BUILD_FOR_ANDROID)
endif() if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
add_definitions(-mfloat-abi=softfp -mfpu=neon)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") endif()
if(CMAKE_SYSTEM_NAME MATCHES "^Android")
add_definitions(-mfloat-abi=softfp -mfpu=neon)
else()
IF(MNN_BUILD_HARD)
add_definitions(-mfloat-abi=hard)
ELSE()
add_definitions(-mfloat-abi=softfp)
ENDIF()
endif()
endif() endif()
IF(CMAKE_BUILD_TYPE MATCHES Debug) IF(CMAKE_BUILD_TYPE MATCHES DEBUG)
add_definitions(-DMNN_DEBUG -DDEBUG) add_definitions(-DMNN_DEBUG -DDEBUG)
if(MSVC) if(MSVC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /DEBUG") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /DEBUG")
@@ -214,9 +200,9 @@ else()
else() else()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS}")
if(CMAKE_SYSTEM_NAME MATCHES "^Android") if(CMAKE_SYSTEM_NAME MATCHES "^Android")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -s")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -fPIE -s")
if(MNN_BUILD_FOR_ANDROID_COMMAND) if(MNN_BUILD_FOR_ANDROID_COMMAND)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections") set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections")
endif() endif()
@@ -226,10 +212,10 @@ else()
IF(NOT NATIVE_LIBRARY_OUTPUT) IF(NOT NATIVE_LIBRARY_OUTPUT)
set(NATIVE_LIBRARY_OUTPUT ".") set(NATIVE_LIBRARY_OUTPUT ".")
ENDIF() ENDIF()
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT}/${ANDROID_ABI}) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${NATIVE_LIBRARY_OUTPUT})
endif() endif()
endif() endif()
ENDIF(CMAKE_BUILD_TYPE MATCHES Debug) ENDIF(CMAKE_BUILD_TYPE MATCHES DEBUG)
if(${CMAKE_SYSTEM_NAME} MATCHES "^Linux") if(${CMAKE_SYSTEM_NAME} MATCHES "^Linux")
if((CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") OR (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64")) if((CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") OR (CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64"))
@@ -250,99 +236,53 @@ include_directories(${CMAKE_CURRENT_LIST_DIR}/include/
${CMAKE_CURRENT_LIST_DIR}/3rd_party/half ${CMAKE_CURRENT_LIST_DIR}/3rd_party/half
${CMAKE_CURRENT_LIST_DIR}/3rd_party/imageHelper ${CMAKE_CURRENT_LIST_DIR}/3rd_party/imageHelper
${CMAKE_CURRENT_LIST_DIR}/3rd_party/OpenCLHeaders/ ${CMAKE_CURRENT_LIST_DIR}/3rd_party/OpenCLHeaders/
) )
# Import FlatBuffers and use standard way to generate schemas
IF(CMAKE_CROSSCOMPILING)
message(WARNING "Cross Compilation Detected. Third-Party tools like protobuf/flatbuffer are not built. You'll need to make sure they are available in your $PATH")
ELSE()
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/3rd_party/flatbuffers ${CMAKE_CURRENT_BINARY_DIR}/flatbuffers-build EXCLUDE_FROM_ALL)
ENDIF()
FILE(GLOB MNN_SCHEMA_SRC ${CMAKE_CURRENT_LIST_DIR}/schema/${MNN_SCHEMA_SUFFIX}/*.fbs)
SET(SCHEMA_TARGETS "")
FOREACH(SCHEMA_SRC ${MNN_SCHEMA_SRC})
file(MAKE_DIRECTORY "${CMAKE_CURRENT_LIST_DIR}/schema/current/")
get_filename_component(SCHEMA_NAME "${SCHEMA_SRC}" NAME_WE)
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h" COMMAND flatc -c -b --gen-object-api --reflect-names ${SCHEMA_SRC} COMMENT "Generating ${SCHEMA_NAME} Schema in ${CMAKE_CURRENT_LIST_DIR}/schema/${MNN_SCHEMA_SUFFIX}" WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/schema/current/ DEPENDS ${SCHEMA_SRC})
ADD_CUSTOM_TARGET(MNN_SCHEMA_GEN_${SCHEMA_NAME} ALL DEPENDS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h")
IF(NOT CMAKE_CROSSCOMPILING)
add_dependencies(MNN_SCHEMA_GEN_${SCHEMA_NAME} flatc)
ENDIF()
LIST(APPEND SCHEMA_TARGETS "${CMAKE_CURRENT_LIST_DIR}/schema/current/${SCHEMA_NAME}_generated.h")
ENDFOREACH()
# GenVCSHDR is not actually required. But this allows sub-targets using VCS.h without extra work in their CMake dependency declaration
add_custom_target(MNN_SCHEMA_GEN DEPENDS ${SCHEMA_TARGETS} GenVCSHDR)
set(MNN_OBJECTS_TO_LINK "") set(MNN_OBJECTS_TO_LINK "")
set(MNN_TARGETS "") set(MNN_TARGETS "")
# Core # Core
FILE(GLOB MNN_Core_SRC ${CMAKE_CURRENT_LIST_DIR}/source/core/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/core/*.c) FILE(GLOB MNN_Core_SRC ${CMAKE_CURRENT_LIST_DIR}/source/core/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/core/*.c)
add_library(MNNCore OBJECT ${MNN_Core_SRC} ${SCHEMA_TARGETS}) add_library(MNNCore OBJECT ${MNN_Core_SRC})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCore>) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCore>)
list(APPEND MNN_TARGETS MNNCore) list(APPEND MNN_TARGETS MNNCore)
# CV # CV
FILE(GLOB MNN_CV_SRC ${CMAKE_CURRENT_LIST_DIR}/source/cv/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/cv/*.c) FILE(GLOB MNN_CV_SRC ${CMAKE_CURRENT_LIST_DIR}/source/cv/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/cv/*.c)
add_library(MNNCV OBJECT ${MNN_CV_SRC} ${SCHEMA_TARGETS}) add_library(MNNCV OBJECT ${MNN_CV_SRC})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCV>) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCV>)
list(APPEND MNN_TARGETS MNNCV) list(APPEND MNN_TARGETS MNNCV)
# Math # Math
FILE(GLOB MNN_Math_SRC ${CMAKE_CURRENT_LIST_DIR}/source/math/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/math/*.c) FILE(GLOB MNN_Math_SRC ${CMAKE_CURRENT_LIST_DIR}/source/math/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/math/*.c)
add_library(MNNMath OBJECT ${MNN_Math_SRC} ${SCHEMA_TARGETS}) add_library(MNNMath OBJECT ${MNN_Math_SRC})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNMath>) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNMath>)
list(APPEND MNN_TARGETS MNNMath) list(APPEND MNN_TARGETS MNNMath)
# Shape # Shape
FILE(GLOB MNN_Shape_SRC ${CMAKE_CURRENT_LIST_DIR}/source/shape/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/shape/*.c) FILE(GLOB MNN_Shape_SRC ${CMAKE_CURRENT_LIST_DIR}/source/shape/*.cpp ${CMAKE_CURRENT_LIST_DIR}/source/shape/*.c)
add_library(MNNShape OBJECT ${MNN_Shape_SRC} ${SCHEMA_TARGETS}) add_library(MNNShape OBJECT ${MNN_Shape_SRC})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNShape>) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNShape>)
list(APPEND MNN_TARGETS MNNShape) list(APPEND MNN_TARGETS MNNShape)
# CPU
FILE(GLOB MNN_CPU_SRC ${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/*.cpp)
add_library(MNNCPU OBJECT ${MNN_CPU_SRC} ${SCHEMA_TARGETS})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCPU>)
list(APPEND MNN_TARGETS MNNCPU)
# Compute # Compute
FILE(GLOB MNN_Compute_SRC ${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/compute/*.cpp) FILE(GLOB MNN_Compute_SRC ${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/compute/*.cpp)
add_library(MNNCompute OBJECT ${MNN_Compute_SRC} ${SCHEMA_TARGETS}) add_library(MNNCompute OBJECT ${MNN_Compute_SRC})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCompute>) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCompute>)
list(APPEND MNN_TARGETS MNNCompute) list(APPEND MNN_TARGETS MNNCompute)
# Include sub components # CPU
## add_subdirectory() avoids recompilation if an option was toggled FILE(GLOB MNN_CPU_SRC ${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/*.cpp)
## However due to variable scope issues, add the following two lines to the end of sub-scope CMakeLists add_library(MNNCPU OBJECT ${MNN_CPU_SRC})
### SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNCPU>)
### SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) list(APPEND MNN_TARGETS MNNCPU)
# X86_64 AVX/SSE # X86_64 AVX/SSE
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/x86_x64/) include(${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/x86_x64/CMakeLists.txt)
# AArch32/64 Assemblies # AArch32/64 Assemblies
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/arm/) include(${CMAKE_CURRENT_LIST_DIR}/source/backend/cpu/arm/CMakeLists.txt)
# Metal
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/metal/)
# Vulkan
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/vulkan/)
# OpenCL
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/opencl/)
# OpenGL
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/opengl/)
# ARM82 Assemblies
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/arm82/)
# Express
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/express/)
IF(NOT DEFINED IOS_ARCH) IF(NOT DEFINED IOS_ARCH)
set(IOS_ARCH "") set(IOS_ARCH "")
@@ -365,31 +305,87 @@ list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/Expr
list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/MathOp.hpp") list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/MathOp.hpp")
list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/NeuralNetWorkOp.hpp") list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/NeuralNetWorkOp.hpp")
list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/Optimizer.hpp") list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/Optimizer.hpp")
list(APPEND MNN_EXPR_PUB_HDRS "${CMAKE_CURRENT_SOURCE_DIR}/include/MNN/expr/Executor.hpp")
IF(MNN_BUILD_SHARED_LIBS) set(CMAKE_CXX_FLAGS_ORIGIN ${CMAKE_CXX_FLAGS})
set(CMAKE_C_FLAGS_ORIGIN ${CMAKE_C_FLAGS})
if ((NOT (MSVC OR WIN32)) AND MNN_HIDDEN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fvisibility=hidden")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions ")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math")
if (NOT APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fomit-frame-pointer")
endif()
endif()
# Metal
include(${CMAKE_CURRENT_LIST_DIR}/source/backend/metal/CMakeLists.txt)
set(MNN_DEPS "")
set(MNN_EXTRA_DEPENDS "")
list(APPEND MNN_DEPS MNN)
# Vulkan
IF(MNN_VULKAN)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/vulkan/)
IF(MNN_SEP_BUILD) IF(MNN_SEP_BUILD)
# TODO: Find better ways to do this list(APPEND MNN_DEPS MNN_Vulkan)
IF(MNN_OPENCL) ELSE()
list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNOpenCL>) list(APPEND MNN_TARGETS MNN_Vulkan)
add_library(MNN_CL SHARED $<TARGET_OBJECTS:MNNOpenCL> ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNN_Vulkan>)
ENDIF()
IF(MNN_OPENGL)
list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNOpenGL>)
add_library(MNN_GL SHARED $<TARGET_OBJECTS:MNNOpenGL> ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp)
target_link_libraries(MNN_GL PUBLIC GLESv3 EGL)
ENDIF()
IF(MNN_VULKAN)
list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNVulkan>)
add_library(MNN_Vulkan SHARED $<TARGET_OBJECTS:MNNVulkan> ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp)
ENDIF()
IF(MNN_ARM82)
list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNARM82>)
add_library(MNN_Arm82 SHARED $<TARGET_OBJECTS:MNNARM82> ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp)
ENDIF()
list(REMOVE_ITEM MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNExpress>)
add_library(MNN_Express SHARED $<TARGET_OBJECTS:MNNExpress> ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp)
ENDIF() ENDIF()
ENDIF()
# OpenCL
IF(MNN_OPENCL)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/opencl/)
IF(MNN_SEP_BUILD)
list(APPEND MNN_DEPS MNN_CL)
ELSE()
list(APPEND MNN_TARGETS MNN_CL)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNN_CL>)
list(APPEND MNN_EXTRA_DEPENDS ${MNN_OCL_LIBS})
ENDIF()
ENDIF()
# OpenGL
IF(MNN_OPENGL)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/opengl/)
IF(MNN_SEP_BUILD)
list(APPEND MNN_DEPS MNN_GL)
ELSE()
list(APPEND MNN_TARGETS MNN_GL)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNN_GL>)
list(APPEND MNN_EXTRA_DEPENDS GLESv3)
list(APPEND MNN_EXTRA_DEPENDS EGL)
ENDIF()
ENDIF()
# ARM82 Assemblies
IF(MNN_ARM82)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/source/backend/arm82/)
IF(MNN_SEP_BUILD)
list(APPEND MNN_DEPS MNN_Arm82)
ELSE()
list(APPEND MNN_TARGETS MNN_Arm82)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNN_Arm82>)
ENDIF()
ENDIF()
# Express
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/express/)
IF(MNN_SEP_BUILD)
add_library(MNN SHARED ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS}) add_library(MNN SHARED ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS})
target_link_libraries(MNN PUBLIC ${MNN_EXTRA_DEPENDS})
list(APPEND MNN_DEPS MNN_Express)
ELSE()
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNExpress>)
list(APPEND MNN_TARGETS MNNExpress)
IF(MNN_BUILD_SHARED_LIBS)
add_library(MNN SHARED ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS})
ELSE()
add_library(MNN STATIC ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS})
ENDIF()
target_link_libraries(MNN PUBLIC ${MNN_EXTRA_DEPENDS})
if (WIN32) if (WIN32)
foreach(TARGET ${MNN_TARGETS}) foreach(TARGET ${MNN_TARGETS})
target_compile_definitions(${TARGET} PRIVATE "-DBUILDING_MNN_DLL") target_compile_definitions(${TARGET} PRIVATE "-DBUILDING_MNN_DLL")
@@ -398,45 +394,35 @@ IF(MNN_BUILD_SHARED_LIBS)
target_compile_definitions(MNN PRIVATE "-DBUILDING_MNN_DLL") target_compile_definitions(MNN PRIVATE "-DBUILDING_MNN_DLL")
target_compile_definitions(MNN INTERFACE "-DUSING_MNN_DLL") target_compile_definitions(MNN INTERFACE "-DUSING_MNN_DLL")
endif() endif()
ELSE()
add_library(MNN STATIC ${CMAKE_CURRENT_LIST_DIR}/cmake/dummy.cpp ${MNN_OBJECTS_TO_LINK} ${MNN_PUB_HDRS} ${MNN_EXPR_PUB_HDRS})
ENDIF() ENDIF()
set_target_properties(MNN PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${CMAKE_CURRENT_LIST_DIR}/include/")
if(APPLE) if(APPLE)
IF(MNN_AAPL_FMWK) IF(MNN_AAPL_FMWK)
SET_TARGET_PROPERTIES(MNN PROPERTIES FRAMEWORK TRUE) set_target_properties(MNN PROPERTIES FRAMEWORK TRUE)
SET_TARGET_PROPERTIES(MNN PROPERTIES set_target_properties(MNN PROPERTIES
MACOSX_FRAMEWORK_IDENTIFIER com.alibaba.MNN MACOSX_FRAMEWORK_IDENTIFIER com.alibaba.MNN
MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${PACKAGE_VERSION} MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${PACKAGE_VERSION}
MACOSX_FRAMEWORK_BUNDLE_VERSION ${PACKAGE_VERSION} MACOSX_FRAMEWORK_BUNDLE_VERSION ${PACKAGE_VERSION}
XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer" XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "iPhone Developer"
) )
SET_TARGET_PROPERTIES(MNN PROPERTIES MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/project/ios/MNN/Info.plist) set_target_properties(MNN PROPERTIES MACOSX_FRAMEWORK_INFO_PLIST ${CMAKE_CURRENT_SOURCE_DIR}/project/ios/MNN/Info.plist)
IF(DEFINED MNN_METALLIB_PATH)
message(STATUS "Metal Library Path:${MNN_METALLIB_PATH}")
SET_TARGET_PROPERTIES(MNN PROPERTIES RESOURCE "${MNN_METALLIB_PATH}")
SET_SOURCE_FILES_PROPERTIES("${MNN_METALLIB_PATH}" PROPERTIES MACOSX_PACKAGE_LOCATION Resources/)
ENDIF()
ENDIF() ENDIF()
find_library(FOUNDATION Foundation REQUIRED)
target_link_libraries(MNN PUBLIC ${FOUNDATION})
IF(MNN_METAL) IF(MNN_METAL)
find_library(FOUNDATION Foundation REQUIRED)
target_link_libraries(MNN PUBLIC ${FOUNDATION})
find_library(METAL Metal REQUIRED) find_library(METAL Metal REQUIRED)
target_link_libraries(MNN PUBLIC ${METAL}) target_link_libraries(MNN PUBLIC ${METAL})
ENDIF() ENDIF()
endif() endif()
add_dependencies(MNN MNNCore MNNCV MNNShape MNNMath MNNCompute MNNCPU GenVCSHDR)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/tools/converter)
if(CMAKE_SYSTEM_NAME MATCHES "^Android") if(CMAKE_SYSTEM_NAME MATCHES "^Linux")
target_link_libraries(MNN PUBLIC log android m)
elseif(UNIX AND NOT APPLE)
target_link_libraries(MNN PUBLIC pthread) target_link_libraries(MNN PUBLIC pthread)
elseif(CMAKE_SYSTEM_NAME MATCHES "^Android")
target_link_libraries(MNN PUBLIC log android m)
else() else()
endif() endif()
if(MNN_OPENGL)
target_link_libraries(MNN PUBLIC GLESv3 EGL)
endif()
if (MSVC OR WIN32) if (MSVC OR WIN32)
target_link_options(MNN PRIVATE "/IGNORE:4049,4217") target_link_options(MNN PRIVATE "/IGNORE:4049,4217")
foreach(DEPEND ${MNN_DEPEND}) foreach(DEPEND ${MNN_DEPEND})
@@ -447,31 +433,6 @@ if (MSVC OR WIN32)
target_link_libraries(MNN PRIVATE ${DEPEND}) target_link_libraries(MNN PRIVATE ${DEPEND})
endforeach() endforeach()
endif() endif()
set(MNN_DEPS "")
list(APPEND MNN_DEPS MNN)
IF(MNN_BUILD_SHARED_LIBS)
IF(MNN_SEP_BUILD)
# TODO: Find better ways to do this
IF(MNN_OPENCL)
target_link_libraries(MNN_CL PRIVATE MNN)
list(APPEND MNN_DEPS MNN_CL)
ENDIF()
IF(MNN_OPENGL)
target_link_libraries(MNN_GL PRIVATE MNN)
list(APPEND MNN_DEPS MNN_GL)
ENDIF()
IF(MNN_VULKAN)
target_link_libraries(MNN_Vulkan PRIVATE MNN)
list(APPEND MNN_DEPS MNN_Vulkan)
ENDIF()
IF(MNN_ARM82)
target_link_libraries(MNN_Arm82 PRIVATE MNN)
list(APPEND MNN_DEPS MNN_Arm82)
ENDIF()
target_link_libraries(MNN_Express PRIVATE MNN)
list(APPEND MNN_DEPS MNN_Express)
ENDIF()
ENDIF()
if (NOT MNN_BUILD_SHARED_LIBS) if (NOT MNN_BUILD_SHARED_LIBS)
if(APPLE) if(APPLE)
set(MNN_DEPEND -Wl,-all_load ${MNN_DEPEND} -Wl,-noall_load) set(MNN_DEPEND -Wl,-all_load ${MNN_DEPEND} -Wl,-noall_load)
@@ -479,30 +440,6 @@ if (NOT MNN_BUILD_SHARED_LIBS)
set(MNN_DEPEND -Wl,--whole-archive ${MNN_DEPEND} -Wl,--no-whole-archive) set(MNN_DEPEND -Wl,--whole-archive ${MNN_DEPEND} -Wl,--no-whole-archive)
endif() endif()
endif() endif()
# OpenCL Library
IF(MNN_OPENCL)
IF(APPLE)
find_library(OPENCL OpenCL REQUIRED)
SET(MNN_OCL_LIBS ${OPENCL})
ELSEIF(NOT ${CMAKE_SYSTEM_NAME} MATCHES "Android")
find_package(OpenCL REQUIRED)
SET(MNN_OCL_LIBS ${OpenCL_LIBRARIES})
ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Android")
add_definitions(-DMNN_USE_OPENCL_WRAPPER)
#add_definitions(-DENABLE_OPENCL_TURNING_PROFILER)
#add_definitions(-DLOG_VERBOSE)
ENDIF()
IF(MNN_OCL_LIBS)
IF(MNN_SEP_BUILD AND MNN_BUILD_SHARED_LIBS)
target_link_libraries(MNN_CL PUBLIC ${MNN_OCL_LIBS})
ELSE()
target_link_libraries(MNN PUBLIC ${MNN_OCL_LIBS})
ENDIF()
ENDIF()
ENDIF()
# OpenMP
if (NOT APPLE) if (NOT APPLE)
if(MNN_OPENMP) if(MNN_OPENMP)
message(STATUS "[*] Checking OpenMP") message(STATUS "[*] Checking OpenMP")
@@ -526,8 +463,8 @@ if (NOT APPLE)
set(OpenMP_C_FLAGS "/openmp ${OpenMP_C_FLAGS}") set(OpenMP_C_FLAGS "/openmp ${OpenMP_C_FLAGS}")
set(OpenMP_CXX_FLAGS "/openmp ${OpenMP_CXX_FLAGS}") set(OpenMP_CXX_FLAGS "/openmp ${OpenMP_CXX_FLAGS}")
endif() endif()
FOREACH(TARGET ${MNN_DEPS}) FOREACH(TARGET ${MNN_TARGETS})
target_link_libraries(${TARGET} PUBLIC ${OpenMP_CXX_LIBRARIES}) target_link_libraries(${TARGET} OpenMP::OpenMP_CXX)
IF(WIN32) IF(WIN32)
target_compile_options(${TARGET} PUBLIC /openmp ${OpenMP_CXX_FLAGS} ${OpenMP_C_FLAGS}) target_compile_options(${TARGET} PUBLIC /openmp ${OpenMP_CXX_FLAGS} ${OpenMP_C_FLAGS})
ELSE() ELSE()
@@ -539,26 +476,22 @@ endif()
list(APPEND MNN_TARGETS MNN) list(APPEND MNN_TARGETS MNN)
FOREACH(TARGET ${MNN_TARGETS}) FOREACH(TARGET ${MNN_TARGETS})
add_dependencies(${TARGET} MNN_SCHEMA_GEN) add_dependencies(${TARGET} MNN_SCHEMA_GEN)
IF(NOT MSVC) IF((NOT MSVC) AND (NOT WIN32))
target_compile_options(${TARGET} PUBLIC -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti)
target_compile_options(${TARGET} PRIVATE -fno-exceptions)
if(MNN_HIDDEN)
target_compile_options(${TARGET} PRIVATE -fvisibility-inlines-hidden -fvisibility=hidden)
endif()
else() else()
target_compile_definitions(${TARGET} PRIVATE "_CRT_SECURE_NO_WARNINGS") target_compile_definitions(${TARGET} PRIVATE _CRT_SECURE_NO_WARNINGS)
target_compile_options(${TARGET} PRIVATE "/wd4267" "/wd4018" "/wd4251" "/wd4996" "/wd4244" "/wd4146" "/wd4129" "/wd4305") target_compile_options(${TARGET} PRIVATE "/wd4267" "/wd4018" "/wd4251" "/wd4996" "/wd4244" "/wd4146" "/wd4129" "/wd4305")
endif() endif()
ENDFOREACH() ENDFOREACH()
list(REMOVE_ITEM MNN_TARGETS MNN) list(REMOVE_ITEM MNN_TARGETS MNN)
include(${CMAKE_CURRENT_LIST_DIR}/demo/exec/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/demo/exec/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/tools/cpp/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/tools/cpp/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/tools/train/CMakeLists.txt) IF (MNN_BUILD_TRAIN)
add_subdirectory(tools/train)
ENDIF()
include(${CMAKE_CURRENT_LIST_DIR}/test/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/test/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/benchmark/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/benchmark/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/tools/quantization/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/tools/quantization/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/tools/evaluation/CMakeLists.txt) include(${CMAKE_CURRENT_LIST_DIR}/tools/evaluation/CMakeLists.txt)
include(${CMAKE_CURRENT_LIST_DIR}/tools/converter/CMakeLists.txt)
# Install headers # Install headers
IF(CMAKE_SYSTEM_NAME MATCHES "^Android" AND NOT MNN_BUILD_FOR_ANDROID_COMMAND) IF(CMAKE_SYSTEM_NAME MATCHES "^Android" AND NOT MNN_BUILD_FOR_ANDROID_COMMAND)
@@ -613,4 +546,7 @@ ELSE()
FOREACH(HDR ${MNN_PUB_HDRS}) FOREACH(HDR ${MNN_PUB_HDRS})
SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/ ) SET_SOURCE_FILES_PROPERTIES(${HDR} PROPERTIES MACOSX_PACKAGE_LOCATION Headers/ )
ENDFOREACH() ENDFOREACH()
IF(MNN_METAL)
SET_SOURCE_FILES_PROPERTIES(${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib PROPERTIES MACOSX_PACKAGE_LOCATION Resources/)
ENDIF()
ENDIF() ENDIF()

View File

@@ -1,6 +1,6 @@
Pod::Spec.new do |s| Pod::Spec.new do |s|
s.name = "MNN" s.name = "MNN"
s.version = "0.2.1.7" s.version = "0.1.1"
s.summary = "MNN" s.summary = "MNN"
s.description = <<-DESC s.description = <<-DESC
@@ -31,8 +31,31 @@ Pod::Spec.new do |s|
s.platform = :ios s.platform = :ios
s.ios.deployment_target = '8.0' s.ios.deployment_target = '8.0'
s.requires_arc = true s.requires_arc = true
#s.source = { :git => "git@github.com:alibaba/MNN.git", :branch => 'master' }
s.prepare_command = <<-CMD
schema/generate.sh
python source/backend/metal/MetalCodeGen.py source/backend/metal/ source/backend/metal/MetalOPRegister.mm
CMD
s.source = {:git => "/Users/zhang/Development/AliNNPrivate/",:branch=> 'head'}
s.frameworks = 'Metal', 'Accelerate' s.frameworks = 'Metal', 'Accelerate'
s.library = 'c++' s.library = 'c++'
s.source = {:http=>"https://github.com/alibaba/MNN/releases/download/#{s.version}/MNN-iOS-#{s.version}.zip"} s.source_files = \
s.vendored_frameworks = "MNN.framework" 'include/MNN/*.{h,hpp}',\
'include/MNN/expr/*.{h,hpp}',\
'schema/current/*.{h}',\
'3rd_party/flatbuffers/include/flatbuffers/*.{h}',\
'source/core/**/*.{h,c,m,mm,cc,hpp,cpp}',\
'source/cv/**/*.{h,c,m,mm,cc,hpp,cpp}',\
'source/math/**/*.{h,c,m,mm,cc,hpp,cpp,metal}',\
'source/shape/*.{h,c,m,mm,cc,hpp,cpp}',\
'source/backend/cpu/*.{h,c,m,mm,cc,S,hpp,cpp}',\
'source/backend/cpu/arm/**/*.{h,c,m,mm,cc,S,hpp,cpp}',\
'source/backend/cpu/compute/*.{h,c,m,mm,cc,S,hpp,cpp}',\
'source/backend/metal/*.{h,c,m,mm,cc,hpp,cpp,metal}',\
'express/**/*.{hpp,cpp}'
s.header_mappings_dir = 'include'
s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/include" "$(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include" "$(PODS_TARGET_SRCROOT)/source" "$(PODS_TARGET_SRCROOT)/3rd_party/half"', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'}
s.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/include"' }
end end

View File

@@ -1,8 +1,4 @@
set -e
schema/generate.sh
cd project/android cd project/android
rm -rf build_32
mkdir build_32 mkdir build_32
cd build_32 cd build_32
cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="armeabi-v7a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../ ../build_32.sh -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON
make -j8

View File

@@ -1,8 +1,4 @@
set -e
schema/generate.sh
cd project/android cd project/android
rm -rf build_32
mkdir build_32 mkdir build_32
cd build_32 cd build_32
cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="armeabi-v7a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../ ../build_32.sh -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF
make -j8

View File

@@ -1,7 +1,4 @@
set -e
schema/generate.sh
cd project/android cd project/android
mkdir build_64 mkdir build_64
cd build_64 cd build_64
cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="arm64-v8a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../ ../build_64.sh -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON
make -j8

View File

@@ -1,8 +1,4 @@
set -e
schema/generate.sh
cd project/android cd project/android
rm -rf build_64
mkdir build_64 mkdir build_64
cd build_64 cd build_64
cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="arm64-v8a" -DANDROID_STL=c++_static -DCMAKE_BUILD_TYPE=Release -DANDROID_NATIVE_API_LEVEL=android-21 -DANDROID_TOOLCHAIN=clang -DMNN_BUILD_FOR_ANDROID_COMMAND=true -DNATIVE_LIBRARY_OUTPUT=. -DNATIVE_INCLUDE_OUTPUT=. -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_OPENGL=ON -DMNN_OPENCL=ON ../../../ ../build_64.sh -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF
make -j8

View File

@@ -1,5 +1,3 @@
set -e
schema/generate.sh
cd project/android/ cd project/android/
./gradlew assembleRelease ./gradlew assembleRelease
if [[ -z "${DEPLOY_ENV}" ]]; then if [[ -z "${DEPLOY_ENV}" ]]; then

View File

@@ -1,5 +1,6 @@
schema/generate.sh ./schema/generate.sh
mkdir linuxbuild mkdir linuxbuild
cd linuxbuild cd linuxbuild
cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_BUILD_BENCHMARK=ON cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=ON -DMNN_USE_THREAD_POOL=OFF -DMNN_BUILD_BENCHMARK=ON
make -j8 make -j8
make test

View File

@@ -1,5 +1,6 @@
schema/generate.sh ./schema/generate.sh
mkdir linuxbuild mkdir linuxbuild
cd linuxbuild cd linuxbuild
cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_BUILD_BENCHMARK=ON cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_OPENCL=ON -DMNN_VULKAN=ON -DMNN_OPENMP=OFF -DMNN_USE_THREAD_POOL=ON -DMNN_BUILD_BENCHMARK=ON
make -j8 make -j8
make test

View File

@@ -1,3 +1 @@
set -e
schema/generate.sh
project/ios/buildiOS.sh project/ios/buildiOS.sh

View File

@@ -1,6 +1,5 @@
set -e ./schema/generate.sh
schema/generate.sh xcodebuild -configuration Release -project project/ios/MNN.xcodeproj
xcodebuild CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO ONLY_ACTIVE_ARCH=NO -configuration Release -project project/ios/MNN.xcodeproj
find . -name ".DS_Store" -delete find . -name ".DS_Store" -delete
cd project/ios/build/Release-iphoneos/ cd project/ios/build/Release-iphoneos/
zip -r MNN.iOS.framework.zip ./ zip -r MNN.iOS.framework.zip ./

View File

@@ -1,6 +1,6 @@
set -e ./schema/generate.sh
schema/generate.sh
mkdir macosbuild mkdir macosbuild
cd macosbuild cd macosbuild
cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON
make -j8 make -j8
make test

View File

@@ -1,6 +1,6 @@
set -e ./schema/generate.sh
schema/generate.sh
mkdir macosbuild mkdir macosbuild
cd macosbuild cd macosbuild
cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_METAL=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON cmake ../ -DCMAKE_BUILD_TYPE=Release -DMNN_BUILD_TRAIN=ON -DMNN_BUILD_DEMO=ON -DMNN_BUILD_QUANTOOLS=ON -DMNN_EVALUATION=ON -DMNN_BUILD_CONVERTER=ON -DMNN_SUPPORT_TFLITE_QUAN=ON -DMNN_METAL=ON -DMNN_BUILD_TEST=ON -DMNN_BUILD_BENCHMARK=ON
make -j8 make -j8
make test

View File

@@ -1,4 +1,3 @@
mkdir build && cd build call "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars64.bat"
call "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars64.bat" cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Release ..
cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Release .. ninja
ninja

View File

@@ -1,4 +1,3 @@
mkdir build && cd build call "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars32.bat"
call "C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC/Auxiliary/Build/vcvars32.bat" cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Release ..
cmake -G "Ninja" -DCMAKE_BUILD_TYPE=Release .. ninja
ninja

View File

@@ -1,6 +1,21 @@
cmake_minimum_required(VERSION 3.4.1) cmake_minimum_required(VERSION 3.4.1)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../../../ ${CMAKE_CURRENT_BINARY_DIR}/MNN-build EXCLUDE_FROM_ALL) set(lib_DIR ${CMAKE_SOURCE_DIR}/libs)
include_directories(${CMAKE_SOURCE_DIR}/includes)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -fvisibility=hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -flax-vector-conversions")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -fno-rtti -fno-exceptions -flax-vector-conversions")
set (CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} -Wl,--gc-sections")
add_library( MNN SHARED IMPORTED )
set_target_properties(
MNN
PROPERTIES IMPORTED_LOCATION
${lib_DIR}/${ANDROID_ABI}/libMNN.so
)
file(GLOB_RECURSE CPP_SRCS src/main/jni/*.cpp ) file(GLOB_RECURSE CPP_SRCS src/main/jni/*.cpp )
add_library( mnncore SHARED ${CPP_SRCS} ) add_library( mnncore SHARED ${CPP_SRCS} )

View File

@@ -0,0 +1,41 @@
//
// AutoTime.hpp
// MNN
//
// Created by MNN on 2018/07/27.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef AutoTime_hpp
#define AutoTime_hpp
#include <stdint.h>
#include <stdio.h>
#include <MNN/MNNDefine.h>
namespace MNN {
/** time tracing util. prints duration between init and deinit. */
class MNN_PUBLIC AutoTime {
public:
AutoTime(int line, const char* func);
~AutoTime();
AutoTime(const AutoTime&) = delete;
AutoTime(const AutoTime&&) = delete;
AutoTime& operator=(const AutoTime&) = delete;
AutoTime& operator=(const AutoTime&&) = delete;
private:
int mLine;
char* mName;
uint64_t mCurrentTime;
};
} // namespace MNN
#ifdef MNN_OPEN_TIME_TRACE
#define AUTOTIME MNN::AutoTime ___t(__LINE__, __func__)
#else
#define AUTOTIME
#endif
#endif /* AutoTime_hpp */
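
Since AUTOTIME expands to a scoped MNN::AutoTime object named ___t, a typical use is to drop the macro at the top of the scope to be measured. A minimal sketch, assuming MNN_OPEN_TIME_TRACE is defined by the build and the header is installed as <MNN/AutoTime.hpp>; the function name is illustrative:

#include <MNN/AutoTime.hpp>   // assumed install path; MNN_OPEN_TIME_TRACE enabled by the build

void runOnce() {              // illustrative function to be timed
    AUTOTIME;                 // expands to MNN::AutoTime ___t(__LINE__, __func__)
    // ... workload ...
}                             // ___t is destroyed here and the elapsed time for runOnce is printed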

View File

@@ -0,0 +1,33 @@
//
// ErrorCode.hpp
// MNN
//
// Created by MNN on 2018/09/18.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef ErrorCode_h
#define ErrorCode_h
namespace MNN {
enum ErrorCode {
#ifdef NO_ERROR
#undef NO_ERROR
#endif //NO_ERROR
NO_ERROR = 0,
OUT_OF_MEMORY = 1,
NOT_SUPPORT = 2,
COMPUTE_SIZE_ERROR = 3,
NO_EXECUTION = 4,
// User error
INPUT_DATA_ERROR = 10,
CALL_BACK_STOP = 11,
// Op Resize Error
TENSOR_NOT_SUPPORT = 20,
TENSOR_NEED_DIVIDE = 21,
};
}
#endif /* ErrorCode_h */
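
For reference, a minimal sketch of branching on these codes; the include path is assumed and the helper below is purely illustrative (only the enum values come from the header above):

#include <MNN/ErrorCode.hpp>  // assumed install path
#include <cstdio>

static void report(MNN::ErrorCode code) {
    switch (code) {
        case MNN::NO_ERROR:         std::printf("ok\n"); break;
        case MNN::OUT_OF_MEMORY:    std::printf("allocation failed\n"); break;
        case MNN::INPUT_DATA_ERROR: std::printf("bad input data\n"); break;
        case MNN::CALL_BACK_STOP:   std::printf("stopped by callback\n"); break;
        default:                    std::printf("error code %d\n", static_cast<int>(code)); break;
    }
}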

View File

@@ -0,0 +1,307 @@
#ifndef HALIDE_HALIDERUNTIME_H
#define HALIDE_HALIDERUNTIME_H
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
// Note that you should not use "inline" along with HALIDE_ALWAYS_INLINE;
// it is not necessary, and may produce warnings for some build configurations.
#ifdef _MSC_VER
#define HALIDE_ALWAYS_INLINE __forceinline
#define HALIDE_NEVER_INLINE __declspec(noinline)
#else
#define HALIDE_ALWAYS_INLINE __attribute__((always_inline)) inline
#define HALIDE_NEVER_INLINE __attribute__((noinline))
#endif
/** \file
*
* This file declares the routines used by Halide internally in its
* runtime. On platforms that support weak linking, these can be
* replaced with user-defined versions by defining an extern "C"
* function with the same name and signature.
*
* When doing Just In Time (JIT) compilation methods on the Func being
* compiled must be called instead. The corresponding methods are
* documented below.
*
* All of these functions take a "void *user_context" parameter as their
* first argument; if the Halide kernel that calls back to any of these
* functions has been compiled with the UserContext feature set on its Target,
* then the value of that pointer passed from the code that calls the
* Halide kernel is piped through to the function.
*
* Some of these are also useful to call when using the default
* implementation. E.g. halide_shutdown_thread_pool.
*
* Note that even on platforms with weak linking, some linker setups
* may not respect the override you provide. E.g. if the override is
* in a shared library and the halide object files are linked directly
* into the output, the builtin versions of the runtime functions will
* be called. See your linker documentation for more details. On
* Linux, LD_DYNAMIC_WEAK=1 may help.
*
*/
// Forward-declare to suppress warnings if compiling as C.
struct halide_buffer_t;
/** Types in the halide type system. They can be ints, unsigned ints,
* or floats (of various bit-widths), or a handle (which is always 64-bits).
* Note that the int/uint/float values do not imply a specific bit width
* (the bit width is expected to be encoded in a separate value).
*/
typedef enum halide_type_code_t
{
halide_type_int = 0, //!< signed integers
halide_type_uint = 1, //!< unsigned integers
halide_type_float = 2, //!< floating point numbers
halide_type_handle = 3 //!< opaque pointer type (void *)
} halide_type_code_t;
// Note that while __attribute__ can go before or after the declaration,
// __declspec apparently is only allowed before.
#ifndef HALIDE_ATTRIBUTE_ALIGN
#ifdef _MSC_VER
#define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x))
#else
#define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x)))
#endif
#endif
/** A runtime tag for a type in the halide type system. Can be ints,
* unsigned ints, or floats of various bit-widths (the 'bits'
* field). Can also be vectors of the same (by setting the 'lanes'
* field to something larger than one). This struct should be
* exactly 32-bits in size. */
struct halide_type_t {
/** The basic type code: signed integer, unsigned integer, or floating point. */
#if __cplusplus >= 201103L
HALIDE_ATTRIBUTE_ALIGN(1) halide_type_code_t code; // halide_type_code_t
#else
HALIDE_ATTRIBUTE_ALIGN(1) uint8_t code; // halide_type_code_t
#endif
/** The number of bits of precision of a single scalar value of this type. */
HALIDE_ATTRIBUTE_ALIGN(1) uint8_t bits;
/** How many elements in a vector. This is 1 for scalar types. */
HALIDE_ATTRIBUTE_ALIGN(2) uint16_t lanes;
#ifdef __cplusplus
/** Construct a runtime representation of a Halide type from:
* code: The fundamental type from an enum.
* bits: The bit size of one element.
* lanes: The number of vector elements in the type. */
HALIDE_ALWAYS_INLINE halide_type_t(halide_type_code_t code, uint8_t bits, uint16_t lanes = 1)
: code(code), bits(bits), lanes(lanes) {
}
/** Default constructor is required e.g. to declare halide_trace_event
* instances. */
HALIDE_ALWAYS_INLINE halide_type_t() : code((halide_type_code_t)0), bits(0), lanes(0) {}
/** Compare two types for equality. */
HALIDE_ALWAYS_INLINE bool operator==(const halide_type_t &other) const {
return (code == other.code &&
bits == other.bits &&
lanes == other.lanes);
}
HALIDE_ALWAYS_INLINE bool operator!=(const halide_type_t &other) const {
return !(*this == other);
}
/** Size in bytes for a single element, even if width is not 1, of this type. */
HALIDE_ALWAYS_INLINE int bytes() const { return (bits + 7) / 8; }
#endif
};
/** An opaque struct containing per-GPU API implementations of the
* device functions. */
struct halide_device_interface_impl_t;
/** Each GPU API provides a halide_device_interface_t struct pointing
* to the code that manages device allocations. You can access these
* functions directly from the struct member function pointers, or by
* calling the functions declared below. Note that the global
* functions are not available when using Halide as a JIT compiler.
* If you are using raw halide_buffer_t in that context you must use
* the function pointers in the device_interface struct.
*
* The function pointers below are currently the same for every GPU
* API; only the impl field varies. These top-level functions do the
* bookkeeping that is common across all GPU APIs, and then dispatch
* to more API-specific functions via another set of function pointers
* hidden inside the impl field.
*/
struct halide_device_interface_t {
int (*device_malloc)(void *user_context, struct halide_buffer_t *buf,
const struct halide_device_interface_t *device_interface);
int (*device_free)(void *user_context, struct halide_buffer_t *buf);
int (*device_sync)(void *user_context, struct halide_buffer_t *buf);
void (*device_release)(void *user_context,
const struct halide_device_interface_t *device_interface);
int (*copy_to_host)(void *user_context, struct halide_buffer_t *buf);
int (*copy_to_device)(void *user_context, struct halide_buffer_t *buf,
const struct halide_device_interface_t *device_interface);
int (*device_and_host_malloc)(void *user_context, struct halide_buffer_t *buf,
const struct halide_device_interface_t *device_interface);
int (*device_and_host_free)(void *user_context, struct halide_buffer_t *buf);
int (*buffer_copy)(void *user_context, struct halide_buffer_t *src,
const struct halide_device_interface_t *dst_device_interface, struct halide_buffer_t *dst);
int (*device_crop)(void *user_context, const struct halide_buffer_t *src,
struct halide_buffer_t *dst);
int (*device_release_crop)(void *user_context, struct halide_buffer_t *buf);
int (*wrap_native)(void *user_context, struct halide_buffer_t *buf, uint64_t handle,
const struct halide_device_interface_t *device_interface);
int (*detach_native)(void *user_context, struct halide_buffer_t *buf);
const struct halide_device_interface_impl_t *impl;
};
typedef struct halide_dimension_t {
int32_t min, extent, stride;
// Per-dimension flags. None are defined yet (This is reserved for future use).
uint32_t flags;
#ifdef __cplusplus
HALIDE_ALWAYS_INLINE halide_dimension_t() : min(0), extent(0), stride(0), flags(0) {}
HALIDE_ALWAYS_INLINE halide_dimension_t(int32_t m, int32_t e, int32_t s, uint32_t f = 0) :
min(m), extent(e), stride(s), flags(f) {}
HALIDE_ALWAYS_INLINE bool operator==(const halide_dimension_t &other) const {
return (min == other.min) &&
(extent == other.extent) &&
(stride == other.stride) &&
(flags == other.flags);
}
HALIDE_ALWAYS_INLINE bool operator!=(const halide_dimension_t &other) const {
return !(*this == other);
}
#endif
} halide_dimension_t;
#ifdef __cplusplus
} // extern "C"
#endif
typedef enum {halide_buffer_flag_host_dirty = 1,
halide_buffer_flag_device_dirty = 2} halide_buffer_flags;
/**
* The raw representation of an image passed around by generated
* Halide code. It includes some stuff to track whether the image is
* not actually in main memory, but instead on a device (like a
* GPU). For a more convenient C++ wrapper, use Halide::Buffer<T>. */
typedef struct halide_buffer_t {
/** A device-handle for e.g. GPU memory used to back this buffer. */
uint64_t device;
/** The interface used to interpret the above handle. */
const struct halide_device_interface_t *device_interface;
/** A pointer to the start of the data in main memory. In terms of
* the Halide coordinate system, this is the address of the min
* coordinates (defined below). */
uint8_t* host;
/** flags with various meanings. */
uint64_t flags;
/** The type of each buffer element. */
struct halide_type_t type;
/** The dimensionality of the buffer. */
int32_t dimensions;
/** The shape of the buffer. Halide does not own this array - you
* must manage the memory for it yourself. */
halide_dimension_t *dim;
/** Pads the buffer up to a multiple of 8 bytes */
void *padding;
} halide_buffer_t;
#ifdef __cplusplus
namespace {
template<typename T> struct check_is_pointer;
template<typename T> struct check_is_pointer<T *> {};
}
/** Construct the halide equivalent of a C type */
template<typename T>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() {
// Create a compile-time error if T is not a pointer (without
// using any includes - this code goes into the runtime).
check_is_pointer<T> check;
(void)check;
return halide_type_t(halide_type_handle, 64);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<float>() {
return halide_type_t(halide_type_float, 32);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<double>() {
return halide_type_t(halide_type_float, 64);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<bool>() {
return halide_type_t(halide_type_uint, 1);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint8_t>() {
return halide_type_t(halide_type_uint, 8);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint16_t>() {
return halide_type_t(halide_type_uint, 16);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint32_t>() {
return halide_type_t(halide_type_uint, 32);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint64_t>() {
return halide_type_t(halide_type_uint, 64);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int8_t>() {
return halide_type_t(halide_type_int, 8);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int16_t>() {
return halide_type_t(halide_type_int, 16);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int32_t>() {
return halide_type_t(halide_type_int, 32);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int64_t>() {
return halide_type_t(halide_type_int, 64);
}
#endif
#endif // HALIDE_HALIDERUNTIME_H
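
As a quick illustration of the structs declared above, the sketch below builds a halide_type_t via halide_type_of<float>() and fills a host-backed halide_buffer_t describing a small 4x3 float image. It is illustrative only; the include path is an assumption and no Halide pipeline is involved:

#include <MNN/HalideRuntime.h>   // assumed install path
#include <cstdio>

int main() {
    halide_type_t f32 = halide_type_of<float>();   // code = halide_type_float, bits = 32, lanes = 1
    std::printf("float: %d bits, %d byte(s) per element\n", f32.bits, f32.bytes());

    // Describe a 4x3 single-channel float image backed by host memory.
    float data[4 * 3] = {0};
    halide_dimension_t dims[2] = {
        {0, 4, 1},   // x: min = 0, extent = 4, stride = 1
        {0, 3, 4},   // y: min = 0, extent = 3, stride = 4 (row-major over x)
    };
    halide_buffer_t buf = {};
    buf.host       = reinterpret_cast<uint8_t*>(data);
    buf.type       = f32;
    buf.dimensions = 2;
    buf.dim        = dims;
    std::printf("buffer: %d dimension(s), %d elements per row\n", buf.dimensions, buf.dim[0].extent);
    return 0;
}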

View File

@@ -0,0 +1,126 @@
//
// ImageProcess.hpp
// MNN
//
// Created by MNN on 2018/09/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef ImageProcess_hpp
#define ImageProcess_hpp
#include <MNN/ErrorCode.h>
#include "Matrix.h"
#include <MNN/Tensor.hpp>
namespace MNN {
namespace CV {
enum ImageFormat {
RGBA = 0,
RGB,
BGR,
GRAY,
BGRA,
YUV_NV21 = 11,
};
enum Filter { NEAREST = 0, BILINEAR = 1, BICUBIC = 2 };
enum Wrap { CLAMP_TO_EDGE = 0, ZERO = 1, REPEAT = 2 };
/**
* Handles image processing for a tensor.
* Steps:
* 1: compute the transform and get sample points
* 2: sample each line and convert the source format
* 3: turn RGBA into a float tensor, then subtract the mean and normalize
*/
class MNN_PUBLIC ImageProcess {
public:
struct Inside;
struct Config {
/** data filter */
Filter filterType = NEAREST;
/** format of source data */
ImageFormat sourceFormat = RGBA;
/** format of destination data */
ImageFormat destFormat = RGBA;
// Only valid if the dest type is float
float mean[4] = {0.0f, 0.0f, 0.0f, 0.0f};
float normal[4] = {1.0f, 1.0f, 1.0f, 1.0f};
/** edge wrapper */
Wrap wrap = CLAMP_TO_EDGE;
};
public:
/**
* @brief create image process with given config for given tensor.
* @param config given config.
* @param dstTensor given tensor.
* @return image processor.
*/
static ImageProcess* create(const Config& config, const Tensor* dstTensor = nullptr);
/**
* @brief create image process with given mean/normal values and formats for given tensor.
* @param means given means
* @param meanCount given means count
* @param normals given normals
* @param normalCount given normal count
* @param sourceFormat format of source data
* @param destFormat format of destination data
* @param dstTensor given tensor.
* @return image processor.
*/
static ImageProcess* create(const ImageFormat sourceFormat = RGBA, const ImageFormat destFormat = RGBA,
const float* means = nullptr, const int meanCount = 0, const float* normals = nullptr,
const int normalCount = 0, const Tensor* dstTensor = nullptr);
~ImageProcess();
/**
* @brief get affine transform matrix.
* @return affine transform matrix.
*/
inline const Matrix& matrix() const {
return mTransform;
}
void setMatrix(const Matrix& matrix);
/**
* @brief convert source data to given tensor.
* @param source source data.
* @param iw source width.
* @param ih source height.
* @param stride number of elements per row, e.g. a 100-pixel-wide RGB row contains at least 300 elements.
* @param dest given tensor.
* @return result code.
*/
ErrorCode convert(const uint8_t* source, int iw, int ih, int stride, Tensor* dest);
/**
* @brief create tensor with given data.
* @param w image width.
* @param h image height.
* @param bpp bytes per pixel.
* @param p pixel data pointer.
* @return created tensor.
*/
template <typename T>
static Tensor* createImageTensor(int w, int h, int bpp, void* p = nullptr) {
return createImageTensor(halide_type_of<T>(), w, h, bpp, p);
}
static Tensor* createImageTensor(halide_type_t type, int w, int h, int bpp, void* p = nullptr);
private:
ImageProcess(const Config& config);
Matrix mTransform;
Matrix mTransformInvert;
Inside* mInside;
};
} // namespace CV
} // namespace MNN
#endif /* ImageProcess_hpp */
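To make the three steps above concrete, here is a hedged usage sketch; the BGR mean/normal values, the image pointer and dimensions, and the <MNN/ImageProcess.hpp> install path are illustrative assumptions, not taken from this commit:

#include <MNN/ImageProcess.hpp>
#include <cstdint>
#include <cstring>
#include <memory>

// `rgbaData`, `width`, `height` and `inputTensor` are assumed to exist already;
// `inputTensor` would typically come from Interpreter::getSessionInput().
void feedImage(const uint8_t* rgbaData, int width, int height, MNN::Tensor* inputTensor) {
    MNN::CV::ImageProcess::Config config;
    config.sourceFormat = MNN::CV::RGBA;
    config.destFormat   = MNN::CV::BGR;
    config.filterType   = MNN::CV::BILINEAR;
    const float mean[3]   = {103.94f, 116.78f, 123.68f}; // illustrative values only
    const float normal[3] = {0.017f, 0.017f, 0.017f};
    ::memcpy(config.mean, mean, sizeof(mean));
    ::memcpy(config.normal, normal, sizeof(normal));

    std::unique_ptr<MNN::CV::ImageProcess> process(MNN::CV::ImageProcess::create(config));
    // stride is in elements per row: width * 4 for tightly packed RGBA.
    process->convert(rgbaData, width, height, width * 4, inputTensor);
}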

View File

@ -0,0 +1,264 @@
//
// Interpreter.hpp
// MNN
//
// Created by MNN on 2018/07/23.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef Interpreter_hpp
#define Interpreter_hpp
#include <functional>
#include <map>
#include <string>
#include <MNN/ErrorCode.h>
#include <MNN/MNNForwardType.h>
#include <MNN/Tensor.hpp>
namespace MNN {
/** session schedule config */
struct ScheduleConfig {
/** which tensor should be kept */
std::vector<std::string> saveTensors;
/** forward type */
MNNForwardType type = MNN_FORWARD_CPU;
/** number of threads in parallel */
int numThread = 4;
/** subpath to run */
struct Path {
std::vector<std::string> inputs;
std::vector<std::string> outputs;
enum Mode {
/**
* Op Mode
* - inputs means the source op, can NOT be empty.
* - outputs means the sink op, can be empty.
* The path starts from the source ops and flows until it encounters the sink ops.
* The sink ops will not be computed in this path.
*/
Op = 0,
/**
* Tensor Mode (NOT supported yet)
* - inputs means the inputs tensors, can NOT be empty.
* - outputs means the outputs tensors, can NOT be empty.
* It will find the pipeline that compute outputs from inputs.
*/
Tensor = 1
};
/** running mode */
Mode mode = Op;
};
Path path;
/** backup backend used to create execution when the designated backend does NOT support an op */
MNNForwardType backupType = MNN_FORWARD_CPU;
/** extra backend config */
BackendConfig* backendConfig = nullptr;
};
class Session;
struct Content;
class Tensor;
class Backend;
class MNN_PUBLIC OperatorInfo {
struct Info;
public:
/** Operator's name*/
const std::string& name() const;
/** Operator's type*/
const std::string& type() const;
/** Operator's flops, in M*/
float flops() const;
protected:
OperatorInfo();
~OperatorInfo();
Info* mContent;
};
typedef std::function<bool(const std::vector<Tensor*>&, const std::string& /*opName*/)> TensorCallBack;
typedef std::function<bool(const std::vector<Tensor*>&, const OperatorInfo*)> TensorCallBackWithInfo;
/** net data holder. multiple sessions could share same net. */
class MNN_PUBLIC Interpreter {
public:
/**
* @brief create net from file.
* @param file given file.
* @return created net if success, NULL otherwise.
*/
static Interpreter* createFromFile(const char* file);
/**
* @brief create net from buffer.
* @param buffer given data buffer.
* @param size size of data buffer.
* @return created net if success, NULL otherwise.
*/
static Interpreter* createFromBuffer(const void* buffer, size_t size);
~Interpreter();
public:
/**
* @brief create session with schedule config. created session will be managed in net.
* @param config session schedule config.
* @return created session if success, NULL otherwise.
*/
Session* createSession(const ScheduleConfig& config);
/**
* @brief create multi-path session with schedule configs. created session will be managed in net.
* @param configs session schedule configs.
* @return created session if success, NULL otherwise.
*/
Session* createMultiPathSession(const std::vector<ScheduleConfig>& configs);
/**
* @brief release session.
* @param session given session.
* @return true if given session is held by net and is freed.
*/
bool releaseSession(Session* session);
/**
* @brief call this function to get tensors ready. output tensor buffers (host or deviceId) should be retrieved
* again after resizing any input tensor.
* @param session given session.
*/
void resizeSession(Session* session);
/**
* @brief call this function if you no longer need to resize tensors or create sessions; it frees memory roughly
* equal to the size of the model buffer.
*/
void releaseModel();
/**
* @brief Get the model buffer for user to save
* @return std::make_pair(modelBuffer, modelSize).
* @example:
* std::ofstream output("trainResult.alinn");
* auto buffer = net->getModelBuffer();
* output.write((const char*)buffer.first, buffer.second);
*/
std::pair<const void*, size_t> getModelBuffer() const;
/**
* @brief update Session's Tensor to model's Const Op
* @param session given session.
* @return result of running.
*/
ErrorCode updateSessionToModel(Session* session);
/**
* @brief run session.
* @param session given session.
* @return result of running.
*/
ErrorCode runSession(Session* session) const;
/**
* @brief run session.
* @param session given session.
* @param before callback before each op. return true to run the op; return false to skip the op.
* @param end callback after each op. return true to continue running; return false to interrupt the session.
* @param sync synchronously wait for finish of execution or not.
* @return result of running.
*/
ErrorCode runSessionWithCallBack(const Session* session, const TensorCallBack& before, const TensorCallBack& end,
bool sync = false) const;
/**
* @brief run session.
* @param session given session.
* @param before callback before each op. return true to run the op; return false to skip the op.
* @param end callback after each op. return true to continue running; return false to interrupt the session.
* @param sync synchronously wait for finish of execution or not.
* @return result of running.
*/
ErrorCode runSessionWithCallBackInfo(const Session* session, const TensorCallBackWithInfo& before,
const TensorCallBackWithInfo& end, bool sync = false) const;
/**
* @brief get input tensor for given name.
* @param session given session.
* @param name given name. if NULL, return first input.
* @return tensor if found, NULL otherwise.
*/
Tensor* getSessionInput(const Session* session, const char* name);
/**
* @brief get output tensor for given name.
* @param session given session.
* @param name given name. if NULL, return first output.
* @return tensor if found, NULL otherwise.
*/
Tensor* getSessionOutput(const Session* session, const char* name);
/**
* @brief get all output tensors.
* @param session given session.
* @return all output tensors mapped with name.
*/
const std::map<std::string, Tensor*>& getSessionOutputAll(const Session* session) const;
/**
* @brief get all input tensors.
* @param session given session.
* @return all input tensors mapped with name.
*/
const std::map<std::string, Tensor*>& getSessionInputAll(const Session* session) const;
public:
/**
* @brief resize given tensor.
* @param tensor given tensor.
* @param dims new dims. at most 6 dims.
*/
void resizeTensor(Tensor* tensor, const std::vector<int>& dims);
/**
* @brief resize given tensor by NCHW.
* @param tensor given tensor.
* @param batch batch size (N).
* @param channel channel count (C).
* @param height height (H).
* @param width width (W).
*/
void resizeTensor(Tensor* tensor, int batch, int channel, int height, int width);
/**
* @brief get backend used to create given tensor.
* @param session given session.
* @param tensor given tensor.
* @return backend used to create given tensor, may be NULL.
*/
const Backend* getBackend(const Session* session, const Tensor* tensor) const;
/**
* @brief get business code (model identifier).
* @return business code.
*/
const char* bizCode() const;
private:
static Interpreter* createFromBufferInternal(Content* net);
Content* mNet = nullptr;
Interpreter(Content* net);
Interpreter(const Interpreter&) = delete;
Interpreter(const Interpreter&&) = delete;
Interpreter& operator=(const Interpreter&) = delete;
Interpreter& operator=(const Interpreter&&) = delete;
};
} // namespace MNN
#endif /* Interpreter_hpp */
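A minimal end-to-end sketch of the interpreter API declared above; the model path "model.mnn" and the 1x3x224x224 input shape are hypothetical:

#include <MNN/Interpreter.hpp>
#include <memory>

int main() {
    std::unique_ptr<MNN::Interpreter> net(MNN::Interpreter::createFromFile("model.mnn")); // hypothetical path
    if (nullptr == net) {
        return -1;
    }
    MNN::ScheduleConfig config;
    config.type      = MNN_FORWARD_CPU;
    config.numThread = 4;
    MNN::Session* session = net->createSession(config);

    MNN::Tensor* input = net->getSessionInput(session, nullptr); // first input
    net->resizeTensor(input, {1, 3, 224, 224});                  // illustrative NCHW shape
    net->resizeSession(session);                                 // re-acquire buffers after resizing

    net->runSession(session);

    MNN::Tensor* output = net->getSessionOutput(session, nullptr); // first output
    (void)output; // read results via Tensor::host<T>() or copyToHostTensor()
    return 0;
}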

View File

@ -0,0 +1,63 @@
//
// MNNDefine.h
// MNN
//
// Created by MNN on 2018/08/09.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNNDefine_h
#define MNNDefine_h
#include <assert.h>
#include <stdio.h>
#if defined(__APPLE__)
#include "TargetConditionals.h"
#if TARGET_OS_IPHONE
#define MNN_BUILD_FOR_IOS
#endif
#endif
#ifdef MNN_USE_LOGCAT
#include <android/log.h>
#define MNN_ERROR(format, ...) __android_log_print(ANDROID_LOG_ERROR, "MNNJNI", format, ##__VA_ARGS__)
#define MNN_PRINT(format, ...) __android_log_print(ANDROID_LOG_INFO, "MNNJNI", format, ##__VA_ARGS__)
#else
#define MNN_PRINT(format, ...) printf(format, ##__VA_ARGS__)
#define MNN_ERROR(format, ...) printf(format, ##__VA_ARGS__)
#endif
#ifdef DEBUG
#define MNN_ASSERT(x) \
{ \
int res = (x); \
if (!res) { \
MNN_ERROR("Error for %s, %d\n", __FILE__, __LINE__); \
assert(res); \
} \
}
#else
#define MNN_ASSERT(x) \
{ \
int res = (x); \
if (!res) { \
MNN_ERROR("Error for %s, %d\n", __FILE__, __LINE__); \
} \
}
#endif
#define FUNC_PRINT(x) MNN_PRINT(#x "=%d in %s, %d \n", x, __func__, __LINE__);
#define FUNC_PRINT_ALL(x, type) MNN_PRINT(#x "=" #type " %" #type " in %s, %d \n", x, __func__, __LINE__);
#if defined(_MSC_VER)
#ifdef BUILDING_DLL
#define MNN_PUBLIC __declspec(dllexport)
#else
#define MNN_PUBLIC __declspec(dllimport)
#endif
#else
#define MNN_PUBLIC __attribute__((visibility("default")))
#endif
#endif /* MNNDefine_h */
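A small hedged illustration of the logging and assertion macros defined above:

#include <MNN/MNNDefine.h>

static void checkBatch(int batch) {
    MNN_PRINT("batch = %d\n", batch); // printf on desktop, __android_log_print when MNN_USE_LOGCAT is defined
    MNN_ASSERT(batch > 0);            // logs file/line on failure; also calls assert() in DEBUG builds
    FUNC_PRINT(batch);                // prints "batch=<value> in checkBatch, <line>"
}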

View File

@ -0,0 +1,75 @@
//
// MNNForwardType.h
// MNN
//
// Created by MNN on 2019/01/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNNForwardType_h
#define MNNForwardType_h
typedef enum {
MNN_FORWARD_CPU = 0,
/*
First, find the first available backend that is not CPU.
If there is no other backend, use CPU.
*/
MNN_FORWARD_AUTO = 4,
/*Hand-written Metal*/
MNN_FORWARD_METAL = 1,
/*Use iOS's MPS instead of hand-written Metal. Not supported yet*/
MNN_FORWARD_MPS = 2,
/*Android / Common Device GPU API*/
MNN_FORWARD_OPENCL = 3,
MNN_FORWARD_OPENGL = 6,
MNN_FORWARD_VULKAN = 7,
/*Android 8.1's NNAPI. Not supported yet*/
MNN_FORWARD_NN = 5,
/*Users can use the API in Backend.hpp to add or search for a Backend*/
MNN_FORWARD_USER_0 = 8,
MNN_FORWARD_USER_1 = 9,
MNN_FORWARD_USER_2 = 10,
MNN_FORWARD_USER_3 = 11,
MNN_FORWARD_ALL
} MNNForwardType;
#ifdef __cplusplus
namespace MNN {
struct BackendConfig {
enum MemoryMode {
Memory_Normal = 0,
Memory_High,
Memory_Low
};
MemoryMode memory = Memory_Normal;
enum PowerMode {
Power_Normal = 0,
Power_High,
Power_Low
};
PowerMode power = Power_Normal;
enum PrecisionMode {
Precision_Normal = 0,
Precision_High,
Precision_Low
};
PrecisionMode precision = Precision_Normal;
/** user defined context */
void* sharedContext = nullptr;
};
} // namespace MNN
#endif
#endif /* MNNForwardType_h */
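A hedged sketch of pairing BackendConfig with the ScheduleConfig declared in Interpreter.hpp; the chosen modes are illustrative:

MNN::BackendConfig backendConfig;
backendConfig.precision = MNN::BackendConfig::Precision_Low; // trade accuracy for speed
backendConfig.power     = MNN::BackendConfig::Power_High;
backendConfig.memory    = MNN::BackendConfig::Memory_Normal;

MNN::ScheduleConfig config;
config.type          = MNN_FORWARD_OPENCL;  // preferred backend
config.backupType    = MNN_FORWARD_CPU;     // used when the designated backend lacks an op
config.backendConfig = &backendConfig;      // raw pointer: keep backendConfig alive while the session is used (assumption)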

File diff suppressed because it is too large

View File

@ -0,0 +1,580 @@
//
// Rect.h
// MNN
//
// Modified by jiangxiaotang on 2018/09/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
/*
* Copyright 2006 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Generated by tools/bookmaker from include/core/Rect.h and docs/SkRect_Reference.bmh
on 2018-07-13 08:15:11. Additional documentation and examples can be found at:
https://skia.org/user/api/SkRect_Reference
You may edit either file directly. Structural changes to public interfaces require
editing both files. After editing docs/SkRect_Reference.bmh, run:
bookmaker -b docs -i include/core/Rect.h -p
to create an updated version of this file.
*/
#ifndef SkRect_DEFINED
#define SkRect_DEFINED
#include <math.h>
#include <algorithm>
#include <utility>
#include <MNN/MNNDefine.h>
namespace MNN {
namespace CV {
struct Point {
float fX;
float fY;
void set(float x, float y) {
fX = x;
fY = y;
}
};
/** \struct Rect
Rect holds four float coordinates describing the upper and
lower bounds of a rectangle. Rect may be created from outer bounds or
from position, width, and height. Rect describes an area; if its right
is less than or equal to its left, or if its bottom is less than or equal to
its top, it is considered empty.
*/
struct MNN_PUBLIC Rect {
float fLeft; //!< smaller x-axis bounds
float fTop; //!< smaller y-axis bounds
float fRight; //!< larger x-axis bounds
float fBottom; //!< larger y-axis bounds
/** Returns constructed Rect set to (0, 0, 0, 0).
Many other rectangles are empty; if left is equal to or greater than right,
or if top is equal to or greater than bottom. Setting all members to zero
is a convenience, but does not designate a special empty rectangle.
@return bounds (0, 0, 0, 0)
*/
static constexpr Rect MakeEmpty() {
return Rect{0, 0, 0, 0};
}
#ifdef SK_SUPPORT_LEGACY_RECTMAKELARGEST
/** Deprecated.
*/
static Rect MakeLargest() {
return {SK_ScalarMin, SK_ScalarMin, SK_ScalarMax, SK_ScalarMax};
}
#endif
/** Returns constructed Rect set to float values (0, 0, w, h). Does not
validate input; w or h may be negative.
Passing integer values may generate a compiler warning since Rect cannot
represent 32-bit integers exactly. Use SkIRect for an exact integer rectangle.
@param w float width of constructed Rect
@param h float height of constructed Rect
@return bounds (0, 0, w, h)
*/
static constexpr Rect MakeWH(float w, float h) {
return Rect{0, 0, w, h};
}
/** Returns constructed Rect set to integer values (0, 0, w, h). Does not validate
input; w or h may be negative.
Use to avoid a compiler warning that input may lose precision when stored.
Use SkIRect for an exact integer rectangle.
@param w integer width of constructed Rect
@param h integer height of constructed Rect
@return bounds (0, 0, w, h)
*/
static Rect MakeIWH(int w, int h) {
Rect r;
r.set(0, 0, (float)(w), (float)(h));
return r;
}
/** Returns constructed Rect set to (l, t, r, b). Does not sort input; Rect may
result in fLeft greater than fRight, or fTop greater than fBottom.
@param l float stored in fLeft
@param t float stored in fTop
@param r float stored in fRight
@param b float stored in fBottom
@return bounds (l, t, r, b)
*/
static constexpr Rect MakeLTRB(float l, float t, float r, float b) {
return Rect{l, t, r, b};
}
/** Returns constructed Rect set to (x, y, x + w, y + h). Does not validate input;
w or h may be negative.
@param x stored in fLeft
@param y stored in fTop
@param w added to x and stored in fRight
@param h added to y and stored in fBottom
@return bounds at (x, y) with width w and height h
*/
static constexpr Rect MakeXYWH(float x, float y, float w, float h) {
return Rect{x, y, x + w, y + h};
}
/** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal
to or greater than fBottom. Call sort() to reverse rectangles with negative
width() or height().
@return true if width() or height() are zero or negative
*/
bool isEmpty() const {
// We write it as the NOT of a non-empty rect, so we will return true if any values
// are NaN.
return !(fLeft < fRight && fTop < fBottom);
}
/** Returns true if fLeft is equal to or less than fRight, or if fTop is equal
to or less than fBottom. Call sort() to reverse rectangles with negative
width() or height().
@return true if width() or height() are zero or positive
*/
bool isSorted() const {
return fLeft <= fRight && fTop <= fBottom;
}
/** Returns left edge of Rect, if sorted. Call isSorted() to see if Rect is valid.
Call sort() to reverse fLeft and fRight if needed.
@return fLeft
*/
float x() const {
return fLeft;
}
/** Returns top edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid,
and sort() to reverse fTop and fBottom if needed.
@return fTop
*/
float y() const {
return fTop;
}
/** Returns left edge of Rect, if sorted. Call isSorted() to see if Rect is valid.
Call sort() to reverse fLeft and fRight if needed.
@return fLeft
*/
float left() const {
return fLeft;
}
/** Returns top edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid,
and sort() to reverse fTop and fBottom if needed.
@return fTop
*/
float top() const {
return fTop;
}
/** Returns right edge of Rect, if sorted. Call isSorted() to see if Rect is valid.
Call sort() to reverse fLeft and fRight if needed.
@return fRight
*/
float right() const {
return fRight;
}
/** Returns bottom edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid,
and sort() to reverse fTop and fBottom if needed.
@return fBottom
*/
float bottom() const {
return fBottom;
}
/** Returns span on the x-axis. This does not check if Rect is sorted, or if
result fits in 32-bit float; result may be negative or infinity.
@return fRight minus fLeft
*/
float width() const {
return fRight - fLeft;
}
/** Returns span on the y-axis. This does not check if Rect is sorted, or if
result fits in 32-bit float; result may be negative or infinity.
@return fBottom minus fTop
*/
float height() const {
return fBottom - fTop;
}
/** Returns average of left edge and right edge. Result does not change if Rect
is sorted. Result may overflow to infinity if Rect is far from the origin.
@return midpoint in x
*/
float centerX() const {
// don't use floatHalf(fLeft + fRight) as that might overflow before the 0.5
return 0.5f * (fLeft) + 0.5f * (fRight);
}
/** Returns average of top edge and bottom edge. Result does not change if Rect
is sorted.
@return midpoint in y
*/
float centerY() const {
// don't use floatHalf(fTop + fBottom) as that might overflow before the 0.5
return 0.5f * (fTop) + 0.5f * (fBottom);
}
/** Sets Rect to (0, 0, 0, 0).
Many other rectangles are empty; if left is equal to or greater than right,
or if top is equal to or greater than bottom. Setting all members to zero
is a convenience, but does not designate a special empty rectangle.
*/
void setEmpty() {
*this = MakeEmpty();
}
/** Sets Rect to (left, top, right, bottom).
left and right are not sorted; left is not necessarily less than right.
top and bottom are not sorted; top is not necessarily less than bottom.
@param left stored in fLeft
@param top stored in fTop
@param right stored in fRight
@param bottom stored in fBottom
*/
void set(float left, float top, float right, float bottom) {
fLeft = left;
fTop = top;
fRight = right;
fBottom = bottom;
}
/** Sets Rect to (left, top, right, bottom).
left and right are not sorted; left is not necessarily less than right.
top and bottom are not sorted; top is not necessarily less than bottom.
@param left stored in fLeft
@param top stored in fTop
@param right stored in fRight
@param bottom stored in fBottom
*/
void setLTRB(float left, float top, float right, float bottom) {
this->set(left, top, right, bottom);
}
/** Sets Rect to (left, top, right, bottom).
All parameters are promoted from integer to scalar.
left and right are not sorted; left is not necessarily less than right.
top and bottom are not sorted; top is not necessarily less than bottom.
@param left promoted to float and stored in fLeft
@param top promoted to float and stored in fTop
@param right promoted to float and stored in fRight
@param bottom promoted to float and stored in fBottom
*/
void iset(int left, int top, int right, int bottom) {
fLeft = (float)(left);
fTop = (float)(top);
fRight = (float)(right);
fBottom = (float)(bottom);
}
/** Sets Rect to (0, 0, width, height).
width and height may be zero or negative. width and height are promoted from
integer to float, large values may lose precision.
@param width promoted to float and stored in fRight
@param height promoted to float and stored in fBottom
*/
void isetWH(int width, int height) {
fLeft = fTop = 0;
fRight = (float)(width);
fBottom = (float)(height);
}
/** Sets Rect to (x, y, x + width, y + height). Does not validate input;
width or height may be negative.
@param x stored in fLeft
@param y stored in fTop
@param width added to x and stored in fRight
@param height added to y and stored in fBottom
*/
void setXYWH(float x, float y, float width, float height) {
fLeft = x;
fTop = y;
fRight = x + width;
fBottom = y + height;
}
/** Sets Rect to (0, 0, width, height). Does not validate input;
width or height may be negative.
@param width stored in fRight
@param height stored in fBottom
*/
void setWH(float width, float height) {
fLeft = 0;
fTop = 0;
fRight = width;
fBottom = height;
}
/** Returns Rect offset by (dx, dy).
If dx is negative, Rect returned is moved to the left.
If dx is positive, Rect returned is moved to the right.
If dy is negative, Rect returned is moved upward.
If dy is positive, Rect returned is moved downward.
@param dx added to fLeft and fRight
@param dy added to fTop and fBottom
@return Rect offset on axes, with original width and height
*/
Rect makeOffset(float dx, float dy) const {
return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy);
}
/** Returns Rect, inset by (dx, dy).
If dx is negative, Rect returned is wider.
If dx is positive, Rect returned is narrower.
If dy is negative, Rect returned is taller.
If dy is positive, Rect returned is shorter.
@param dx added to fLeft and subtracted from fRight
@param dy added to fTop and subtracted from fBottom
@return Rect inset symmetrically left and right, top and bottom
*/
Rect makeInset(float dx, float dy) const {
return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy);
}
/** Returns Rect, outset by (dx, dy).
If dx is negative, Rect returned is narrower.
If dx is positive, Rect returned is wider.
If dy is negative, Rect returned is shorter.
If dy is positive, Rect returned is taller.
@param dx subtracted from fLeft and added to fRight
@param dy subtracted from fTop and added to fBottom
@return Rect outset symmetrically left and right, top and bottom
*/
Rect makeOutset(float dx, float dy) const {
return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy);
}
/** Offsets Rect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom.
If dx is negative, moves Rect to the left.
If dx is positive, moves Rect to the right.
If dy is negative, moves Rect upward.
If dy is positive, moves Rect downward.
@param dx offset added to fLeft and fRight
@param dy offset added to fTop and fBottom
*/
void offset(float dx, float dy) {
fLeft += dx;
fTop += dy;
fRight += dx;
fBottom += dy;
}
/** Offsets Rect so that fLeft equals newX, and fTop equals newY. width and height
are unchanged.
@param newX stored in fLeft, preserving width()
@param newY stored in fTop, preserving height()
*/
void offsetTo(float newX, float newY) {
fRight += newX - fLeft;
fBottom += newY - fTop;
fLeft = newX;
fTop = newY;
}
/** Insets Rect by (dx, dy).
If dx is positive, makes Rect narrower.
If dx is negative, makes Rect wider.
If dy is positive, makes Rect shorter.
If dy is negative, makes Rect taller.
@param dx added to fLeft and subtracted from fRight
@param dy added to fTop and subtracted from fBottom
*/
void inset(float dx, float dy) {
fLeft += dx;
fTop += dy;
fRight -= dx;
fBottom -= dy;
}
/** Outsets Rect by (dx, dy).
If dx is positive, makes Rect wider.
If dx is negative, makes Rect narrower.
If dy is positive, makes Rect taller.
If dy is negative, makes Rect shorter.
@param dx subtracted from fLeft and added to fRight
@param dy subtracted from fTop and added to fBottom
*/
void outset(float dx, float dy) {
this->inset(-dx, -dy);
}
private:
static bool Intersects(float al, float at, float ar, float ab, float bl, float bt, float br, float bb) {
float L = std::max(al, bl);
float R = std::min(ar, br);
float T = std::max(at, bt);
float B = std::min(ab, bb);
return L < R && T < B;
}
public:
/** Constructs Rect to intersect from (left, top, right, bottom). Does not sort
construction.
Returns true if Rect intersects construction.
Returns false if either construction or Rect is empty, or do not intersect.
@param left x-axis minimum of constructed Rect
@param top y-axis minimum of constructed Rect
@param right x-axis maximum of constructed Rect
@param bottom y-axis maximum of constructed Rect
@return true if construction and Rect have area in common
*/
bool intersects(float left, float top, float right, float bottom) const {
return Intersects(fLeft, fTop, fRight, fBottom, left, top, right, bottom);
}
/** Returns true if Rect intersects r.
Returns false if either r or Rect is empty, or do not intersect.
@param r Rect to intersect
@return true if r and Rect have area in common
*/
bool intersects(const Rect& r) const {
return Intersects(fLeft, fTop, fRight, fBottom, r.fLeft, r.fTop, r.fRight, r.fBottom);
}
/** Returns true if a intersects b.
Returns false if either a or b is empty, or do not intersect.
@param a Rect to intersect
@param b Rect to intersect
@return true if a and b have area in common
*/
static bool Intersects(const Rect& a, const Rect& b) {
return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom, b.fLeft, b.fTop, b.fRight, b.fBottom);
}
/** Sets Rect to the union of itself and r.
Asserts if r is empty in debug builds (via MNN_ASSERT).
If Rect is empty, sets Rect to r.
May produce incorrect results if r is empty.
@param r expansion Rect
*/
void joinNonEmptyArg(const Rect& r) {
MNN_ASSERT(!r.isEmpty());
// if we are empty, just assign
if (fLeft >= fRight || fTop >= fBottom) {
*this = r;
} else {
this->joinPossiblyEmptyRect(r);
}
}
/** Sets Rect to the union of itself and the construction.
May produce incorrect results if Rect or r is empty.
@param r expansion Rect
*/
void joinPossiblyEmptyRect(const Rect& r) {
fLeft = std::min(fLeft, r.left());
fTop = std::min(fTop, r.top());
fRight = std::max(fRight, r.right());
fBottom = std::max(fBottom, r.bottom());
}
/** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom.
Returns false if Rect is empty.
@param x test Point x-coordinate
@param y test Point y-coordinate
@return true if (x, y) is inside Rect
*/
bool contains(float x, float y) const {
return x >= fLeft && x < fRight && y >= fTop && y < fBottom;
}
/** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps
fTop and fBottom if fTop is greater than fBottom. Result may be empty;
and width() and height() will be zero or positive.
*/
void sort() {
using std::swap;
if (fLeft > fRight) {
swap(fLeft, fRight);
}
if (fTop > fBottom) {
swap(fTop, fBottom);
}
}
/** Returns Rect with fLeft and fRight swapped if fLeft is greater than fRight; and
with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty;
and width() and height() will be zero or positive.
@return sorted Rect
*/
Rect makeSorted() const {
return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom), std::max(fLeft, fRight),
std::max(fTop, fBottom));
}
/** Returns pointer to first scalar in Rect, to treat it as an array with four
entries.
@return pointer to fLeft
*/
const float* asScalars() const {
return &fLeft;
}
};
} // namespace CV
} // namespace MNN
#endif
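A short sketch exercising the Rect helpers above; the coordinates and the <MNN/Rect.h> include path are assumptions:

#include <MNN/Rect.h> // assumed install path for this header

void rectDemo() {
    using MNN::CV::Rect;
    Rect a = Rect::MakeXYWH(10.0f, 20.0f, 100.0f, 50.0f); // bounds (10, 20, 110, 70)
    Rect b = Rect::MakeLTRB(90.0f, 40.0f, 200.0f, 200.0f);

    bool hit = Rect::Intersects(a, b); // true: the two rectangles share area
    a.offset(5.0f, -5.0f);             // move right by 5, up by 5
    a.inset(2.0f, 2.0f);               // shrink by 2 on every side
    float w = a.width();               // fRight - fLeft
    (void)hit;
    (void)w;
}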

View File

@ -0,0 +1,282 @@
//
// Tensor.hpp
// MNN
//
// Created by MNN on 2018/08/14.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef Tensor_hpp
#define Tensor_hpp
#include <vector>
#include "HalideRuntime.h"
#include <MNN/MNNDefine.h>
namespace MNN {
/**
* data container.
* data for host tensor is saved in `host` field. its memory is allocated by malloc directly.
* data for device tensor is saved in `deviceId` field. its memory is allocated by session's backend.
* usually, device tensors are created by the engine (e.g. net or session),
* while host tensors could be created by the engine or by the user.
*/
class MNN_PUBLIC Tensor {
public:
struct InsideDescribe;
/** dimension type used to create tensor */
enum DimensionType {
/** for tensorflow net type. uses NHWC as data format. */
TENSORFLOW,
/** for caffe net type. uses NCHW as data format. */
CAFFE,
/** for caffe net type. uses NC4HW4 as data format. */
CAFFE_C4
};
/** handle type */
enum HandleDataType {
/** default handle type */
HANDLE_NONE = 0,
/** string handle type */
HANDLE_STRING = 1
};
/** dimension reorder flag */
enum DataReorderType {
/** default reorder type, do not reorder */
NO_REORDER = 0,
/** reorder dimension 4 by 4. usually used with NC4HW4 or NHWC4 while data type is float. */
REORDER_4 = 1,
/** reorder dimension 8 by 8. usually used with NC4HW4 or NHWC4 while data type is uint8 or int8. */
REORDER_8
};
public:
/**
* @brief create a tensor with dimension size and type without acquire memory for data.
* @param dimSize dimension size.
* @param type dimension type.
*/
Tensor(int dimSize = 4, DimensionType type = CAFFE);
/**
* @brief create a tensor with same shape as given tensor.
* @param tensor shape provider.
* @param type dimension type.
* @param allocMemory acquire memory for data or not.
* @warning tensor data won't be copied.
*/
Tensor(const Tensor* tensor, DimensionType type = CAFFE, bool allocMemory = true);
/** deinitializer */
~Tensor();
private:
// remove all assignment operator
Tensor(const Tensor& tensor) = delete;
Tensor(const Tensor&& tensor) = delete;
Tensor& operator=(const Tensor&) = delete;
Tensor& operator=(const Tensor&&) = delete;
public:
/**
* @brief create tensor with shape, data type and dimension type.
* @param shape tensor shape.
* @param type data type.
* @param dimType dimension type.
* @return created tensor.
* @warning memory for data won't be acquired. call backend's onAcquireBuffer to get memory ready.
*/
static Tensor* createDevice(const std::vector<int>& shape, halide_type_t type, DimensionType dimType = TENSORFLOW);
/**
* @brief create tensor with shape and dimension type. data type is represented by `T`.
* @param shape tensor shape.
* @param dimType dimension type.
* @return created tensor.
* @warning memory for data won't be acquired. call backend's onAcquireBuffer to get memory ready.
*/
template <typename T>
static Tensor* createDevice(const std::vector<int>& shape, DimensionType dimType = TENSORFLOW) {
return createDevice(shape, halide_type_of<T>(), dimType);
}
/**
* @brief create tensor with shape, data type, data and dimension type.
* @param shape tensor shape.
* @param type data type.
* @param data data to save.
* @param dimType dimension type.
* @return created tensor.
*/
static Tensor* create(const std::vector<int>& shape, halide_type_t type, void* data = NULL,
DimensionType dimType = TENSORFLOW);
/**
* @brief create tensor with shape, data and dimension type. data type is represented by `T`.
* @param shape tensor shape.
* @param data data to save.
* @param dimType dimension type.
* @return created tensor.
*/
template <typename T>
static Tensor* create(const std::vector<int>& shape, void* data = NULL, DimensionType dimType = TENSORFLOW) {
return create(shape, halide_type_of<T>(), data, dimType);
}
public:
/**
* @brief for DEVICE tensor, copy data from given host tensor.
* @param hostTensor host tensor, the data provider.
* @return true for DEVICE tensor, and false for HOST tensor.
*/
bool copyFromHostTensor(const Tensor* hostTensor);
/**
* @brief for DEVICE tensor, copy data to given host tensor.
* @param hostTensor host tensor, the data consumer.
* @return true for DEVICE tensor, and false for HOST tensor.
*/
bool copyToHostTensor(Tensor* hostTensor) const;
/**
* @brief create HOST tensor from DEVICE tensor, with or without data copying.
* @param deviceTensor given device tensor.
* @param copyData copy data or not.
* @return created host tensor.
*/
static Tensor* createHostTensorFromDevice(const Tensor* deviceTensor, bool copyData = true);
public:
const halide_buffer_t& buffer() const {
return mBuffer;
}
halide_buffer_t& buffer() {
return mBuffer;
}
/**
* @brief get dimension type.
* @return dimension type.
*/
DimensionType getDimensionType() const;
/**
* @brief handle data type. used when data type code is halide_type_handle.
* @return handle data type.
*/
HandleDataType getHandleDataType() const;
/**
* @brief set data type.
* @param type data type defined in 'Type_generated.h'.
*/
void setType(int type);
/**
* @brief get data type.
* @return data type.
*/
inline halide_type_t getType() const {
return mBuffer.type;
}
/**
* @brief visit host memory, data type is represented by `T`.
* @return data point in `T` type.
*/
template <typename T>
T* host() const {
return (T*)mBuffer.host;
}
/**
* @brief visit device memory.
* @return device data ID. what the ID means varies between backends.
*/
uint64_t deviceId() const {
return mBuffer.device;
}
public:
int dimensions() const {
return mBuffer.dimensions;
}
/**
* @brief get all dimensions' extent.
* @return dimensions' extent.
*/
std::vector<int> shape() const;
/**
* @brief calculate number of bytes needed to store data taking reordering flag into account.
* @return bytes needed to store data
*/
int size() const;
/**
* @brief calculate number of elements needed to store data taking reordering flag into account.
* @return elements needed to store data
*/
inline int elementSize() const {
return size() / mBuffer.type.bytes();
}
public:
inline int width() const {
if (getDimensionType() == TENSORFLOW) {
return mBuffer.dim[2].extent;
}
return mBuffer.dim[3].extent;
}
inline int height() const {
if (getDimensionType() == TENSORFLOW) {
return mBuffer.dim[1].extent;
}
return mBuffer.dim[2].extent;
}
inline int channel() const {
if (getDimensionType() == TENSORFLOW) {
return mBuffer.dim[3].extent;
}
return mBuffer.dim[1].extent;
}
inline int batch() const {
return mBuffer.dim[0].extent;
}
// visit dimension's extent & stride
inline int stride(int index) const {
return mBuffer.dim[index].stride;
}
inline int length(int index) const {
return mBuffer.dim[index].extent;
}
inline void setStride(int index, int stride) {
mBuffer.dim[index].stride = stride;
}
inline void setLength(int index, int length) {
mBuffer.dim[index].extent = length;
}
public:
/**
* @brief print tensor data. for DEBUG use only.
*/
void print() const;
private:
halide_buffer_t mBuffer;
struct InsideDescribe* mDescribe;
private:
friend class TensorUtils;
};
} // namespace MNN
#endif /* Tensor_hpp */
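A minimal host-tensor sketch built on the factory methods above; the shape is illustrative, and it assumes create() acquires host memory when data is NULL (the device factories above explicitly warn that they do not):

#include <MNN/Tensor.hpp>
#include <vector>

int main() {
    // 1x3x2x2 float tensor in CAFFE (NCHW) layout; data is NULL, so the tensor owns its host buffer (assumption).
    std::vector<int> shape{1, 3, 2, 2};
    MNN::Tensor* t = MNN::Tensor::create<float>(shape, nullptr, MNN::Tensor::CAFFE);

    float* data = t->host<float>();
    for (int i = 0; i < t->elementSize(); ++i) {
        data[i] = 0.0f;
    }
    MNN_PRINT("batch=%d channel=%d height=%d width=%d size=%d bytes\n",
              t->batch(), t->channel(), t->height(), t->width(), t->size());
    delete t;
    return 0;
}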

View File

@ -4,5 +4,5 @@ platform :ios
target 'playground' do target 'playground' do
platform :ios, '8.0' platform :ios, '8.0'
pod 'MNN' pod 'MNN', :path => "../../"
end end

View File

@ -1,6 +1,12 @@
file(GLOB_RECURSE MNN_EXPR_SRCS "${CMAKE_CURRENT_LIST_DIR}/*.cpp") file(GLOB_RECURSE MNN_EXPR_SRCS "${CMAKE_CURRENT_LIST_DIR}/*.cpp")
add_library(MNNExpress OBJECT ${MNN_EXPR_SRCS}) option(MNN_EXPR_ENABLE_PROFILER "Support profile Expr's op cost" OFF)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNExpress>) IF (MNN_EXPR_ENABLE_PROFILER)
list(APPEND MNN_TARGETS MNNExpress) add_definitions(-DMNN_EXPR_ENABLE_PROFILER)
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE) ENDIF()
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE) IF(MNN_SEP_BUILD)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "../")
add_library(MNN_Express SHARED ${MNN_EXPR_SRCS})
target_link_libraries(MNN_Express MNN)
ELSE()
add_library(MNNExpress OBJECT ${MNN_EXPR_SRCS})
ENDIF()

View File

@ -68,7 +68,7 @@ MNN_PUBLIC VARP _Sub(VARP a, VARP b, std::vector<float> coeff);
//OtherOPs //OtherOPs
template<typename T> template<typename T>
MNN_PUBLIC VARP _Cast(VARP x) { VARP _Cast(VARP x) {
return _Cast(x, halide_type_of<T>()); return _Cast(x, halide_type_of<T>());
} }
MNN_PUBLIC VARP _Cast(VARP x, halide_type_t dtype); MNN_PUBLIC VARP _Cast(VARP x, halide_type_t dtype);

View File

@ -1,56 +1,29 @@
#!/bin/sh #!/bin/sh
set -e echo "Change directory to MNN_SOURCE_ROOT/project/ios before running this script"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "Current PWD: ${PWD}"
pushd ${SCRIPT_DIR}
rm -rf iOS_64 rm -rf ios_64
mkdir iOS_64 mkdir ios_64
cd iOS_64 cd ios_64
cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DIOS_ARCH="arm64" -DMNN_AAPL_FMWK=ON -DENABLE_BITCODE=0 -G Xcode cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DIOS_ARCH="arm64" -DENABLE_BITCODE=0 -G Xcode
echo "Building AArch64" echo "Building AArch64"
xcodebuild ONLY_ACTIVE_ARCH=NO CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO -configuration Release -scheme MNN -target MNN -sdk iphoneos -quiet xcodebuild ONLY_ACTIVE_ARCH=NO -configuration Release -scheme MNN -target MNN -sdk iphoneos -quiet
cd ../ cd ../
rm -rf iOS_32 rm -rf ios_32
mkdir iOS_32 mkdir ios_32
cd iOS_32 cd ios_32
cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DIOS_ARCH="armv7;armv7s" -DMNN_AAPL_FMWK=ON -DENABLE_BITCODE=0 -G Xcode cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DIOS_ARCH="armv7;armv7s" -DENABLE_BITCODE=0 -G Xcode
echo "Building AArch32" echo "Building AArch32"
xcodebuild ONLY_ACTIVE_ARCH=NO CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO -configuration Release -scheme MNN -target MNN -sdk iphoneos -quiet xcodebuild ONLY_ACTIVE_ARCH=NO -configuration Release -scheme MNN -target MNN -sdk iphoneos -quiet
cd ../ cd ../
rm -rf SIM_32 mv ios_32/Release-iphoneos/MNN.framework/MNN ios_32/Release-iphoneos/MNN.framework/MNN_32
mkdir SIM_32
cd SIM_32
cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DPLATFORM=SIMULATOR -DENABLE_BITCODE=0 -DMNN_AAPL_FMWK=ON -G Xcode
echo "Building Simulator32"
xcodebuild ONLY_ACTIVE_ARCH=NO CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO -configuration Release -scheme MNN -target MNN -sdk iphonesimulator -quiet
cd ../
rm -rf SIM_64
mkdir SIM_64
cd SIM_64
cmake ../../../ -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=../../../cmake/ios.toolchain.cmake -DMNN_METAL=ON -DPLATFORM=SIMULATOR64 -DENABLE_BITCODE=0 -DMNN_AAPL_FMWK=ON -G Xcode
echo "Building Simulator64"
xcodebuild ONLY_ACTIVE_ARCH=NO CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO -configuration Release -scheme MNN -target MNN -sdk iphonesimulator -quiet
cd ../
echo "Moving Slices"
rm -rf output/
mkdir output
mv iOS_64/Release-iphoneos/MNN.framework/MNN ./MNN_iOS64
mv iOS_32/Release-iphoneos/MNN.framework/MNN ./MNN_iOS32
mv SIM_32/Release-iphonesimulator/MNN.framework/MNN ./MNN_SIM_32
mv SIM_64/Release-iphonesimulator/MNN.framework/MNN ./MNN_SIM_64
mv iOS_32/Release-iphoneos/MNN.framework output/
echo "Creating Fat Binary" echo "Creating Fat Binary"
lipo -create ./MNN_iOS64 ./MNN_iOS32 ./MNN_SIM_64 ./MNN_SIM_32 -output output/MNN.framework/MNN lipo -create ios_32/Release-iphoneos/MNN.framework/MNN_32 ios_64/Release-iphoneos/MNN.framework/MNN -output ios_32/Release-iphoneos/MNN.framework/MNN
echo "Cleaning up" rm ios_32/Release-iphoneos/MNN.framework/MNN_32
rm ./MNN_iOS64 ./MNN_iOS32 ./MNN_SIM_64 ./MNN_SIM_32 echo "Patching Framework Headers"
echo "Patching framework Headers" rm -rf ./MNN.framework
cp -R ../../include/MNN/expr output/MNN.framework/Headers/expr cp -R ios_32/Release-iphoneos/MNN.framework ./MNN.framework
echo "Unified Framework built at ${PWD}/output/" cp -R ../../include/MNN/expr ./MNN.framework/Headers/expr
popd

View File

@ -1,18 +1,23 @@
if(MNN_ARM82) if(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64" OR IOS_ARCH STREQUAL "arm64")
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64" OR IOS_ARCH STREQUAL "arm64")
file(GLOB MNN_ARM82_SRCS "${CMAKE_CURRENT_LIST_DIR}/*.cpp") file(GLOB MNN_ARM82_SRCS "${CMAKE_CURRENT_LIST_DIR}/*.cpp")
file(GLOB MNN_ARM82_SRCS_ASM "${CMAKE_CURRENT_LIST_DIR}/asm/arm64/*") file(GLOB MNN_ARM82_SRCS_ASM "${CMAKE_CURRENT_LIST_DIR}/asm/arm64/*")
IF(MNN_SEP_BUILD)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "../../../")
add_library(
MNN_Arm82
SHARED
${MNN_ARM82_SRCS}
${MNN_ARM82_SRCS_ASM}
)
target_link_libraries(MNN_Arm82 MNN)
ELSE()
add_library(
MNN_Arm82
OBJECT
${MNN_ARM82_SRCS}
${MNN_ARM82_SRCS_ASM}
)
ENDIF()
add_definitions(-march=armv8.2a+fp16) add_definitions(-march=armv8.2a+fp16)
add_library( target_include_directories(MNN_Arm82 PRIVATE ${CMAKE_CURRENT_LIST_DIR}/asm/)
MNNARM82
OBJECT
${MNN_ARM82_SRCS}
${MNN_ARM82_SRCS_ASM}
)
target_include_directories(MNNARM82 PRIVATE ${CMAKE_CURRENT_LIST_DIR}/asm/)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNARM82>)
list(APPEND MNN_TARGETS MNNARM82)
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)
endif()
endif() endif()

View File

@ -18,5 +18,3 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^aarch64" OR ARCHS STREQUAL "arm64")
else() else()
# Building fat binary requires multiple seperate builds and lipo-by-hand under CMake's design # Building fat binary requires multiple seperate builds and lipo-by-hand under CMake's design
endif() endif()
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)

View File

@ -4,28 +4,16 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(X86_64)|(x64)|(X64)|(amd64)|(AMD64)
endif() endif()
add_definitions(-DMNN_USE_SSE) add_definitions(-DMNN_USE_SSE)
FILE(GLOB MNN_X8664_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) FILE(GLOB MNN_X8664_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
FILE(GLOB MNN_AVX_SRC ${CMAKE_CURRENT_LIST_DIR}/avx/*.cpp)
FILE(GLOB MNN_SSE_SRC ${CMAKE_CURRENT_LIST_DIR}/sse/*.cpp)
add_library(MNNX8664 OBJECT ${MNN_X8664_SRC}) add_library(MNNX8664 OBJECT ${MNN_X8664_SRC})
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNX8664>) add_library(MNNAVX OBJECT ${MNN_AVX_SRC})
list(APPEND MNN_TARGETS MNNX8664) add_library(MNNSSE OBJECT ${MNN_SSE_SRC})
IF(MNN_USE_AVX) add_dependencies(MNNX8664 MNNAVX MNNSSE)
FILE(GLOB MNN_AVX_SRC ${CMAKE_CURRENT_LIST_DIR}/avx/*.cpp) if(WIN32 OR MSVC)
add_library(MNNAVX OBJECT ${MNN_AVX_SRC}) target_compile_options(MNNAVX PRIVATE /arch:AVX /wd4267)
if(WIN32 OR MSVC) else()
target_compile_options(MNNAVX PRIVATE /arch:AVX /wd4267) target_compile_options(MNNAVX PRIVATE -mavx)
else() endif()
target_compile_options(MNNAVX PRIVATE -mavx) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNX8664> $<TARGET_OBJECTS:MNNAVX> $<TARGET_OBJECTS:MNNSSE>)
endif()
add_dependencies(MNNX8664 MNNAVX)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNAVX>)
list(APPEND MNN_TARGETS MNNAVX)
ENDIF()
IF(MNN_USE_SSE)
FILE(GLOB MNN_SSE_SRC ${CMAKE_CURRENT_LIST_DIR}/sse/*.cpp)
add_library(MNNSSE OBJECT ${MNN_SSE_SRC})
add_dependencies(MNNX8664 MNNSSE)
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNSSE>)
list(APPEND MNN_TARGETS MNNSSE)
ENDIF()
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)
endif() endif()

View File

@ -1,8 +1,10 @@
if(MNN_METAL AND APPLE) if(MNN_METAL AND APPLE)
option(MNN_METAL_REGEN "Regenerate Metal Sources." ON)
FILE(GLOB MNN_Metal_SRC ${CMAKE_CURRENT_LIST_DIR}/*.mm) FILE(GLOB MNN_Metal_SRC ${CMAKE_CURRENT_LIST_DIR}/*.mm)
FILE(GLOB MNN_Metal_KERNELS_SRC ${CMAKE_CURRENT_LIST_DIR}/*.metal) FILE(GLOB MNN_Metal_KERNELS_SRC ${CMAKE_CURRENT_LIST_DIR}/*.metal)
message(STATUS "Generating mnn.metallib at ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib") message(STATUS "Generating mnn.metallib at ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib")
IF(DEFINED SDK_VERSION) IF(DEFINED SDK_VERSION)
#Defined by iOS toolchain
SET(METAL_SDK_PLAT "iphoneos") SET(METAL_SDK_PLAT "iphoneos")
ELSE() ELSE()
SET(METAL_SDK_PLAT "macosx") SET(METAL_SDK_PLAT "macosx")
@ -10,22 +12,21 @@ if(MNN_METAL AND APPLE)
message(STATUS "Compiling Metal Kernels with ${METAL_SDK_PLAT} SDK") message(STATUS "Compiling Metal Kernels with ${METAL_SDK_PLAT} SDK")
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMAND xcrun -sdk ${METAL_SDK_PLAT} metal "${MNN_Metal_KERNELS_SRC}" -o ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMAND_EXPAND_LISTS) add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMAND xcrun -sdk ${METAL_SDK_PLAT} metal "${MNN_Metal_KERNELS_SRC}" -o ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMAND_EXPAND_LISTS)
add_custom_target (MNNMetalLIB DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMENT "Generating mnn.metallib") add_custom_target (MNNMetalLIB DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib COMMENT "Generating mnn.metallib")
file(REMOVE "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm")
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm"
COMMAND ${PYTHON_EXECUTABLE}
"${CMAKE_CURRENT_LIST_DIR}/MetalCodeGen.py"
"${CMAKE_CURRENT_LIST_DIR}/"
"${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm"
COMMENT "Metal Code Generation"
)
add_library(MNNMetal OBJECT ${MNN_Metal_SRC} "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm") add_library(MNNMetal OBJECT ${MNN_Metal_SRC} "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm")
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNMetal>) list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNMetal>)
list(APPEND MNN_TARGETS MNNMetal)
add_dependencies(MNNMetal MNNMetalLIB)
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)
# This is just work around some CMake limitations and is really ugly # This is just work around some CMake limitations and is really ugly
#list(APPEND MNN_OBJECTS_TO_LINK ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib) list(APPEND MNN_OBJECTS_TO_LINK ${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib)
SET(MNN_METALLIB_PATH "${CMAKE_CURRENT_BINARY_DIR}/mnn.metallib" PARENT_SCOPE) IF(MNN_METAL_REGEN)
file(REMOVE "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm")
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm"
COMMAND ${PYTHON_EXECUTABLE}
"${CMAKE_CURRENT_LIST_DIR}/MetalCodeGen.py"
"${CMAKE_CURRENT_LIST_DIR}/"
"${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm"
COMMENT "Metal Code Generation"
)
add_custom_target (MNNMetalCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/MetalOPRegister.mm" COMMENT "Registering MetalOps")
add_dependencies(MNNMetal MNNMetalCodeGen)
ENDIF()
add_dependencies(MNNMetal MNNMetalLIB)
endif() endif()

View File

@ -44,7 +44,7 @@ using namespace MNN;
static dispatch_once_t onceToken; static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{ dispatch_once(&onceToken, ^{
#if TARGET_OS_IOS #if TARGET_OS_IOS
NSString *path = [[NSBundle bundleForClass:[MNNMetalContext class]] pathForResource:@"mnn" ofType:@"metallib"]; NSString *path = [NSBundle.mainBundle pathForResource:@"mnn" ofType:@"metallib"];
#else #else
NSString *path = @"mnn.metallib"; NSString *path = @"mnn.metallib";
#endif #endif

View File

@ -1,24 +1,29 @@
if(MNN_OPENCL) add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc"
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc" COMMAND ${PYTHON_EXECUTABLE}
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_codegen.py"
"${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_codegen.py" "${CMAKE_CURRENT_LIST_DIR}/execution/cl/"
"${CMAKE_CURRENT_LIST_DIR}/execution/cl/" "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc"
"${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc" COMMENT "OpenCL Code Generation"
COMMENT "OpenCL Code Generation" )
) add_custom_target (MNN_CLCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc")
add_custom_target (MNNOpenCLCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc") file(GLOB_RECURSE MNN_OpenCL_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cc ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
file(GLOB_RECURSE MNN_OpenCL_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cc ${CMAKE_CURRENT_LIST_DIR}/*.cpp) if (${CMAKE_SYSTEM_NAME} MATCHES "Android")
add_library(MNNOpenCL OBJECT ${MNN_OpenCL_SRC} "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc") add_definitions(-DMNN_USE_OPENCL_WRAPPER)
add_dependencies(MNNOpenCL MNNOpenCLCodeGen) add_definitions(-DCL_HPP_TARGET_OPENCL_VERSION=110)
target_include_directories(MNNOpenCL PRIVATE else()
${CMAKE_SOURCE_DIR}/include/ find_package(OpenCL REQUIRED)
${CMAKE_SOURCE_DIR}/3rd_party/half
)
if (${CMAKE_SYSTEM_NAME} MATCHES "Android")
add_definitions(-DMNN_USE_OPENCL_WRAPPER)
endif()
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNOpenCL>)
list(APPEND MNN_TARGETS MNNOpenCL)
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)
endif() endif()
IF(MNN_SEP_BUILD)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "../../../")
add_library(MNN_CL SHARED ${MNN_OpenCL_SRC} "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc")
target_link_libraries(MNN_CL MNN ${OpenCL_LIBRARIES})
ELSE()
add_library(MNN_CL OBJECT ${MNN_OpenCL_SRC} "${CMAKE_CURRENT_LIST_DIR}/execution/cl/opencl_program.cc")
set(MNN_OCL_LIBS ${OpenCL_LIBRARIES} PARENT_SCOPE)
ENDIF()
add_dependencies(MNN_CL MNN_CLCodeGen)
target_include_directories(MNN_CL PRIVATE
${CMAKE_SOURCE_DIR}/include/
${CMAKE_SOURCE_DIR}/3rd_party/half
)

File diff suppressed because it is too large

View File

@ -0,0 +1,37 @@
#ifndef OPENGL_GLSL_SHADER_AUTO_GENERATE_H
#define OPENGL_GLSL_SHADER_AUTO_GENERATE_H
extern const char* glsl_convlutionDepthwise_glsl;
extern const char* glsl_softmaxWidth_glsl;
extern const char* glsl_softmaxChannel_glsl;
extern const char* glsl_eltwise_glsl;
extern const char* glsl_gemm16x16_glsl;
extern const char* glsl_preluWithChannel_glsl;
extern const char* glsl_image_copy_glsl;
extern const char* glsl_kernel2image_glsl;
extern const char* glsl_convolution1x1_glsl;
extern const char* glsl_col2im_glsl;
extern const char* glsl_avgpool_glsl;
extern const char* glsl_maxpool_glsl;
extern const char* glsl_im2col1x1_glsl;
extern const char* glsl_resizeBilinear_glsl;
extern const char* glsl_unary_glsl;
extern const char* glsl_resizeNearest_glsl;
extern const char* glsl_converter_glsl;
extern const char* glsl_roiPooling_glsl;
extern const char* glsl_blit_glsl;
extern const char* glsl_kernel2ImageDepthwise_glsl;
extern const char* glsl_clear_texture_glsl;
extern const char* glsl_permute_glsl;
extern const char* glsl_image_to_nchw_buffer_glsl;
extern const char* glsl_convolution_glsl;
extern const char* glsl_kernel2image_adreno_glsl;
extern const char* glsl_binary_glsl;
extern const char* glsl_relu_glsl;
extern const char* glsl_nc4hw4_buffer_to_image_glsl;
extern const char* glsl_nhwc_buffer_to_image_glsl;
extern const char* glsl_im2col_glsl;
extern const char* glsl_nchw_buffer_to_image_glsl;
extern const char* glsl_image_to_nhwc_buffer_glsl;
extern const char* glsl_image_to_nc4hw4_buffer_glsl;
extern const char* glsl_softmaxHeight_glsl;
#endif

View File

@ -1,23 +1,22 @@
if(MNN_OPENGL) FILE(GLOB MNN_OpenGL_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
FILE(GLOB_RECURSE MNN_OpenGL_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp) option(MNN_OPENGL_REGEN "Regenerate OpenGL Shaders." OFF)
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp" "${CMAKE_CURRENT_LIST_DIR}/AllShader.hpp" IF(MNN_SEP_BUILD)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "../../../")
add_library(MNN_GL SHARED ${MNN_OpenGL_SRC} "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp")
target_link_libraries(MNN_GL MNN GLESv3 EGL)
ELSE()
add_library(MNN_GL OBJECT ${MNN_OpenGL_SRC} "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp")
ENDIF()
target_include_directories(MNN_GL PRIVATE ${CMAKE_CURRENT_LIST_DIR}/)
IF(MNN_OPENGL_REGEN)
add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp"
COMMAND ${PYTHON_EXECUTABLE} COMMAND ${PYTHON_EXECUTABLE}
"${CMAKE_CURRENT_LIST_DIR}/makeshader.py" "${CMAKE_CURRENT_LIST_DIR}/makeshader.py"
"${CMAKE_CURRENT_LIST_DIR}/glsl/" "${CMAKE_CURRENT_LIST_DIR}/glsl/"
"${CMAKE_CURRENT_LIST_DIR}/AllShader.hpp" "${CMAKE_SOURCE_DIR}/include/MNN/backend/opengl/AllShader.h"
"${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp" "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp"
COMMENT "OpenGL Code Generation" COMMENT "OpenGL Code Generation"
) )
add_custom_target (MNNOpenGLCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp" "${CMAKE_CURRENT_LIST_DIR}/AllShader.hpp") add_custom_target (MNN_GLCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp")
add_dependencies(MNN_GL MNN_GLCodeGen)
add_library(MNNOpenGL OBJECT ${MNN_OpenGL_SRC} "${CMAKE_CURRENT_LIST_DIR}/AllShader.cpp") ENDIF()
list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNOpenGL>)
list(APPEND MNN_TARGETS MNNOpenGL)
SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)
target_include_directories(MNNOpenGL PRIVATE ${CMAKE_CURRENT_LIST_DIR}/)
IF(MNN_OPENGL_REGEN)
add_dependencies(MNNOpenGL MNNOpenGLCodeGen)
ENDIF()
endif()

View File

@ -14,9 +14,7 @@ def findAllShader(path):
return output return output
def getName(fileName): def getName(fileName):
s1 = os.path.abspath(fileName).split("/")[-1] s1 = fileName.replace("/", "_")
s1 = "glsl/"+s1
s1 = s1.replace("/", "_")
s1 = s1.replace(".", "_") s1 = s1.replace(".", "_")
return s1 return s1
@ -25,6 +23,7 @@ def generateFile(headfile, sourcefile, shaders):
cpp = "#include \"AllShader.hpp\"\n" cpp = "#include \"AllShader.hpp\"\n"
for s in shaders: for s in shaders:
name = getName(s) name = getName(s)
print name
h += "extern const char* " + name + ";\n"; h += "extern const char* " + name + ";\n";
cpp += "const char* " + name + " = \n"; cpp += "const char* " + name + " = \n";
with open(s) as f: with open(s) as f:
@ -35,10 +34,8 @@ def generateFile(headfile, sourcefile, shaders):
cpp += "\""+l+"\\n\"\n" cpp += "\""+l+"\\n\"\n"
cpp += ";\n" cpp += ";\n"
h+= "#endif" h+= "#endif"
print("Writing OpenGL Shaders Header to:"+headfile)
with open(headfile, "w") as f: with open(headfile, "w") as f:
f.write(h); f.write(h);
print("Writing OpenGL Shaders Source to:"+sourcefile)
with open(sourcefile, "w") as f: with open(sourcefile, "w") as f:
f.write(cpp); f.write(cpp);

View File

@@ -1,33 +1,37 @@
-if(MNN_VULKAN)
-  FILE(GLOB_RECURSE MNN_Vulkan_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
-  option(MNN_VULKAN_REGEN "Regenerate Vulkan Shader binaries. Requires FULL glslang suite with spirv-tools linked" OFF)
-  if (CMAKE_SYSTEM_NAME MATCHES "^Android")
-    add_definitions(-DVK_USE_PLATFORM_ANDROID_KHR)
-  endif()
-  IF(MNN_VULKAN_REGEN)
-    add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
-      COMMAND ${PYTHON_EXECUTABLE}
-      "${CMAKE_CURRENT_LIST_DIR}/compiler/makeshader.py"
-      "${CMAKE_CURRENT_LIST_DIR}/execution/glsl/"
-      "${CMAKE_SOURCE_DIR}/include/MNN/backend/vulkan/shaders/AllShader.h"
-      "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
-      COMMENT "Vulkan Code Generation"
-    )
-    add_custom_target (MNNVulkanCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp")
-  ENDIF()
-  add_library(
-    MNNVulkan
-    OBJECT
-    ${MNN_Vulkan_SRC}
-    "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
-  )
-  target_include_directories(MNNVulkan PRIVATE "${CMAKE_CURRENT_LIST_DIR}/include/" "${CMAKE_CURRENT_LIST_DIR}/component/" "${CMAKE_CURRENT_LIST_DIR}/runtime" "${CMAKE_CURRENT_LIST_DIR}/execution" "${CMAKE_CURRENT_LIST_DIR}/backend")
-  IF(MNN_VULKAN_REGEN)
-    add_dependencies(MNNVulkan MNNVulkanCodeGen)
-  ENDIF()
-  list(APPEND MNN_OBJECTS_TO_LINK $<TARGET_OBJECTS:MNNVulkan>)
-  list(APPEND MNN_TARGETS MNNVulkan)
-  SET(MNN_OBJECTS_TO_LINK "${MNN_OBJECTS_TO_LINK}" PARENT_SCOPE)
-  SET(MNN_TARGETS "${MNN_TARGETS}" PARENT_SCOPE)
-endif()
+FILE(GLOB_RECURSE MNN_Vulkan_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
+option(MNN_VULKAN_REGEN "Regenerate Vulkan Shader binaries. Requires FULL glslang suite with spirv-tools linked" OFF)
+include_directories("./")
+IF(MNN_SEP_BUILD)
+  set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "../../../")
+  add_library(
+    MNN_Vulkan
+    SHARED
+    ${MNN_Vulkan_SRC}
+    "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
+  )
+  target_link_libraries(MNN_Vulkan MNN)
+ELSE()
+  add_library(
+    MNN_Vulkan
+    OBJECT
+    ${MNN_Vulkan_SRC}
+    "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
+  )
+ENDIF()
+target_include_directories(MNN_Vulkan PRIVATE "${CMAKE_CURRENT_LIST_DIR}/include/" "${CMAKE_CURRENT_LIST_DIR}/component/" "${CMAKE_CURRENT_LIST_DIR}/runtime" "${CMAKE_CURRENT_LIST_DIR}/execution" "${CMAKE_CURRENT_LIST_DIR}/backend")
+if (CMAKE_SYSTEM_NAME MATCHES "^Android")
+  add_definitions(-DVK_USE_PLATFORM_ANDROID_KHR)
+endif()
+IF(MNN_VULKAN_REGEN)
+  add_custom_command(OUTPUT "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
+    COMMAND ${PYTHON_EXECUTABLE}
+    "${CMAKE_CURRENT_LIST_DIR}/compiler/makeshader.py"
+    "${CMAKE_CURRENT_LIST_DIR}/execution/glsl/"
+    "${CMAKE_SOURCE_DIR}/include/MNN/backend/vulkan/shaders/AllShader.h"
+    "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp"
+    COMMENT "Vulkan Code Generation"
+  )
+  add_custom_target (MNN_VulkanCodeGen DEPENDS "${CMAKE_CURRENT_LIST_DIR}/compiler/AllShader.cpp")
+  add_dependencies(MNN_Vulkan MNN_VulkanCodeGen)
+ENDIF()
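The structural change above is the new MNN_SEP_BUILD switch: the Vulkan backend is either packaged as its own shared library that links back against the core MNN target, or compiled as an OBJECT library whose objects are folded into MNN itself. Reduced to a skeleton, and assuming the MNN target and the MNN_SEP_BUILD option come from the enclosing project (the backend target name here is illustrative), the pattern is:

    # Sketch of the SEP_BUILD pattern; MNN and MNN_SEP_BUILD are assumed to exist in the parent scope.
    file(GLOB_RECURSE BACKEND_SRC ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
    if(MNN_SEP_BUILD)
      # Standalone shared library, loaded alongside libMNN.
      add_library(MNN_SomeBackend SHARED ${BACKEND_SRC})
      target_link_libraries(MNN_SomeBackend MNN)
    else()
      # Object library; the parent folds $<TARGET_OBJECTS:MNN_SomeBackend> into libMNN.
      add_library(MNN_SomeBackend OBJECT ${BACKEND_SRC})
    endif()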
File diff suppressed because it is too large.
@@ -1,12 +1,6 @@
 IF(MNN_BUILD_TEST)
-  file(GLOB_RECURSE MNN_TEST_CPP_SOURCES ${CMAKE_CURRENT_LIST_DIR}/*.cpp)
-  file(GLOB_RECURSE MNN_TEST_OBJC_SOURCES ${CMAKE_CURRENT_LIST_DIR}/*.mm)
-  set(MNN_TEST_SRCS "")
-  LIST(APPEND MNN_TEST_SRCS ${MNN_TEST_CPP_SOURCES})
-  IF(APPLE)
-    LIST(APPEND MNN_TEST_SRCS ${MNN_TEST_OBJC_SOURCES})
-  ENDIF()
-  add_executable(run_test.out ${MNN_TEST_SRCS})
+  file(GLOB_RECURSE Files ${CMAKE_CURRENT_LIST_DIR}/*.cpp ${CMAKE_CURRENT_LIST_DIR}/*.mm)
+  add_executable(run_test.out ${Files})
   target_link_libraries(run_test.out ${MNN_DEPS})
   target_include_directories(run_test.out PRIVATE ${CMAKE_CURRENT_LIST_DIR}/)
   add_dependencies(run_test.out MNN_SCHEMA_GEN)
@@ -15,6 +9,6 @@ IF(MNN_BUILD_TEST)
     target_link_libraries(run_test.out ${FOUNDATION})
   endif()
   IF(WIN32 OR MSVC)
-    target_compile_options(run_test.out PRIVATE /wd4244 /wd4267 /wd4305 /wd4251)
+    target_compile_options(run_test.out PRIVATE /wd4244 /wd4267 /wd4305 /wd4251 /wd4065)
   ENDIF()
 ENDIF()
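One design note on the warning suppressions: the IF(WIN32 OR MSVC) guard keys off the platform, whereas a compiler-id generator expression applies the /wd flags only when the compiler is actually MSVC. This is an alternative sketch, not what the file above does:

    # Alternative sketch: restrict the /wd options to the MSVC compiler itself.
    target_compile_options(run_test.out PRIVATE
      "$<$<CXX_COMPILER_ID:MSVC>:/wd4244;/wd4267;/wd4305;/wd4251;/wd4065>")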
@@ -5,6 +5,7 @@
 // Created by MNN on 2019/01/30.
 // Copyright © 2018, Alibaba Group Holding Limited
 //
 #if defined(_MSC_VER)
 #include <Windows.h>
 #undef min
@@ -1,4 +1,7 @@
 IF(MNN_BUILD_CONVERTER)
+  set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_ORIGIN})
+  set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS_ORIGIN})
+  SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../../)
   option(TF_CONVERT_ORIGIN "Fall Back to Origin Model Converter" OFF)
   option(TFMODEL_OPTIMIZE "Enable tensorflow model optimizer" OFF)
   IF(MNN_PORTABLE_BUILD)
@@ -34,10 +37,6 @@ IF(MNN_BUILD_CONVERTER)
     add_library(MNNConvertDeps STATIC ${COMMON_SRC} ${MNN_CONVERTER_BACKENDS_OBJECTS} ${CMAKE_SOURCE_DIR}/3rd_party/flatbuffers/src/util.cpp)
   ENDIF()
   target_link_libraries(MNNConvertDeps PUBLIC ${MNN_DEPS} ${Protobuf_LIBRARIES})
-  IF(MNN_PORTABLE_BUILD)
-    # protobuf::libprotobuf doesn't declare proper dependency on ZLIB
-    target_link_libraries(MNNConvertDeps PUBLIC z)
-  ENDIF()
   IF(NOT MNN_BUILD_SHARED_LIBS)
     if(APPLE)
       target_link_libraries(MNNConvert -Wl,-all_load MNNConvertDeps -Wl,-noall_load)
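The two added set(... ${CMAKE_CXX_FLAGS_ORIGIN}) lines only work if the top-level build snapshots the original compiler flags before appending its own; that snapshot is assumed to exist elsewhere and is not part of this diff. A minimal sketch of the save/restore idiom (the -fvisibility flag is only an example of a project-wide addition):

    # Top level (assumed): remember the user's flags before adding project-wide ones.
    set(CMAKE_C_FLAGS_ORIGIN   "${CMAKE_C_FLAGS}")
    set(CMAKE_CXX_FLAGS_ORIGIN "${CMAKE_CXX_FLAGS}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden")   # example project-wide addition

    # A tool directory (like the converter above) can then opt back out of those additions:
    set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS_ORIGIN}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS_ORIGIN}")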
@@ -182,9 +182,9 @@ public:
 #else
     ~LogMessageFatal() noexcept(false) {
 #endif
-        std::cout << log_stream_.str()<<std::endl;
-        std::cout.flush();
+        // LOG(ERROR) << log_stream_.str();
         throw Error(log_stream_.str());
+        // throw Error("Make it Right!");
     }
     std::ostringstream& stream() {
         return log_stream_;
@@ -48,4 +48,4 @@ if (MSVC OR WIN32)
     target_compile_definitions(${TARGET} PRIVATE "_CRT_SECURE_NO_WARNINGS")
     target_compile_options(${TARGET} PRIVATE "/wd4244" "/wd4305" "/wd4129")
   endforeach()
 endif()
@@ -1,50 +1,48 @@
-IF(MNN_BUILD_TRAIN)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/grad)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/optimizer)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/parameters)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/module)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/transformer)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/data)
-  include_directories(${CMAKE_CURRENT_LIST_DIR}/source/models)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/grad)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/optimizer)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/parameters)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/module)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/transformer)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/data)
+include_directories(${CMAKE_CURRENT_LIST_DIR}/source/models)
+SET( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../../)
 file(GLOB GRAD ${CMAKE_CURRENT_LIST_DIR}/source/grad/*)
 file(GLOB TRANSFORMER ${CMAKE_CURRENT_LIST_DIR}/source/transformer/*)
 file(GLOB MODULES ${CMAKE_CURRENT_LIST_DIR}/source/module/*)
 file(GLOB PARAMETER ${CMAKE_CURRENT_LIST_DIR}/source/parameters/*)
 file(GLOB OPTIMIZER ${CMAKE_CURRENT_LIST_DIR}/source/optimizer/*)
 file(GLOB DATALOADER ${CMAKE_CURRENT_LIST_DIR}/source/data/*)
 file(GLOB MODELS ${CMAKE_CURRENT_LIST_DIR}/source/models/*)
 add_library(MNNTrain SHARED ${GRAD} ${BASIC_INCLUDE} ${PARAMETER} ${OPTIMIZER} ${MODULES} ${DATALOADER} ${TRANSFORMER} ${MODELS})
 target_link_libraries(MNNTrain ${MNN_DEPS})
 IF(CMAKE_BUILD_TYPE MATCHES Release)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
-ENDIF()
-add_executable(transformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/transformerExecution.cpp)
-target_link_libraries(transformer.out MNNTrain)
-add_executable(train.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/train.cpp ${SCHEMA} ${BASIC_INCLUDE})
-target_link_libraries(train.out ${MNN_DEPS})
-add_executable(rawDataTransform.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/rawDataTransform.cpp ${SCHEMA} ${BASIC_INCLUDE})
-target_link_libraries(rawDataTransform.out ${MNN_DEPS})
-include_directories(${CMAKE_SOURCE_DIR}/3rd_party/imageHelper/)
-add_executable(dataTransformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/dataTransformer.cpp ${SCHEMA} ${BASIC_INCLUDE})
-target_link_libraries(dataTransformer.out ${MNN_DEPS})
-option(MNN_USE_OPENCV "Use opencv" OFF)
-file(GLOB DEMOSOURCE ${CMAKE_CURRENT_LIST_DIR}/source/demo/*)
-add_executable(runTrainDemo.out ${DEMOSOURCE} ${BASIC_INCLUDE})
-target_link_libraries(runTrainDemo.out MNNTrain)
-if (MNN_USE_OPENCV)
-  set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_ORIGIN})
-  set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS_ORIGIN})
-  find_package(OpenCV REQUIRED)
-  include_directories(${OpenCV_INCLUDE_DIRS})
-  add_definitions(-D MNN_USE_OPENCV)
-  target_link_libraries(runTrainDemo.out ${OpenCV_LIBS})
-endif()
 ENDIF()
+add_executable(transformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/transformerExecution.cpp)
+target_link_libraries(transformer.out MNNTrain)
+add_executable(train.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/train.cpp ${SCHEMA} ${BASIC_INCLUDE})
+target_link_libraries(train.out MNN)
+add_executable(rawDataTransform.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/rawDataTransform.cpp ${SCHEMA} ${BASIC_INCLUDE})
+include_directories(../../3rd_party/imageHelper/)
+add_executable(dataTransformer.out ${CMAKE_CURRENT_LIST_DIR}/source/exec/dataTransformer.cpp ${SCHEMA} ${BASIC_INCLUDE})
+target_link_libraries(dataTransformer.out MNN)
+option(MNN_USE_OPENCV "Use opencv" OFF)
+file(GLOB DEMOSOURCE ${CMAKE_CURRENT_LIST_DIR}/source/demo/*)
+add_executable(runTrainDemo.out ${DEMOSOURCE} ${BASIC_INCLUDE})
+target_link_libraries(runTrainDemo.out MNNTrain)
+if (MNN_USE_OPENCV)
+  set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_ORIGIN})
+  set(CMAKE_C_FLAGS ${CMAKE_C_FLAGS_ORIGIN})
+  find_package(OpenCV REQUIRED)
+  include_directories(${OpenCV_INCLUDE_DIRS})
+  add_definitions(-D MNN_USE_OPENCV)
+  target_link_libraries(runTrainDemo.out ${OpenCV_LIBS})
+endif()
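Since the IF(MNN_BUILD_TRAIN) guard disappears from this file, the option is presumably checked by whichever CMakeLists now pulls this one in; that caller is not shown in this diff, so the following is only an assumed sketch of how such a guard is typically hoisted:

    # Assumed caller-side guard (path and description string are illustrative).
    option(MNN_BUILD_TRAIN "Build MNN training tools" OFF)
    if(MNN_BUILD_TRAIN)
      include(${CMAKE_CURRENT_LIST_DIR}/tools/train/CMakeLists.txt)  # or add_subdirectory(tools/train)
    endif()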
@@ -87,4 +87,4 @@ private:
 } // namespace Train
 } // namespace MNN
 #endif // DataLoader_hpp