[update] ignore MNN.framework

游薪渝(揽清) 2025-02-13 17:18:16 +08:00
parent 9b892785b1
commit e202c8f8fa
37 changed files with 6 additions and 6314 deletions

.gitignore

@@ -363,3 +363,4 @@ pymnn_build/
MNN_compression_pb2.py
project/ios/MNNLLMForiOS/MNN.framework
project/MNNLLMForiOS/Chat
project/MNNLLMForiOS/MNN.framework


@@ -1,62 +0,0 @@
//
// AutoTime.hpp
// MNN
//
// Created by MNN on 2018/07/27.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_AutoTime_hpp
#define MNN_AutoTime_hpp
#include <stdint.h>
#include <stdio.h>
#include <MNN/MNNDefine.h>
namespace MNN {
class MNN_PUBLIC Timer {
public:
Timer();
~Timer();
Timer(const Timer&) = delete;
Timer(const Timer&&) = delete;
Timer& operator=(const Timer&) = delete;
Timer& operator=(const Timer&&) = delete;
// reset timer
void reset();
// get duration (us) from init or latest reset.
uint64_t durationInUs();
// Get Current Time
uint64_t current() const {
return mLastResetTime;
}
protected:
uint64_t mLastResetTime;
};
/** time tracing util. prints duration between init and deinit. */
class MNN_PUBLIC AutoTime : Timer {
public:
AutoTime(int line, const char* func);
~AutoTime();
AutoTime(const AutoTime&) = delete;
AutoTime(const AutoTime&&) = delete;
AutoTime& operator=(const AutoTime&) = delete;
AutoTime& operator=(const AutoTime&&) = delete;
private:
int mLine;
char* mName;
};
} // namespace MNN
#ifdef MNN_OPEN_TIME_TRACE
#define AUTOTIME MNN::AutoTime ___t(__LINE__, __func__)
#else
#define AUTOTIME
#endif
#endif /* AutoTime_hpp */
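For reference, a minimal usage sketch of the timing utilities above. It assumes MNN_OPEN_TIME_TRACE is defined at build time (otherwise AUTOTIME expands to nothing); the function names are illustrative.

#include <MNN/AutoTime.hpp>
#include <MNN/MNNDefine.h>

void runWorkload() {
    AUTOTIME; // expands to MNN::AutoTime ___t(__LINE__, __func__); prints the scope's duration on destruction
    // ... workload ...
}

void measureManually() {
    MNN::Timer timer;
    // ... workload ...
    uint64_t us = timer.durationInUs(); // microseconds since construction or the latest reset()
    MNN_PRINT("took %llu us\n", (unsigned long long)us);
}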


@@ -1,44 +0,0 @@
//
// ErrorCode.hpp
// MNN
//
// Created by MNN on 2018/09/18.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_ErrorCode_h
#define MNN_ErrorCode_h
namespace MNN {
enum ErrorCode {
#ifdef NO_ERROR
#undef NO_ERROR
#endif // NO_ERROR
NO_ERROR = 0,
OUT_OF_MEMORY = 1,
NOT_SUPPORT = 2,
COMPUTE_SIZE_ERROR = 3,
NO_EXECUTION = 4,
INVALID_VALUE = 5,
// User error
INPUT_DATA_ERROR = 10,
CALL_BACK_STOP = 11,
// Op Resize Error
TENSOR_NOT_SUPPORT = 20,
TENSOR_NEED_DIVIDE = 21,
// File error
FILE_CREATE_FAILED = 30,
FILE_REMOVE_FAILED = 31,
FILE_OPEN_FAILED = 32,
FILE_CLOSE_FAILED = 33,
FILE_RESIZE_FAILED = 34,
FILE_SEEK_FAILED = 35,
FILE_NOT_EXIST = 36,
FILE_UNMAP_FAILED = 37
};
} // namespace MNN
#endif /* ErrorCode_h */
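A small sketch of how these codes are typically consumed; the helper below is hypothetical, not part of the header.

#include <MNN/ErrorCode.hpp>
#include <MNN/MNNDefine.h>

// Hypothetical helper: translate an ErrorCode returned by the runtime into a log line.
static bool succeeded(MNN::ErrorCode code) {
    switch (code) {
        case MNN::NO_ERROR:
            return true;
        case MNN::OUT_OF_MEMORY:
            MNN_ERROR("out of memory\n");
            return false;
        case MNN::INPUT_DATA_ERROR:
            MNN_ERROR("invalid input data\n");
            return false;
        default:
            MNN_ERROR("failed with code %d\n", static_cast<int>(code));
            return false;
    }
}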


@@ -1,308 +0,0 @@
#ifndef MNN_HALIDE_HALIDERUNTIME_H
#define MNN_HALIDE_HALIDERUNTIME_H
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
// Note that you should not use "inline" along with HALIDE_ALWAYS_INLINE;
// it is not necessary, and may produce warnings for some build configurations.
#ifdef _MSC_VER
#define HALIDE_ALWAYS_INLINE __forceinline
#define HALIDE_NEVER_INLINE __declspec(noinline)
#else
#define HALIDE_ALWAYS_INLINE __attribute__((always_inline)) inline
#define HALIDE_NEVER_INLINE __attribute__((noinline))
#endif
/** \file
*
* This file declares the routines used by Halide internally in its
* runtime. On platforms that support weak linking, these can be
* replaced with user-defined versions by defining an extern "C"
* function with the same name and signature.
*
* When doing Just In Time (JIT) compilation, methods on the Func being
* compiled must be called instead. The corresponding methods are
* documented below.
*
* All of these functions take a "void *user_context" parameter as their
* first argument; if the Halide kernel that calls back to any of these
* functions has been compiled with the UserContext feature set on its Target,
* then the value of that pointer passed from the code that calls the
* Halide kernel is piped through to the function.
*
* Some of these are also useful to call when using the default
* implementation. E.g. halide_shutdown_thread_pool.
*
* Note that even on platforms with weak linking, some linker setups
* may not respect the override you provide. E.g. if the override is
* in a shared library and the halide object files are linked directly
* into the output, the builtin versions of the runtime functions will
* be called. See your linker documentation for more details. On
* Linux, LD_DYNAMIC_WEAK=1 may help.
*
*/
// Forward-declare to suppress warnings if compiling as C.
struct halide_buffer_t;
/** Types in the halide type system. They can be ints, unsigned ints,
* or floats (of various bit-widths), or a handle (which is always 64-bits).
* Note that the int/uint/float values do not imply a specific bit width
* (the bit width is expected to be encoded in a separate value).
*/
typedef enum halide_type_code_t
{
halide_type_int = 0, //!< signed integers
halide_type_uint = 1, //!< unsigned integers
halide_type_float = 2, //!< IEEE floating point numbers
halide_type_handle = 3, //!< opaque pointer type (void *)
halide_type_bfloat = 4 //!< floating point numbers in the bfloat format
} halide_type_code_t;
// Note that while __attribute__ can go before or after the declaration,
// __declspec apparently is only allowed before.
#ifndef HALIDE_ATTRIBUTE_ALIGN
#ifdef _MSC_VER
#define HALIDE_ATTRIBUTE_ALIGN(x) __declspec(align(x))
#else
#define HALIDE_ATTRIBUTE_ALIGN(x) __attribute__((aligned(x)))
#endif
#endif
/** A runtime tag for a type in the halide type system. Can be ints,
* unsigned ints, or floats of various bit-widths (the 'bits'
* field). Can also be vectors of the same (by setting the 'lanes'
* field to something larger than one). This struct should be
* exactly 32-bits in size. */
struct halide_type_t {
/** The basic type code: signed integer, unsigned integer, or floating point. */
#if __cplusplus >= 201103L
HALIDE_ATTRIBUTE_ALIGN(1) halide_type_code_t code; // halide_type_code_t
#else
HALIDE_ATTRIBUTE_ALIGN(1) uint8_t code; // halide_type_code_t
#endif
/** The number of bits of precision of a single scalar value of this type. */
HALIDE_ATTRIBUTE_ALIGN(1) uint8_t bits;
/** How many elements in a vector. This is 1 for scalar types. */
HALIDE_ATTRIBUTE_ALIGN(2) uint16_t lanes;
#ifdef __cplusplus
/** Construct a runtime representation of a Halide type from:
* code: The fundamental type from an enum.
* bits: The bit size of one element.
* lanes: The number of vector elements in the type. */
HALIDE_ALWAYS_INLINE halide_type_t(halide_type_code_t code, uint8_t bits, uint16_t lanes = 1)
: code(code), bits(bits), lanes(lanes) {
}
/** Default constructor is required e.g. to declare halide_trace_event
* instances. */
HALIDE_ALWAYS_INLINE halide_type_t() : code((halide_type_code_t)0), bits(0), lanes(0) {}
/** Compare two types for equality. */
HALIDE_ALWAYS_INLINE bool operator==(const halide_type_t &other) const {
return (code == other.code &&
bits == other.bits &&
lanes == other.lanes);
}
HALIDE_ALWAYS_INLINE bool operator!=(const halide_type_t &other) const {
return !(*this == other);
}
/** Size in bytes of a single element of this type, even if lanes is not 1. */
HALIDE_ALWAYS_INLINE int bytes() const { return (bits + 7) / 8; }
#endif
};
/** An opaque struct containing per-GPU API implementations of the
* device functions. */
struct halide_device_interface_impl_t;
/** Each GPU API provides a halide_device_interface_t struct pointing
* to the code that manages device allocations. You can access these
* functions directly from the struct member function pointers, or by
* calling the functions declared below. Note that the global
* functions are not available when using Halide as a JIT compiler.
* If you are using raw halide_buffer_t in that context you must use
* the function pointers in the device_interface struct.
*
* The function pointers below are currently the same for every GPU
* API; only the impl field varies. These top-level functions do the
* bookkeeping that is common across all GPU APIs, and then dispatch
* to more API-specific functions via another set of function pointers
* hidden inside the impl field.
*/
struct halide_device_interface_t {
int (*device_malloc)(void *user_context, struct halide_buffer_t *buf,
const struct halide_device_interface_t *device_interface);
int (*device_free)(void *user_context, struct halide_buffer_t *buf);
int (*device_sync)(void *user_context, struct halide_buffer_t *buf);
void (*device_release)(void *user_context,
const struct halide_device_interface_t *device_interface);
int (*copy_to_host)(void *user_context, struct halide_buffer_t *buf);
int (*copy_to_device)(void *user_context, struct halide_buffer_t *buf,
const struct halide_device_interface_t *device_interface);
int (*device_and_host_malloc)(void *user_context, struct halide_buffer_t *buf,
const struct halide_device_interface_t *device_interface);
int (*device_and_host_free)(void *user_context, struct halide_buffer_t *buf);
int (*buffer_copy)(void *user_context, struct halide_buffer_t *src,
const struct halide_device_interface_t *dst_device_interface, struct halide_buffer_t *dst);
int (*device_crop)(void *user_context, const struct halide_buffer_t *src,
struct halide_buffer_t *dst);
int (*device_release_crop)(void *user_context, struct halide_buffer_t *buf);
int (*wrap_native)(void *user_context, struct halide_buffer_t *buf, uint64_t handle,
const struct halide_device_interface_t *device_interface);
int (*detach_native)(void *user_context, struct halide_buffer_t *buf);
const struct halide_device_interface_impl_t *impl;
};
typedef struct halide_dimension_t {
int32_t min, extent, stride;
// Per-dimension flags. None are defined yet (This is reserved for future use).
uint32_t flags;
#ifdef __cplusplus
HALIDE_ALWAYS_INLINE halide_dimension_t() : min(0), extent(0), stride(0), flags(0) {}
HALIDE_ALWAYS_INLINE halide_dimension_t(int32_t m, int32_t e, int32_t s, uint32_t f = 0) :
min(m), extent(e), stride(s), flags(f) {}
HALIDE_ALWAYS_INLINE bool operator==(const halide_dimension_t &other) const {
return (min == other.min) &&
(extent == other.extent) &&
(stride == other.stride) &&
(flags == other.flags);
}
HALIDE_ALWAYS_INLINE bool operator!=(const halide_dimension_t &other) const {
return !(*this == other);
}
#endif
} halide_dimension_t;
#ifdef __cplusplus
} // extern "C"
#endif
typedef enum {halide_buffer_flag_host_dirty = 1,
halide_buffer_flag_device_dirty = 2} halide_buffer_flags;
/**
* The raw representation of an image passed around by generated
* Halide code. It includes some stuff to track whether the image is
* not actually in main memory, but instead on a device (like a
* GPU). For a more convenient C++ wrapper, use Halide::Buffer<T>. */
typedef struct halide_buffer_t {
/** A device-handle for e.g. GPU memory used to back this buffer. */
uint64_t device;
/** The interface used to interpret the above handle. */
const struct halide_device_interface_t *device_interface;
/** A pointer to the start of the data in main memory. In terms of
* the Halide coordinate system, this is the address of the min
* coordinates (defined below). */
uint8_t* host;
/** flags with various meanings. */
uint64_t flags;
/** The type of each buffer element. */
struct halide_type_t type;
/** The dimensionality of the buffer. */
int32_t dimensions;
/** The shape of the buffer. Halide does not own this array - you
* must manage the memory for it yourself. */
halide_dimension_t *dim;
/** Pads the buffer up to a multiple of 8 bytes */
void *padding;
} halide_buffer_t;
#ifdef __cplusplus
namespace {
template<typename T> struct check_is_pointer;
template<typename T> struct check_is_pointer<T *> {};
}
/** Construct the halide equivalent of a C type */
template<typename T>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of() {
// Create a compile-time error if T is not a pointer (without
// using any includes - this code goes into the runtime).
check_is_pointer<T> check;
(void)check;
return halide_type_t(halide_type_handle, 64);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<float>() {
return halide_type_t(halide_type_float, 32);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<double>() {
return halide_type_t(halide_type_float, 64);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<bool>() {
return halide_type_t(halide_type_uint, 1);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint8_t>() {
return halide_type_t(halide_type_uint, 8);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint16_t>() {
return halide_type_t(halide_type_uint, 16);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint32_t>() {
return halide_type_t(halide_type_uint, 32);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<uint64_t>() {
return halide_type_t(halide_type_uint, 64);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int8_t>() {
return halide_type_t(halide_type_int, 8);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int16_t>() {
return halide_type_t(halide_type_int, 16);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int32_t>() {
return halide_type_t(halide_type_int, 32);
}
template<>
HALIDE_ALWAYS_INLINE halide_type_t halide_type_of<int64_t>() {
return halide_type_t(halide_type_int, 64);
}
#endif
#endif // HALIDE_HALIDERUNTIME_H
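A minimal sketch of the type-tag API above: halide_type_of<T>() maps a C++ type to its runtime tag, and halide_type_t compares by (code, bits, lanes).

#include <MNN/HalideRuntime.h>
#include <cstdio>

int main() {
    halide_type_t f32 = halide_type_of<float>();   // code=halide_type_float, bits=32, lanes=1
    halide_type_t u8  = halide_type_of<uint8_t>(); // code=halide_type_uint,  bits=8,  lanes=1

    printf("float element size: %d bytes\n", f32.bytes()); // 4
    printf("uint8 element size: %d bytes\n", u8.bytes());  // 1
    printf("equal? %d\n", f32 == halide_type_t(halide_type_float, 32)); // 1
    return 0;
}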


@@ -1,182 +0,0 @@
//
// ImageProcess.hpp
// MNN
//
// Created by MNN on 2018/09/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_ImageProcess_hpp
#define MNN_ImageProcess_hpp
#include <MNN/ErrorCode.hpp>
#include <MNN/Matrix.h>
#include <MNN/Tensor.hpp>
namespace MNN {
namespace CV {
enum ImageFormat {
RGBA = 0,
RGB = 1,
BGR = 2,
GRAY = 3,
BGRA = 4,
YCrCb = 5,
YUV = 6,
HSV = 7,
XYZ = 8,
BGR555 = 9,
BGR565 = 10,
YUV_NV21 = 11,
YUV_NV12 = 12,
YUV_I420 = 13,
HSV_FULL = 14,
};
enum Filter { NEAREST = 0, BILINEAR = 1, BICUBIC = 2 };
enum Wrap { CLAMP_TO_EDGE = 0, ZERO = 1, REPEAT = 2 };
/**
* handles image processing for a tensor.
* steps:
* 1: compute the transform and get sample points
* 2: sample lines and convert the format
* 3: turn RGBA into a float tensor, then subtract the mean and normalize
*/
class MNN_PUBLIC ImageProcess {
public:
struct Inside;
struct Config {
/** data filter */
Filter filterType = NEAREST;
/** format of source data */
ImageFormat sourceFormat = RGBA;
/** format of destination data */
ImageFormat destFormat = RGBA;
// Only valid if the dest type is float
float mean[4] = {0.0f, 0.0f, 0.0f, 0.0f};
float normal[4] = {1.0f, 1.0f, 1.0f, 1.0f};
/** edge wrapper */
Wrap wrap = CLAMP_TO_EDGE;
};
public:
/**
* @brief create image process with given config for given tensor.
* @param config given config.
* @param dstTensor given tensor.
* @return image processor.
*/
static ImageProcess* create(const Config& config, const Tensor* dstTensor = nullptr);
/**
* @brief create image process with given config for given tensor.
* @param means given means
* @param meanCount given means count
* @param normals given normals
* @param normalCount given normal count
* @param sourceFormat format of source data
* @param destFormat format of destination data
* @param dstTensor given tensor.
* @return image processor.
*/
static ImageProcess* create(const ImageFormat sourceFormat = RGBA, const ImageFormat destFormat = RGBA,
const float* means = nullptr, const int meanCount = 0, const float* normals = nullptr,
const int normalCount = 0, const Tensor* dstTensor = nullptr);
~ImageProcess();
static void destroy(ImageProcess* imageProcess);
/**
* @brief get affine transform matrix.
* @return affine transform matrix.
*/
inline const Matrix& matrix() const {
return mTransform;
}
void setMatrix(const Matrix& matrix);
/**
* @brief convert source data to given tensor.
* @param source source data.
* @param iw source width.
* @param ih source height.
* @param stride number of elements per row. eg: 100 width RGB contains at least 300 elements.
* @param dest given tensor.
* @return result code.
*/
ErrorCode convert(const uint8_t* source, int iw, int ih, int stride, Tensor* dest);
/**
* @brief convert source data to given tensor.
* @param source source data.
* @param iw source width.
* @param ih source height.
* @param stride number of elements per row. eg: 100 width RGB contains at least 300 elements.
* @param dest dest data.
* @param ow output width.
* @param oh output height.
* @param outputBpp output bpp; if 0, set the same as config.destFormat.
* @param outputStride output stride, if 0, set as ow * outputBpp.
* @param type Only support halide_type_of<uint8_t> and halide_type_of<float>.
* @return result code.
*/
ErrorCode convert(const uint8_t* source, int iw, int ih, int stride, void* dest, int ow, int oh, int outputBpp = 0,
int outputStride = 0, halide_type_t type = halide_type_of<float>());
/**
* @brief create tensor with given data.
* @param w image width.
* @param h image height.
* @param bpp bytes per pixel.
* @param p pixel data pointer.
* @return created tensor.
*/
template <typename T>
static Tensor* createImageTensor(int w, int h, int bpp, void* p = nullptr) {
return createImageTensor(halide_type_of<T>(), w, h, bpp, p);
}
static Tensor* createImageTensor(halide_type_t type, int w, int h, int bpp, void* p = nullptr);
/**
* @brief set padding value when wrap=ZERO.
* @param value padding value.
* @return void.
*/
void setPadding(uint8_t value) {
mPaddingValue = value;
}
/**
* @brief set to draw mode.
* @param void
* @return void.
*/
void setDraw();
/**
* @brief draw color to regions of img.
* @param img the image to draw.
* @param w the image's width.
* @param h the image's height.
* @param c the image's channel.
* @param regions the regions to draw; size is [num * 3], containing num triples of { y, xl, xr }.
* @param num regions num
* @param color the color to draw.
* @return void.
*/
void draw(uint8_t* img, int w, int h, int c, const int* regions, int num, const uint8_t* color);
private:
ImageProcess(const Config& config);
Matrix mTransform;
Matrix mTransformInvert;
Inside* mInside;
uint8_t mPaddingValue = 0;
};
} // namespace CV
} // namespace MNN
#endif /* MNN_ImageProcess_hpp */
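A minimal preprocessing sketch against the API above, assuming `rgba` points to a w x h RGBA image and `input` is a float tensor obtained from a session. The mean/normal values are illustrative Caffe-style constants; MNN applies dst = (src - mean) * normal per channel.

#include <MNN/ImageProcess.hpp>
#include <memory>

void preprocess(const uint8_t* rgba, int w, int h, MNN::Tensor* input) {
    MNN::CV::ImageProcess::Config config;
    config.sourceFormat = MNN::CV::RGBA;
    config.destFormat   = MNN::CV::BGR;
    config.filterType   = MNN::CV::BILINEAR;
    // Illustrative normalization constants: dst = (src - mean) * normal
    config.mean[0] = 103.94f; config.mean[1] = 116.78f; config.mean[2] = 123.68f;
    config.normal[0] = config.normal[1] = config.normal[2] = 0.017f;

    std::unique_ptr<MNN::CV::ImageProcess> process(
        MNN::CV::ImageProcess::create(config, input));
    // stride is elements per row: RGBA has 4 elements per pixel.
    process->convert(rgba, w, h, w * 4, input);
}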


@@ -1,515 +0,0 @@
//
// Interpreter.hpp
// MNN
//
// Created by MNN on 2018/07/23.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_Interpreter_hpp
#define MNN_Interpreter_hpp
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <MNN/ErrorCode.hpp>
#include <MNN/MNNForwardType.h>
#include <MNN/Tensor.hpp>
namespace MNN {
/** session schedule config */
struct ScheduleConfig {
/** which tensor should be kept */
std::vector<std::string> saveTensors;
/** forward type */
MNNForwardType type = MNN_FORWARD_CPU;
/** CPU: number of threads in parallel; GPU: mode setting */
union {
int numThread = 4;
int mode;
};
/** subpath to run */
struct Path {
std::vector<std::string> inputs;
std::vector<std::string> outputs;
enum Mode {
/**
* Op Mode
* - inputs means the source ops; can NOT be empty.
* - outputs means the sink ops; can be empty.
* The path starts from the source ops and flows until it encounters a sink op.
* The sink ops are not computed in this path.
*/
Op = 0,
/**
* Tensor Mode
* - inputs means the input tensors; can NOT be empty.
* - outputs means the output tensors; can NOT be empty.
* It will find the pipeline that computes the outputs from the inputs.
*/
Tensor = 1
};
/** running mode */
Mode mode = Op;
};
Path path;
/** backup backend used to create execution when the designated backend does NOT support an op */
MNNForwardType backupType = MNN_FORWARD_CPU;
/** extra backend config */
BackendConfig* backendConfig = nullptr;
};
class Session;
struct Content;
class Tensor;
class Backend;
class Runtime;
class MNN_PUBLIC OperatorInfo {
struct Info;
public:
/** Operator's name*/
const std::string& name() const;
/** Operator's type*/
const std::string& type() const;
/** Operator's flops, in M*/
float flops() const;
protected:
OperatorInfo();
~OperatorInfo();
Info* mContent;
};
typedef std::function<bool(const std::vector<Tensor*>&, const std::string& /*opName*/)> TensorCallBack;
typedef std::function<bool(const std::vector<Tensor*>&, const OperatorInfo*)> TensorCallBackWithInfo;
typedef std::pair< std::map<MNNForwardType, std::shared_ptr<Runtime>>, std::shared_ptr<Runtime>> RuntimeInfo;
/**
* @brief get mnn version info.
* @return mnn version string.
*/
MNN_PUBLIC const char* getVersion();
/** net data holder. multiple sessions could share same net. */
class MNN_PUBLIC Interpreter {
public:
/**
* @brief create net from file.
* @param file given file.
* @return created net if success, NULL otherwise.
*/
static Interpreter* createFromFile(const char* file);
/**
* @brief create net from buffer.
* @param buffer given data buffer.
* @param size size of data buffer.
* @return created net if success, NULL otherwise.
*/
static Interpreter* createFromBuffer(const void* buffer, size_t size);
~Interpreter();
/**
* @brief destroy Interpreter
* @param model given Interpreter to release.
*/
static void destroy(Interpreter* net);
enum SessionMode {
/** About CallBack, Default Session_Debug*/
/** runSessionWithCallBack is allowed and can get internal op info*/
Session_Debug = 0,
/** runSessionWithCallBack is not valid and can't get any info of op in session*/
Session_Release = 1,
/** About input tensor, Default Session_Input_Inside*/
/** The input tensor is allocated by the session; set input data after the session is resized*/
Session_Input_Inside = 2,
/** The input tensor is allocated by the user; set input data before the session is resized*/
Session_Input_User = 3,
/** The output tensor depends on the session and can't be used separately*/
Session_Output_Inside = 4,
/** The output tensor can be separated from session*/
Session_Output_User = 5,
/** Whether to try to resize the session when creating it; default is direct: */
Session_Resize_Direct = 6,
Session_Resize_Defer = 7,
/** Determine whether the Execution's forward type is fixed by the user or chosen automatically */
Session_Backend_Fix = 8, // Use the backend the user set; when an op is not supported, use the default backend
Session_Backend_Auto = 9, // Let MNN automatically determine the op's backend
/** Determine whether static memory is recycled in resizeSession or cached for later use */
Session_Memory_Collect = 10, // Recycle static memory when the session resizes, to avoid memory blow-up
Session_Memory_Cache = 11, // Cache the static memory for next forward usage
/** Determine whether use codegen function */
Session_Codegen_Disable = 12, // Disable codegen in case extra build codegen cost
Session_Codegen_Enable = 13, // Enable codegen
/** Dynamic Resize Optimization */
Session_Resize_Check = 14, // Open Trace for resize
Session_Resize_Fix = 15, // Apply Resize Optimization
/** Set for Module's traceOrOptimize API.
Module_Forward_Separate:
when inputs are not empty, Module's onForward will only infer shapes and allocate memory.
when inputs are empty, Module's onForward will only run the session to compute content.
Default is Module_Forward_Combine
*/
Module_Forward_Separate = 16,
Module_Forward_Combine = 17,
};
/**
* @brief The API should be called before creating a session.
* @param mode session mode
*/
void setSessionMode(SessionMode mode);
/**
* @brief The API should be called before creating a session.
* If the cache exists, try to load the cache from the file.
* After createSession, try to save the cache to the file.
* @param cacheFile cache file name
* @param keySize deprecated; reserved for future use.
*/
void setCacheFile(const char* cacheFile, size_t keySize = 128);
/**
* @brief The API should be called before creating a session.
* @param file external data file name
* @param flag deprecated; reserved for future use.
*/
void setExternalFile(const char* file, size_t flag = 128);
/**
* @brief The API should be called after the last resizeSession.
* If resizing the session generates new cache info, try to rewrite the cache file.
* If it does not generate any new cache info, do nothing.
* @param session given session
* @param flag Protected param, not used now
*/
ErrorCode updateCacheFile(Session *session, int flag = 0);
enum HintMode {
// Max Op number for async tuning
MAX_TUNING_NUMBER = 0,
// Strictly check the model file or not, default 1. If set to 0, the model file will not be validated
STRICT_CHECK_MODEL = 1,
MEM_ALLOCATOR_TYPE = 2,
// Winograd unit candidates count, default 3. If set to 0, fewer unit candidates are used, saving memory at the expense of performance.
WINOGRAD_MEMORY_LEVEL = 3,
// Geometry Compute option, default is 0xFFFF
GEOMETRY_COMPUTE_MASK = 4,
// 0: Close dynamic quant;
// 1: For general convolution, use one scale&zeropoint to quant.
DYNAMIC_QUANT_OPTIONS = 5,
// For mobile CPUs with big-little cores, set the decrease rate to let MNN divide tasks according to each core's performance
// 0-100; 50 means a little core has 50% of a big core's capacity
// Default is 50
CPU_LITTLECORE_DECREASE_RATE = 6,
// 0: Do not quantize
// 1: Only quantize key, use int8 asymmetric quantization
// 2: Only quantize value, use fp8 quantization
// 3: quantize both key and value
// 4: quantize query, key and value, and use gemm int8 kernel to compute K*V
QKV_QUANT_OPTIONS = 7,
// size limit of kvcache in memory (for a single layer)
// if the size of kvcache exceeds the limit, it will be moved to disk
KVCACHE_SIZE_LIMIT = 8,
// Op encoder number for commit
OP_ENCODER_NUMBER_FOR_COMMIT = 9,
// KVCache Info
KVCACHE_INFO = 10,
// mmap allocate file size, KB
MMAP_FILE_SIZE = 11,
USE_CACHED_MMAP = 12
};
enum ExternalPathType {
// Path of the kvcache directory
EXTERNAL_PATH_KVCACHE_DIR = 0,
// Mid Buffer Cache File
EXTERNAL_FEATUREMAP_DIR = 1,
// Weight Buffer Cache File
EXTERNAL_WEIGHT_DIR = 2,
// Other types ...
};
enum GeometryComputeMask {
// Support Region Fuse
GEOMETRCOMPUTEMASK_FUSEREGION = 1 << 0,
// Support Region Fuse to input with multi-region, eg: pad + concat
GEOMETRCOMPUTEMASK_FUSEREGION_MULTI = 1 << 1,
// Use loop instead of raster + compute if possible
GEOMETRCOMPUTEMASK_USELOOP = 1 << 2,
// Support Geometry Cache, if shape changed, will try recompute, and then run compute if failed
GEOMETRCOMPUTEMASK_OPENCACHE = 1 << 3,
// Full option open mask, for example, if want to close useloop, can set mask as (GEOMETRCOMPUTEMASK_ALL - GEOMETRCOMPUTEMASK_USELOOP)
GEOMETRCOMPUTEMASK_ALL = 0xFFFF,
};
/**
* @brief The API should be called before creating a session.
* @param mode Hint type
* @param value Hint value
*/
void setSessionHint(HintMode mode, int value);
public:
/**
* @brief create runtimeInfo separately with schedule config.
* @param configs session schedule configs.
*/
static RuntimeInfo createRuntime(const std::vector<ScheduleConfig>& configs);
/**
* @brief create session with schedule config. created session will be managed in net.
* @param config session schedule config.
* @return created session if success, NULL otherwise.
*/
Session* createSession(const ScheduleConfig& config);
/**
* @brief create session with schedule config and user-specified runtime.
* @param config session schedule config, runtime runtimeInfo used by the created session.
* @return created session if success, NULL otherwise.
*/
Session* createSession(const ScheduleConfig& config, const RuntimeInfo& runtime);
/**
* @brief create multi-path session with schedule configs. created session will be managed in net.
* @param configs session schedule configs.
* @return created session if success, NULL otherwise.
*/
Session* createMultiPathSession(const std::vector<ScheduleConfig>& configs);
/**
* @brief create multi-path session with schedule configs and user-specified runtime.
created session will be managed in net.
* @param configs session schedule configs.
* @return created session if success, NULL otherwise.
*/
Session* createMultiPathSession(const std::vector<ScheduleConfig>& configs, const RuntimeInfo& runtime);
/**
* @brief release session.
* @param session given session.
* @return true if given session is held by net and is freed.
*/
bool releaseSession(Session* session);
/**
* @brief call this function to get tensors ready. output tensor buffer (host or deviceId) should be retrieved
* after resize of any input tensor.
* @param session given session.
*/
void resizeSession(Session* session);
/**
* @brief call this function to get tensors ready. output tensor buffer (host or deviceId) should be retrieved
* after resize of any input tensor.
* @param session given session.
* @param needRelloc 1 means memory needs to be reallocated.
*/
void resizeSession(Session* session, int needRelloc);
/**
* @brief call this function if you no longer need to resize or create sessions; it frees memory equal
* to the size of the model buffer
*/
void releaseModel();
/**
* @brief Get the model buffer for user to save
* @return std::make_pair(modelBuffer, modelSize).
* @example:
* std::ofstream output("trainResult.alinn");
* auto buffer = net->getModelBuffer();
* output.write((const char*)buffer.first, buffer.second);
*/
std::pair<const void*, size_t> getModelBuffer() const;
/**
* @brief Get the model's version info.
* @return const char* of the model's version info, like "2.0.0";
* if the model is not loaded or has no version info, returns "version info not found".
*/
const char* getModelVersion() const;
/**
* @brief update Session's Tensor to model's Const Op
* @param session given session.
* @return result of running.
*/
ErrorCode updateSessionToModel(Session* session);
/**
* @brief run session.
* @param session given session.
* @return result of running.
*/
ErrorCode runSession(Session* session) const;
/*
* @brief run session.
* @param session given session.
* @param before callback before each op. return true to run the op; return false to skip the op.
* @param end callback after each op. return true to continue running; return false to interrupt the session.
* @param sync whether to synchronously wait for execution to finish.
* @return result of running.
*/
ErrorCode runSessionWithCallBack(const Session* session, const TensorCallBack& before, const TensorCallBack& end,
bool sync = false) const;
/*
* @brief run session.
* @param session given session.
* @param before callback before each op. return true to run the op; return false to skip the op.
* @param end callback after each op. return true to continue running; return false to interrupt the session.
* @param sync whether to synchronously wait for execution to finish.
* @return result of running.
*/
ErrorCode runSessionWithCallBackInfo(const Session* session, const TensorCallBackWithInfo& before,
const TensorCallBackWithInfo& end, bool sync = false) const;
/**
* @brief get input tensor for given name.
* @param session given session.
* @param name given name. if NULL, return first input.
* @return tensor if found, NULL otherwise.
*/
Tensor* getSessionInput(const Session* session, const char* name);
/**
* @brief get output tensor for given name.
* @param session given session.
* @param name given name. if NULL, return first output.
* @return tensor if found, NULL otherwise.
*/
Tensor* getSessionOutput(const Session* session, const char* name);
enum SessionInfoCode {
/** memory session used in MB, float* */
MEMORY = 0,
/** float operation needed in session in M, float* */
FLOPS = 1,
/** Backends in session, int*, length >= 1 + number of configs when creating the session */
BACKENDS = 2,
/** Resize Info, int*; the meaning differs between the APIs:
Interpreter::getSessionInfo: 0: ready to execute, 1: need malloc, 2: need resize
RuntimeManager::getInfo: 0: no resize, 1: re-malloc, 2: resize
*/
RESIZE_STATUS = 3,
/** Mode / NumberThread, int* */
THREAD_NUMBER = 4,
ALL
};
/**
* @brief get session info
* @param session given session.
* @param code given info code.
* @param ptr given info ptr, see SessionInfoCode for detail
* @return true if support the code, false otherwise.
*/
bool getSessionInfo(const Session* session, SessionInfoCode code, void* ptr);
/**
* @brief get all output tensors.
* @param session given session.
* @return all output tensors mapped with name.
*/
const std::map<std::string, Tensor*>& getSessionOutputAll(const Session* session) const;
/**
* @brief get all input tensors.
* @param session given session.
* @return all input tensors mapped with name.
*/
const std::map<std::string, Tensor*>& getSessionInputAll(const Session* session) const;
public:
/**
* @brief resize given tensor.
* @param tensor given tensor.
* @param dims new dims. at most 6 dims.
*/
void resizeTensor(Tensor* tensor, const std::vector<int>& dims);
/**
* @brief resize given tensor by NCHW.
* @param tensor given tensor.
* @param batch batch size / N.
* @param channel channel count / C.
* @param height height / H.
* @param width width / W.
*/
void resizeTensor(Tensor* tensor, int batch, int channel, int height, int width);
/**
* @brief get backend used to create given tensor.
* @param session given session.
* @param tensor given tensor.
* @return backend used to create given tensor, may be NULL.
*/
const Backend* getBackend(const Session* session, const Tensor* tensor) const;
/**
* @brief get business code (model identifier).
* @return business code.
*/
const char* bizCode() const;
/**
* @brief get model UUID
* @return Model UUID.
*/
const char* uuid() const;
private:
static Interpreter* createFromBufferInternal(Content* net, bool enforceAuth);
Content* mNet = nullptr;
Interpreter(Content* net);
Interpreter(const Interpreter&) = delete;
Interpreter(const Interpreter&&) = delete;
Interpreter& operator=(const Interpreter&) = delete;
Interpreter& operator=(const Interpreter&&) = delete;
void waitSessionFinish(const Session* session) const;
#ifdef MNN_INTERNAL_ENABLED
void logForRunSession(const Session* session, float time, const char* api) const;
#endif
};
} // namespace MNN
#endif /* Interpreter_hpp */
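A minimal end-to-end sketch of the session workflow above, assuming a file "model.mnn" exists and the model has a single input and a single output; the 1x3x224x224 shape is illustrative.

#include <MNN/Interpreter.hpp>
#include <memory>

int main() {
    std::shared_ptr<MNN::Interpreter> net(
        MNN::Interpreter::createFromFile("model.mnn"), MNN::Interpreter::destroy);
    if (!net) return 1;

    MNN::ScheduleConfig config;
    config.type      = MNN_FORWARD_CPU;
    config.numThread = 4;
    MNN::Session* session = net->createSession(config);

    // Resize the input if needed, then get all buffers ready.
    MNN::Tensor* input = net->getSessionInput(session, nullptr);
    net->resizeTensor(input, {1, 3, 224, 224});
    net->resizeSession(session);

    // ... fill the input tensor's host buffer here ...
    net->runSession(session);

    MNN::Tensor* output = net->getSessionOutput(session, nullptr);
    (void)output; // read results from the output tensor
    return 0;
}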


@@ -1,81 +0,0 @@
//
// MNNDefine.h
// MNN
//
// Created by MNN on 2018/08/09.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNNDefine_h
#define MNNDefine_h
#include <assert.h>
#include <stdio.h>
#if defined(__APPLE__)
#include <TargetConditionals.h>
#if TARGET_OS_IPHONE
#define MNN_BUILD_FOR_IOS
#endif
#endif
#ifdef MNN_USE_LOGCAT
#if defined(__OHOS__)
#include <hilog/log.h>
#define MNN_ERROR(format, ...) {char logtmp[4096]; snprintf(logtmp, 4096, format, ##__VA_ARGS__); OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_DOMAIN, "MNNJNI", (const char*)logtmp);}
#define MNN_PRINT(format, ...) {char logtmp[4096]; snprintf(logtmp, 4096, format, ##__VA_ARGS__); OH_LOG_Print(LOG_APP, LOG_DEBUG, LOG_DOMAIN, "MNNJNI", (const char*)logtmp);}
#else
#include <android/log.h>
#define MNN_ERROR(format, ...) __android_log_print(ANDROID_LOG_ERROR, "MNNJNI", format, ##__VA_ARGS__)
#define MNN_PRINT(format, ...) __android_log_print(ANDROID_LOG_INFO, "MNNJNI", format, ##__VA_ARGS__)
#endif
#elif defined MNN_BUILD_FOR_IOS
// on iOS, stderr prints to the Xcode debug area and syslog prints to the Console. You need both.
#include <syslog.h>
#define MNN_PRINT(format, ...) syslog(LOG_WARNING, format, ##__VA_ARGS__); fprintf(stderr, format, ##__VA_ARGS__)
#define MNN_ERROR(format, ...) syslog(LOG_WARNING, format, ##__VA_ARGS__); fprintf(stderr, format, ##__VA_ARGS__)
#else
#define MNN_PRINT(format, ...) printf(format, ##__VA_ARGS__)
#define MNN_ERROR(format, ...) printf(format, ##__VA_ARGS__)
#endif
#ifdef DEBUG
#define MNN_ASSERT(x) \
{ \
int res = (x); \
if (!res) { \
MNN_ERROR("Error for %s, %d\n", __FILE__, __LINE__); \
assert(res); \
} \
}
#else
#define MNN_ASSERT(x)
#endif
#define FUNC_PRINT(x) MNN_PRINT(#x "=%d in %s, %d \n", x, __func__, __LINE__);
#define FUNC_PRINT_ALL(x, type) MNN_PRINT(#x "=" #type " %" #type " in %s, %d \n", x, __func__, __LINE__);
#define MNN_CHECK(success, log) \
if(!(success)){ \
MNN_ERROR("Check failed: %s ==> %s\n", #success, #log); \
}
#if defined(_MSC_VER)
#if defined(BUILDING_MNN_DLL)
#define MNN_PUBLIC __declspec(dllexport)
#elif defined(USING_MNN_DLL)
#define MNN_PUBLIC __declspec(dllimport)
#else
#define MNN_PUBLIC
#endif
#else
#define MNN_PUBLIC __attribute__((visibility("default")))
#endif
#define STR_IMP(x) #x
#define STR(x) STR_IMP(x)
#define MNN_VERSION_MAJOR 3
#define MNN_VERSION_MINOR 0
#define MNN_VERSION_PATCH 4
#define MNN_VERSION STR(MNN_VERSION_MAJOR) "." STR(MNN_VERSION_MINOR) "." STR(MNN_VERSION_PATCH)
#endif /* MNNDefine_h */
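A small sketch of the logging and version macros above; FUNC_PRINT and MNN_CHECK are convenience wrappers over MNN_PRINT/MNN_ERROR.

#include <MNN/MNNDefine.h>

int main() {
    // STR(...) stringizes the version components, so MNN_VERSION is "3.0.4" here.
    MNN_PRINT("MNN version: %s\n", MNN_VERSION);

    int fd = -1;
    FUNC_PRINT(fd); // prints: fd=-1 in main, <line>
    MNN_CHECK(fd >= 0, "fd should be valid"); // logs "Check failed: ..." because the condition is false
    return 0;
}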


@@ -1,120 +0,0 @@
//
// MNNForwardType.h
// MNN
//
// Created by MNN on 2019/01/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNNForwardType_h
#define MNNForwardType_h
#include <stdint.h>
#include <stddef.h>
typedef enum {
MNN_FORWARD_CPU = 0,
/*
First, find the first available backend other than CPU.
If there is no other backend, use CPU.
*/
MNN_FORWARD_AUTO = 4,
/*Hand write metal*/
MNN_FORWARD_METAL = 1,
/*NVIDIA GPU API*/
MNN_FORWARD_CUDA = 2,
/*Android / Common Device GPU API*/
MNN_FORWARD_OPENCL = 3,
MNN_FORWARD_OPENGL = 6,
MNN_FORWARD_VULKAN = 7,
/*Android 8.1's NNAPI, or CoreML on iOS*/
MNN_FORWARD_NN = 5,
/*User can use API from Backend.hpp to add or search Backend*/
MNN_FORWARD_USER_0 = 8,
MNN_FORWARD_USER_1 = 9,
MNN_FORWARD_USER_2 = 10,
MNN_FORWARD_USER_3 = 11,
MNN_FORWARD_ALL = 12,
/* Apply the ARM extension instruction set to accelerate some ops. This forward type
is only used internally by MNN, and becomes active automatically when the user sets the forward type
to MNN_FORWARD_CPU and the extension instruction set is available on the hardware.
*/
MNN_FORWARD_CPU_EXTENSION = 13,
// use for shared memory on android device
MNN_MEMORY_AHARDWAREBUFFER = 14
} MNNForwardType;
typedef enum {
// For the OpenCL backend, all five of the following options are valid. The user is allowed to enable any one of them.
// For the Vulkan backend, only options MNN_GPU_TUNING_NONE, MNN_GPU_TUNING_HEAVY, and MNN_GPU_TUNING_WIDE are valid. The user is allowed to enable any one of these three.
MNN_GPU_TUNING_NONE = 1 << 0, /* Tuning forbidden; performance may not be good. (OpenCL/Vulkan) */
MNN_GPU_TUNING_HEAVY = 1 << 1, /* Heavy tuning; usually not suggested. (OpenCL/Vulkan) */
MNN_GPU_TUNING_WIDE = 1 << 2, /* Wide tuning; good performance. Default. (OpenCL/Vulkan) */
MNN_GPU_TUNING_NORMAL = 1 << 3, /* Normal tuning; performance may be OK. (OpenCL) */
MNN_GPU_TUNING_FAST = 1 << 4, /* Fast tuning; performance may not be good. (OpenCL) */
// For the OpenCL backend, the following two options are both valid. The user could try OpenCL_MEMORY_BUFFER and OpenCL_MEMORY_IMAGE both, and then choose the better one based on performance.
// For the Vulkan backend, neither option is valid. The user uses the CMake option MNN_VULKAN_IMAGE to select between image memory mode and buffer memory mode.
MNN_GPU_MEMORY_BUFFER = 1 << 6, /* OpenCL_MEMORY_BUFFER */
MNN_GPU_MEMORY_IMAGE = 1 << 7, /* OpenCL_MEMORY_IMAGE */
// For the OpenCL backend, the following two options are effective only on Qualcomm GPUs. When using a Qualcomm GPU, the user could try both options and choose the better one based on performance.
// For the Vulkan backend, only option MNN_GPU_RECORD_BATCH is valid. When MNN_GPU_RECORD_BATCH is enabled, all ops would share one commandBuffer.
MNN_GPU_RECORD_OP = 1 << 8, /* The kernels in one op execution record into one recording.(OpenCL) */
MNN_GPU_RECORD_BATCH = 1 << 9, /* 10 kernels record into one recording.(OpenCL) All ops share one commandBuffer.(Vulkan) */
} MNNGpuMode;
#ifdef __cplusplus
namespace MNN {
struct BackendConfig {
enum MemoryMode { Memory_Normal = 0, Memory_High, Memory_Low };
MemoryMode memory = Memory_Normal;
enum PowerMode { Power_Normal = 0, Power_High, Power_Low };
PowerMode power = Power_Normal;
enum PrecisionMode { Precision_Normal = 0, Precision_High, Precision_Low, Precision_Low_BF16 };
PrecisionMode precision = Precision_Normal;
/** user defined context */
union {
void* sharedContext = nullptr;
size_t flags; // Valid for CPU Backend
};
};
/** acquire runtime status by Runtime::getCurrentStatus with the following keys
*/
enum RuntimeStatus {
/**
* get status of whether this runtime supports 16-bit floating point arithmetic
*/
STATUS_SUPPORT_FP16,
/**
* get status of whether this runtime supports dot-product arithmetic
*/
STATUS_SUPPORT_DOT_PRODUCT,
/**
* get status of whether this runtime supports low power mode (means low priority for OpenCL)
*/
STATUS_SUPPORT_POWER_LOW,
/**
* enum total number
*/
STATUS_COUNT
};
}; // namespace MNN
#endif
#endif /* MNNForwardType_h */
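A sketch of combining these types with ScheduleConfig from Interpreter.hpp: for GPU backends, the union member `mode` (which shares storage with `numThread`) carries MNNGpuMode flags. The exact flag and precision choices here are illustrative.

#include <MNN/Interpreter.hpp>
#include <MNN/MNNForwardType.h>

MNN::ScheduleConfig makeOpenCLConfig() {
    MNN::ScheduleConfig config;
    config.type = MNN_FORWARD_OPENCL;
    // GPU flags travel through the numThread/mode union.
    config.mode = MNN_GPU_TUNING_WIDE | MNN_GPU_MEMORY_BUFFER;

    // Keep the BackendConfig alive for the session's lifetime; static here for brevity.
    static MNN::BackendConfig backendConfig;
    backendConfig.precision = MNN::BackendConfig::Precision_Low; // allow reduced precision on GPU
    backendConfig.power     = MNN::BackendConfig::Power_Normal;
    config.backupType    = MNN_FORWARD_CPU;
    config.backendConfig = &backendConfig;
    return config;
}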


@@ -1,79 +0,0 @@
//
// MNNSharedContext.h
// MNN
//
// Created by MNN on 2018/10/11.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNNSharedContext_h
#define MNNSharedContext_h
#include "MNNDefine.h"
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h> /*uint32_t*/
#ifdef MNN_VULKAN
struct MNNVulkanContext {
VkInstance pInstance;
VkPhysicalDevice pPhysicalDevice;
VkDevice pDevice;
VkQueue pQueue;
uint32_t iQueueFamilyIndex;
};
struct MNNVulkanTensorContent {
VkBuffer buffer;
VkDeviceSize size;
VkDeviceSize offset;
halide_type_t realType;
int32_t mask; // For future usage
};
#endif
#ifdef MNN_METAL
struct MNNMetalSharedContext {
id<MTLDevice> device;
id<MTLCommandQueue> queue;
};
struct MNNMetalTensorContent {
id<MTLBuffer> buffer;
int32_t offset;
id<MTLTexture> texture;
halide_type_t type;
int32_t mask;
int32_t forFuture[8];
};
MNN_PUBLIC int MNNMetalGetTensorContent(MNNMetalTensorContent* content, void* tensor);
#endif
#ifdef MNN_USER_SET_DEVICE
struct MNNDeviceContext {
// When one GPU card has multiple devices, choose the device by setting deviceId
uint32_t deviceId = 0;
// When there are multiple GPU cards, choose the card by setting platformId
uint32_t platformId = 0;
// User-set number of GPU cards
uint32_t platformSize = 0;
// User set OpenCL context ptr
void *contextPtr = nullptr;
// User set OpenGL shared data
void *glShared = nullptr;
};
#endif
#ifdef __cplusplus
}
#endif
#endif /* MNNSharedContext_h */
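A hypothetical sketch of handing an existing Vulkan device to MNN. It assumes the library was built with MNN_VULKAN and that the backend consumes MNNVulkanContext through BackendConfig::sharedContext; Vulkan headers must be included first, since the struct uses raw Vk handles.

#ifdef MNN_VULKAN
#include <vulkan/vulkan.h>
#include <MNN/MNNSharedContext.h>
#include <MNN/Interpreter.hpp>

MNN::ScheduleConfig makeVulkanConfig(VkInstance instance, VkPhysicalDevice gpu,
                                     VkDevice device, VkQueue queue,
                                     uint32_t queueFamilyIndex) {
    // Static so the pointers stay valid for the session's lifetime.
    static MNNVulkanContext vkContext;
    vkContext.pInstance         = instance;
    vkContext.pPhysicalDevice   = gpu;
    vkContext.pDevice           = device;
    vkContext.pQueue            = queue;
    vkContext.iQueueFamilyIndex = queueFamilyIndex;

    static MNN::BackendConfig backendConfig;
    backendConfig.sharedContext = &vkContext; // assumption: the Vulkan backend reads this

    MNN::ScheduleConfig config;
    config.type          = MNN_FORWARD_VULKAN;
    config.backendConfig = &backendConfig;
    return config;
}
#endif // MNN_VULKAN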

File diff suppressed because it is too large.


@@ -1,580 +0,0 @@
//
// Rect.h
// MNN
//
// Modified by jiangxiaotang on 2018/09/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
/*
* Copyright 2006 The Android Open Source Project
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Generated by tools/bookmaker from include/core/Rect.h and docs/SkRect_Reference.bmh
on 2018-07-13 08:15:11. Additional documentation and examples can be found at:
https://skia.org/user/api/SkRect_Reference
You may edit either file directly. Structural changes to public interfaces require
editing both files. After editing docs/SkRect_Reference.bmh, run:
bookmaker -b docs -i include/core/Rect.h -p
to create an updated version of this file.
*/
#ifndef MNN_Rect_DEFINED
#define MNN_Rect_DEFINED
#include <math.h>
#include <algorithm>
#include <utility>
#include <MNN/MNNDefine.h>
namespace MNN {
namespace CV {
struct Point {
float fX;
float fY;
void set(float x, float y) {
fX = x;
fY = y;
}
};
/** \struct Rect
Rect holds four float coordinates describing the upper and
lower bounds of a rectangle. Rect may be created from outer bounds or
from position, width, and height. Rect describes an area; if its right
is less than or equal to its left, or if its bottom is less than or equal to
its top, it is considered empty.
*/
struct MNN_PUBLIC Rect {
float fLeft; //!< smaller x-axis bounds
float fTop; //!< smaller y-axis bounds
float fRight; //!< larger x-axis bounds
float fBottom; //!< larger y-axis bounds
/** Returns constructed Rect set to (0, 0, 0, 0).
Many other rectangles are empty; if left is equal to or greater than right,
or if top is equal to or greater than bottom. Setting all members to zero
is a convenience, but does not designate a special empty rectangle.
@return bounds (0, 0, 0, 0)
*/
static constexpr Rect MakeEmpty() {
return Rect{0, 0, 0, 0};
}
#ifdef SK_SUPPORT_LEGACY_RECTMAKELARGEST
/** Deprecated.
*/
static Rect MakeLargest() {
return {SK_ScalarMin, SK_ScalarMin, SK_ScalarMax, SK_ScalarMax};
}
#endif
/** Returns constructed Rect set to float values (0, 0, w, h). Does not
validate input; w or h may be negative.
Passing integer values may generate a compiler warning since Rect cannot
represent 32-bit integers exactly. Use SkIRect for an exact integer rectangle.
@param w float width of constructed Rect
@param h float height of constructed Rect
@return bounds (0, 0, w, h)
*/
static constexpr Rect MakeWH(float w, float h) {
return Rect{0, 0, w, h};
}
/** Returns constructed Rect set to integer values (0, 0, w, h). Does not validate
input; w or h may be negative.
Use to avoid a compiler warning that input may lose precision when stored.
Use SkIRect for an exact integer rectangle.
@param w integer width of constructed Rect
@param h integer height of constructed Rect
@return bounds (0, 0, w, h)
*/
static Rect MakeIWH(int w, int h) {
Rect r;
r.set(0, 0, (float)(w), (float)(h));
return r;
}
/** Returns constructed Rect set to (l, t, r, b). Does not sort input; Rect may
result in fLeft greater than fRight, or fTop greater than fBottom.
@param l float stored in fLeft
@param t float stored in fTop
@param r float stored in fRight
@param b float stored in fBottom
@return bounds (l, t, r, b)
*/
static constexpr Rect MakeLTRB(float l, float t, float r, float b) {
return Rect{l, t, r, b};
}
/** Returns constructed Rect set to (x, y, x + w, y + h). Does not validate input;
w or h may be negative.
@param x stored in fLeft
@param y stored in fTop
@param w added to x and stored in fRight
@param h added to y and stored in fBottom
@return bounds at (x, y) with width w and height h
*/
static constexpr Rect MakeXYWH(float x, float y, float w, float h) {
return Rect{x, y, x + w, y + h};
}
/** Returns true if fLeft is equal to or greater than fRight, or if fTop is equal
to or greater than fBottom. Call sort() to reverse rectangles with negative
width() or height().
@return true if width() or height() are zero or negative
*/
bool isEmpty() const {
// We write it as the NOT of a non-empty rect, so we will return true if any values
// are NaN.
return !(fLeft < fRight && fTop < fBottom);
}
/** Returns true if fLeft is equal to or less than fRight, or if fTop is equal
to or less than fBottom. Call sort() to reverse rectangles with negative
width() or height().
@return true if width() or height() are zero or positive
*/
bool isSorted() const {
return fLeft <= fRight && fTop <= fBottom;
}
/** Returns left edge of Rect, if sorted. Call isSorted() to see if Rect is valid.
Call sort() to reverse fLeft and fRight if needed.
@return fLeft
*/
float x() const {
return fLeft;
}
/** Returns top edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid,
and sort() to reverse fTop and fBottom if needed.
@return fTop
*/
float y() const {
return fTop;
}
/** Returns left edge of Rect, if sorted. Call isSorted() to see if Rect is valid.
Call sort() to reverse fLeft and fRight if needed.
@return fLeft
*/
float left() const {
return fLeft;
}
/** Returns top edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid,
and sort() to reverse fTop and fBottom if needed.
@return fTop
*/
float top() const {
return fTop;
}
/** Returns right edge of Rect, if sorted. Call isSorted() to see if Rect is valid.
Call sort() to reverse fLeft and fRight if needed.
@return fRight
*/
float right() const {
return fRight;
}
/** Returns bottom edge of Rect, if sorted. Call isEmpty() to see if Rect may be invalid,
and sort() to reverse fTop and fBottom if needed.
@return fBottom
*/
float bottom() const {
return fBottom;
}
/** Returns span on the x-axis. This does not check if Rect is sorted, or if
result fits in 32-bit float; result may be negative or infinity.
@return fRight minus fLeft
*/
float width() const {
return fRight - fLeft;
}
/** Returns span on the y-axis. This does not check if Rect is sorted, or if
result fits in 32-bit float; result may be negative or infinity.
@return fBottom minus fTop
*/
float height() const {
return fBottom - fTop;
}
/** Returns average of left edge and right edge. Result does not change if Rect
is sorted. Result may overflow to infinity if Rect is far from the origin.
@return midpoint in x
*/
float centerX() const {
// don't use floatHalf(fLeft + fRight) as that might overflow before the 0.5
return 0.5f * (fLeft) + 0.5f * (fRight);
}
/** Returns average of top edge and bottom edge. Result does not change if Rect
is sorted.
@return midpoint in y
*/
float centerY() const {
// don't use floatHalf(fTop + fBottom) as that might overflow before the 0.5
return 0.5f * (fTop) + 0.5f * (fBottom);
}
/** Sets Rect to (0, 0, 0, 0).
Many other rectangles are empty; if left is equal to or greater than right,
or if top is equal to or greater than bottom. Setting all members to zero
is a convenience, but does not designate a special empty rectangle.
*/
void setEmpty() {
*this = MakeEmpty();
}
/** Sets Rect to (left, top, right, bottom).
left and right are not sorted; left is not necessarily less than right.
top and bottom are not sorted; top is not necessarily less than bottom.
@param left stored in fLeft
@param top stored in fTop
@param right stored in fRight
@param bottom stored in fBottom
*/
void set(float left, float top, float right, float bottom) {
fLeft = left;
fTop = top;
fRight = right;
fBottom = bottom;
}
/** Sets Rect to (left, top, right, bottom).
left and right are not sorted; left is not necessarily less than right.
top and bottom are not sorted; top is not necessarily less than bottom.
@param left stored in fLeft
@param top stored in fTop
@param right stored in fRight
@param bottom stored in fBottom
*/
void setLTRB(float left, float top, float right, float bottom) {
this->set(left, top, right, bottom);
}
/** Sets Rect to (left, top, right, bottom).
All parameters are promoted from integer to scalar.
left and right are not sorted; left is not necessarily less than right.
top and bottom are not sorted; top is not necessarily less than bottom.
@param left promoted to float and stored in fLeft
@param top promoted to float and stored in fTop
@param right promoted to float and stored in fRight
@param bottom promoted to float and stored in fBottom
*/
void iset(int left, int top, int right, int bottom) {
fLeft = (float)(left);
fTop = (float)(top);
fRight = (float)(right);
fBottom = (float)(bottom);
}
/** Sets Rect to (0, 0, width, height).
width and height may be zero or negative. width and height are promoted from
integer to float, large values may lose precision.
@param width promoted to float and stored in fRight
@param height promoted to float and stored in fBottom
*/
void isetWH(int width, int height) {
fLeft = fTop = 0;
fRight = (float)(width);
fBottom = (float)(height);
}
/** Sets Rect to (x, y, x + width, y + height). Does not validate input;
width or height may be negative.
@param x stored in fLeft
@param y stored in fTop
@param width added to x and stored in fRight
@param height added to y and stored in fBottom
*/
void setXYWH(float x, float y, float width, float height) {
fLeft = x;
fTop = y;
fRight = x + width;
fBottom = y + height;
}
/** Sets Rect to (0, 0, width, height). Does not validate input;
width or height may be negative.
@param width stored in fRight
@param height stored in fBottom
*/
void setWH(float width, float height) {
fLeft = 0;
fTop = 0;
fRight = width;
fBottom = height;
}
/** Returns Rect offset by (dx, dy).
If dx is negative, Rect returned is moved to the left.
If dx is positive, Rect returned is moved to the right.
If dy is negative, Rect returned is moved upward.
If dy is positive, Rect returned is moved downward.
@param dx added to fLeft and fRight
@param dy added to fTop and fBottom
@return Rect offset on axes, with original width and height
*/
Rect makeOffset(float dx, float dy) const {
return MakeLTRB(fLeft + dx, fTop + dy, fRight + dx, fBottom + dy);
}
/** Returns Rect, inset by (dx, dy).
If dx is negative, Rect returned is wider.
If dx is positive, Rect returned is narrower.
If dy is negative, Rect returned is taller.
If dy is positive, Rect returned is shorter.
@param dx added to fLeft and subtracted from fRight
@param dy added to fTop and subtracted from fBottom
@return Rect inset symmetrically left and right, top and bottom
*/
Rect makeInset(float dx, float dy) const {
return MakeLTRB(fLeft + dx, fTop + dy, fRight - dx, fBottom - dy);
}
/** Returns Rect, outset by (dx, dy).
If dx is negative, Rect returned is narrower.
If dx is positive, Rect returned is wider.
If dy is negative, Rect returned is shorter.
If dy is positive, Rect returned is taller.
@param dx subtracted from fLeft and added to fRight
@param dy subtracted from fTop and added to fBottom
@return Rect outset symmetrically left and right, top and bottom
*/
Rect makeOutset(float dx, float dy) const {
return MakeLTRB(fLeft - dx, fTop - dy, fRight + dx, fBottom + dy);
}
/** Offsets Rect by adding dx to fLeft, fRight; and by adding dy to fTop, fBottom.
If dx is negative, moves Rect to the left.
If dx is positive, moves Rect to the right.
If dy is negative, moves Rect upward.
If dy is positive, moves Rect downward.
@param dx offset added to fLeft and fRight
@param dy offset added to fTop and fBottom
*/
void offset(float dx, float dy) {
fLeft += dx;
fTop += dy;
fRight += dx;
fBottom += dy;
}
/** Offsets Rect so that fLeft equals newX, and fTop equals newY. width and height
are unchanged.
@param newX stored in fLeft, preserving width()
@param newY stored in fTop, preserving height()
*/
void offsetTo(float newX, float newY) {
fRight += newX - fLeft;
fBottom += newY - fTop;
fLeft = newX;
fTop = newY;
}
/** Insets Rect by (dx, dy).
If dx is positive, makes Rect narrower.
If dx is negative, makes Rect wider.
If dy is positive, makes Rect shorter.
If dy is negative, makes Rect taller.
@param dx added to fLeft and subtracted from fRight
@param dy added to fTop and subtracted from fBottom
*/
void inset(float dx, float dy) {
fLeft += dx;
fTop += dy;
fRight -= dx;
fBottom -= dy;
}
/** Outsets Rect by (dx, dy).
If dx is positive, makes Rect wider.
If dx is negative, makes Rect narrower.
If dy is positive, makes Rect taller.
If dy is negative, makes Rect shorter.
@param dx subtracted from fLeft and added to fRight
@param dy subtracted from fTop and added to fBottom
*/
void outset(float dx, float dy) {
this->inset(-dx, -dy);
}
private:
static bool Intersects(float al, float at, float ar, float ab, float bl, float bt, float br, float bb) {
float L = std::max(al, bl);
float R = std::min(ar, br);
float T = std::max(at, bt);
float B = std::min(ab, bb);
return L < R && T < B;
}
public:
/** Constructs Rect to intersect from (left, top, right, bottom). Does not sort
construction.
Returns true if Rect intersects construction.
Returns false if either construction or Rect is empty, or do not intersect.
@param left x-axis minimum of constructed Rect
@param top y-axis minimum of constructed Rect
@param right x-axis maximum of constructed Rect
@param bottom y-axis maximum of constructed Rect
@return true if construction and Rect have area in common
*/
bool intersects(float left, float top, float right, float bottom) const {
return Intersects(fLeft, fTop, fRight, fBottom, left, top, right, bottom);
}
/** Returns true if Rect intersects r.
Returns false if either r or Rect is empty, or do not intersect.
@param r Rect to intersect
@return true if r and Rect have area in common
*/
bool intersects(const Rect& r) const {
return Intersects(fLeft, fTop, fRight, fBottom, r.fLeft, r.fTop, r.fRight, r.fBottom);
}
/** Returns true if a intersects b.
Returns false if either a or b is empty, or do not intersect.
@param a Rect to intersect
@param b Rect to intersect
@return true if a and b have area in common
*/
static bool Intersects(const Rect& a, const Rect& b) {
return Intersects(a.fLeft, a.fTop, a.fRight, a.fBottom, b.fLeft, b.fTop, b.fRight, b.fBottom);
}
/** Sets Rect to the union of itself and r.
Asserts if r is empty and SK_DEBUG is defined.
If Rect is empty, sets Rect to r.
May produce incorrect results if r is empty.
@param r expansion Rect
*/
void joinNonEmptyArg(const Rect& r) {
MNN_ASSERT(!r.isEmpty());
// if we are empty, just assign
if (fLeft >= fRight || fTop >= fBottom) {
*this = r;
} else {
this->joinPossiblyEmptyRect(r);
}
}
/** Sets Rect to the union of itself and the construction.
May produce incorrect results if Rect or r is empty.
@param r expansion Rect
*/
void joinPossiblyEmptyRect(const Rect& r) {
fLeft = std::min(fLeft, r.left());
fTop = std::min(fTop, r.top());
fRight = std::max(fRight, r.right());
fBottom = std::max(fBottom, r.bottom());
}
/** Returns true if: fLeft <= x < fRight && fTop <= y < fBottom.
Returns false if Rect is empty.
@param x test Point x-coordinate
@param y test Point y-coordinate
@return true if (x, y) is inside Rect
*/
bool contains(float x, float y) const {
return x >= fLeft && x < fRight && y >= fTop && y < fBottom;
}
/** Swaps fLeft and fRight if fLeft is greater than fRight; and swaps
fTop and fBottom if fTop is greater than fBottom. Result may be empty;
and width() and height() will be zero or positive.
*/
void sort() {
using std::swap;
if (fLeft > fRight) {
swap(fLeft, fRight);
}
if (fTop > fBottom) {
swap(fTop, fBottom);
}
}
/** Returns Rect with fLeft and fRight swapped if fLeft is greater than fRight; and
with fTop and fBottom swapped if fTop is greater than fBottom. Result may be empty;
and width() and height() will be zero or positive.
@return sorted Rect
*/
Rect makeSorted() const {
return MakeLTRB(std::min(fLeft, fRight), std::min(fTop, fBottom), std::max(fLeft, fRight),
std::max(fTop, fBottom));
}
/** Returns pointer to first scalar in Rect, to treat it as an array with four
entries.
@return pointer to fLeft
*/
const float* asScalars() const {
return &fLeft;
}
};
} // namespace CV
} // namespace MNN
#endif
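
For reference, a minimal usage sketch of the Rect API deleted above. The include path is an assumption; MakeLTRB is the factory referenced by makeSorted(), and the coordinates are illustrative.

#include <MNN/Rect.h> // path assumed
using MNN::CV::Rect;
int main() {
    auto a = Rect::MakeLTRB(0.f, 0.f, 100.f, 80.f);
    auto b = Rect::MakeLTRB(50.f, 40.f, 150.f, 120.f);
    if (a.intersects(b)) {
        a.joinNonEmptyArg(b); // a now spans both rects: (0, 0, 150, 120)
    }
    Rect r = Rect::MakeLTRB(10.f, 10.f, 0.f, 0.f); // unsorted: left > right, top > bottom
    r.sort(); // swaps edges so width() and height() are non-negative
    return 0;
}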


@ -1,320 +0,0 @@
//
// Tensor.hpp
// MNN
//
// Created by MNN on 2018/08/14.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_Tensor_hpp
#define MNN_Tensor_hpp
#include <vector>
#include <MNN/HalideRuntime.h>
#include <MNN/MNNDefine.h>
namespace MNN {
/**
* data container.
 * data for a host tensor is saved in the `host` field; its memory is allocated directly with malloc.
 * data for a device tensor is saved in the `deviceId` field; its memory is allocated by the session's backend.
 * usually, device tensors are created by the engine (e.g. net, session),
 * while host tensors may be created by the engine or by the user.
*/
class MNN_PUBLIC Tensor {
public:
struct InsideDescribe;
/** dimension type used to create tensor */
enum DimensionType {
/** for tensorflow net type. uses NHWC as data format. */
TENSORFLOW,
/** for caffe net type. uses NCHW as data format. */
CAFFE,
/** for caffe net type. uses NC4HW4 as data format. */
CAFFE_C4
};
/** handle type */
enum HandleDataType {
/** default handle type */
HANDLE_NONE = 0,
/** string handle type */
HANDLE_STRING = 1
};
/** Tensor map type: Read or Write */
enum MapType {
/** map Tensor for writing data */
MAP_TENSOR_WRITE = 0,
MAP_TENSOR_READ = 1
};
public:
/**
* @brief create a tensor with dimension size and type without acquiring memory for data.
* @param dimSize dimension size.
* @param type dimension type.
*/
Tensor(int dimSize = 4, DimensionType type = CAFFE);
/**
* @brief create a tensor with same shape as given tensor.
* @param tensor shape provider.
* @param type dimension type.
* @param allocMemory acquire memory for data or not.
* @warning tensor data won't be copied.
*/
Tensor(const Tensor* tensor, DimensionType type = CAFFE, bool allocMemory = true);
/** deinitializer */
~Tensor();
private:
Tensor(bool deepCopy, const Tensor* tensor);
// remove all assignment operator
Tensor(const Tensor& tensor) = delete;
Tensor(const Tensor&& tensor) = delete;
Tensor& operator=(const Tensor&) = delete;
Tensor& operator=(const Tensor&&) = delete;
public:
/**
* @brief create tensor with shape, data type and dimension type.
* @param shape tensor shape.
* @param type data type.
* @param dimType dimension type.
* @return created tensor.
* @warning memory for data won't be acquired. call backend's onAcquireBuffer to get memory ready.
*/
static Tensor* createDevice(const std::vector<int>& shape, halide_type_t type, DimensionType dimType = TENSORFLOW);
/**
* @brief create tensor with shape and dimension type. data type is represented by `T`.
* @param shape tensor shape.
* @param dimType dimension type.
* @return created tensor.
* @warning memory for data won't be acquired. call backend's onAcquireBuffer to get memory ready.
*/
template <typename T>
static Tensor* createDevice(const std::vector<int>& shape, DimensionType dimType = TENSORFLOW) {
return createDevice(shape, halide_type_of<T>(), dimType);
}
/**
* @brief create tensor with shape, data type, data and dimension type.
* @param shape tensor shape.
* @param type data type.
* @param data data to save.
* @param dimType dimension type.
* @return created tensor.
*/
static Tensor* create(const std::vector<int>& shape, halide_type_t type, void* data = NULL,
DimensionType dimType = TENSORFLOW);
/**
* @brief create tensor with shape, data and dimension type. data type is represented by `T`.
* @param shape tensor shape.
* @param data data to save.
* @param dimType dimension type.
* @return created tensor.
*/
template <typename T>
static Tensor* create(const std::vector<int>& shape, void* data = NULL, DimensionType dimType = TENSORFLOW) {
return create(shape, halide_type_of<T>(), data, dimType);
}
/**
* @brief copy tensor.
* @param src tensor
* @param deepCopy whether to create new content and copy; currently only deepCopy = false is supported
*/
static Tensor* clone(const Tensor* src, bool deepCopy = false);
/**
* @brief delete tensor.
* @param src tensor
*/
static void destroy(Tensor* tensor);
public:
/**
* @brief for DEVICE tensor, copy data from given host tensor.
* @param hostTensor host tensor, the data provider.
* @return true for DEVICE tensor, and false for HOST tensor.
*/
bool copyFromHostTensor(const Tensor* hostTensor);
/**
* @brief for DEVICE tensor, copy data to given host tensor.
* @param hostTensor host tensor, the data consumer.
* @return true for DEVICE tensor, and false for HOST tensor.
*/
bool copyToHostTensor(Tensor* hostTensor) const;
/**
* @brief create HOST tensor from DEVICE tensor, with or without data copying.
* @param deviceTensor given device tensor.
* @param copyData copy data or not.
* @return created host tensor.
*/
static Tensor* createHostTensorFromDevice(const Tensor* deviceTensor, bool copyData = true);
public:
const halide_buffer_t& buffer() const {
return mBuffer;
}
halide_buffer_t& buffer() {
return mBuffer;
}
/**
* @brief get dimension type.
* @return dimension type.
*/
DimensionType getDimensionType() const;
/**
* @brief handle data type. used when data type code is halide_type_handle.
* @return handle data type.
*/
HandleDataType getHandleDataType() const;
/**
* @brief set data type.
* @param type data type defined in 'Type_generated.h'.
*/
void setType(int type);
/**
* @brief get data type.
* @return data type.
*/
inline halide_type_t getType() const {
return mBuffer.type;
}
/**
* @brief visit host memory, data type is represented by `T`.
* @return data point in `T` type.
*/
template <typename T>
T* host() const {
return (T*)mBuffer.host;
}
/**
* @brief visit device memory.
* @return device data ID. what the ID means varies between backends.
*/
uint64_t deviceId() const {
return mBuffer.device;
}
public:
int dimensions() const {
return mBuffer.dimensions;
}
/**
* @brief get all dimensions' extent.
* @return dimensions' extent.
*/
std::vector<int> shape() const;
/**
* @brief calculate number of bytes needed to store data taking reordering flag into account.
* @return bytes needed to store data
*/
int size() const;
size_t usize() const;
/**
* @brief calculate number of elements needed to store data taking reordering flag into account.
* @return elements needed to store data
*/
inline int elementSize() const {
return size() / mBuffer.type.bytes();
}
public:
inline int width() const {
if (getDimensionType() == TENSORFLOW) {
return mBuffer.dim[2].extent;
}
return mBuffer.dim[3].extent;
}
inline int height() const {
if (getDimensionType() == TENSORFLOW) {
return mBuffer.dim[1].extent;
}
return mBuffer.dim[2].extent;
}
inline int channel() const {
if (getDimensionType() == TENSORFLOW) {
return mBuffer.dim[3].extent;
}
return mBuffer.dim[1].extent;
}
inline int batch() const {
return mBuffer.dim[0].extent;
}
// visit dimension's extent & stride
inline int stride(int index) const {
return mBuffer.dim[index].stride;
}
inline int length(int index) const {
return mBuffer.dim[index].extent;
}
inline void setStride(int index, int stride) {
mBuffer.dim[index].stride = stride;
}
inline void setLength(int index, int length) {
mBuffer.dim[index].extent = length;
}
/**
* @brief For GPU and other devices, get memory directly; see MNNSharedContext for details
* @return Success or not. Returns false if forwardType differs from the tensor's backend type, or the backend is CPU.
*/
bool getDeviceInfo(void* dst, int forwardType) const;
public:
/**
* @brief print tensor data. for DEBUG use only.
*/
void print() const;
/**
*@brief print tensor shape
*/
void printShape() const;
public:
/**
* @brief map/unmap GPU Tensor, to get host ptr
*/
void* map(MapType mtype, DimensionType dtype);
void unmap(MapType mtype, DimensionType dtype, void* mapPtr);
/**
* @brief wait until the tensor is ready to read / write
* @param mtype wait for read or write
* @param finish wait for command flush or finish
*/
int wait(MapType mtype, bool finish);
/**
* @brief set GPU tensor device ptr, and inform memory type
*/
bool setDevicePtr(const void* devicePtr, int memoryType);
private:
halide_buffer_t mBuffer;
struct InsideDescribe* mDescribe;
private:
friend class TensorUtils;
};
} // namespace MNN
#endif /* Tensor_hpp */
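
For reference, a minimal host-side sketch of the Tensor API removed above, assuming create() allocates host memory when the data pointer is null; shape and values are illustrative.

#include <MNN/Tensor.hpp>
#include <cstdio>
using MNN::Tensor;
int main() {
    // 1x3x4x4 float host tensor in NCHW (CAFFE) layout.
    auto t = Tensor::create<float>({1, 3, 4, 4}, nullptr, Tensor::CAFFE);
    float* data = t->host<float>(); // direct host memory access
    for (int i = 0; i < t->elementSize(); ++i) {
        data[i] = 0.f;
    }
    printf("bytes=%d elements=%d\n", t->size(), t->elementSize());
    Tensor::destroy(t);
    return 0;
}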


@ -1,27 +0,0 @@
//
// calib3d.hpp
// MNN
//
// Created by MNN on 2022/07/14.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CALIB3D_HPP
#define CALIB3D_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>
#include "types.hpp"
namespace MNN {
namespace CV {
MNN_PUBLIC VARP Rodrigues(VARP src);
MNN_PUBLIC std::pair<VARP, VARP> solvePnP(VARP objectPoints, VARP imagePoints, VARP cameraMatrix, VARP distCoeffs,
bool useExtrinsicGuess = false);
} // CV
} // MNN
#endif // CALIB3D_HPP
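
A hedged sketch of the solvePnP interface above. The intrinsics, the four point correspondences, and the include path are hypothetical placeholders; _Const is the constant builder from NeuralNetWorkOp.hpp.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    float K[9]    = {800.f, 0.f, 320.f, 0.f, 800.f, 240.f, 0.f, 0.f, 1.f};
    float obj[12] = {0}; // 4 world points (x, y, z) -- placeholder values
    float img[8]  = {0}; // 4 pixel points (x, y) -- placeholder values
    auto cameraMatrix = _Const(K, {3, 3});
    auto distCoeffs   = _Const(0.f, {4});
    auto rt = solvePnP(_Const(obj, {4, 3}), _Const(img, {4, 2}), cameraMatrix, distCoeffs);
    VARP rvec = rt.first;  // rotation vector (Rodrigues form)
    VARP tvec = rt.second; // translation vector
    return 0;
}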


@ -1,33 +0,0 @@
//
// core.hpp
// MNN
//
// Created by MNN on 2023/04/18.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CORE_HPP
#define CORE_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>
#include "types.hpp"
namespace MNN {
namespace CV {
enum DecompTypes {
DECOMP_LU = 0,
DECOMP_SVD = 1,
DECOMP_EIG = 2,
DECOMP_CHOLESKY = 3,
DECOMP_QR = 4,
DECOMP_NORMAL = 16
};
MNN_PUBLIC std::pair<bool, VARP> solve(VARP src1, VARP src2, int flags = DECOMP_LU);
} // CV
} // MNN
#endif // CORE_HPP
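
A minimal sketch of solve() above on a 2x2 system A·x = b with LU decomposition; the values are illustrative, and _Const comes from NeuralNetWorkOp.hpp.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    float A[4] = {2.f, 1.f, 1.f, 3.f};
    float b[2] = {3.f, 5.f};
    auto res = solve(_Const(A, {2, 2}), _Const(b, {2, 1}), DECOMP_LU);
    if (res.first) { // true when the decomposition succeeded
        const float* x = res.second->readMap<float>(); // expected: {0.8, 1.4}
    }
    return 0;
}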


@ -1,18 +0,0 @@
//
// cv.hpp
// MNN
//
// Created by MNN on 2021/09/02.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CV_HPP
#define CV_HPP
#include "types.hpp"
#include "core.hpp"
#include "calib3d.hpp"
#include "imgcodecs.hpp"
#include "imgproc/imgproc.hpp"
#endif // CV_HPP


@ -1,45 +0,0 @@
//
// imgcodecs.hpp
// MNN
//
// Created by MNN on 2021/08/26.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef IMGCODECS_HPP
#define IMGCODECS_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
namespace MNN {
namespace CV {
using namespace Express;
enum ImreadModes {
IMREAD_GRAYSCALE = 0, // uint8_t gray
IMREAD_COLOR = 1, // uint8_t bgr
IMREAD_ANYDEPTH = 4, // float bgr
};
enum ImwriteFlags {
IMWRITE_JPEG_QUALITY = 1, // jpg, default is 95
};
MNN_PUBLIC bool haveImageReader(const std::string& filename);
MNN_PUBLIC bool haveImageWriter(const std::string& filename);
MNN_PUBLIC VARP imdecode(const std::vector<uint8_t>& buf, int flags);
MNN_PUBLIC std::pair<bool, std::vector<uint8_t>> imencode(std::string ext, VARP img,
const std::vector<int>& params = std::vector<int>());
MNN_PUBLIC VARP imread(const std::string& filename, int flags = IMREAD_COLOR);
MNN_PUBLIC bool imwrite(const std::string& filename, VARP img,
const std::vector<int>& params = std::vector<int>());
} // CV
} // MNN
#endif // IMGCODECS_HPP
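
A minimal read/write round trip through the imgcodecs API above; file names are placeholders.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto img = imread("input.jpg"); // uint8_t BGR by default
    if (img.get() != nullptr) {
        imwrite("output.jpg", img, {IMWRITE_JPEG_QUALITY, 90});
    }
    return 0;
}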


@ -1,200 +0,0 @@
//
// color.hpp
// MNN
//
// Created by MNN on 2021/08/18.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef COLOR_HPP
#define COLOR_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
#include "../types.hpp"
namespace MNN {
namespace CV {
using namespace Express;
enum ColorConversionCodes {
COLOR_BGR2BGRA = 0,
COLOR_RGB2RGBA = COLOR_BGR2BGRA,
COLOR_BGRA2BGR = 1,
COLOR_RGBA2RGB = COLOR_BGRA2BGR,
COLOR_BGR2RGBA = 2,
COLOR_RGB2BGRA = COLOR_BGR2RGBA,
COLOR_RGBA2BGR = 3,
COLOR_BGRA2RGB = COLOR_RGBA2BGR,
COLOR_BGR2RGB = 4,
COLOR_RGB2BGR = COLOR_BGR2RGB,
COLOR_BGRA2RGBA = 5,
COLOR_RGBA2BGRA = COLOR_BGRA2RGBA,
COLOR_BGR2GRAY = 6,
COLOR_RGB2GRAY = 7,
COLOR_GRAY2BGR = 8,
COLOR_GRAY2RGB = COLOR_GRAY2BGR,
COLOR_GRAY2BGRA = 9,
COLOR_GRAY2RGBA = COLOR_GRAY2BGRA,
COLOR_BGRA2GRAY = 10,
COLOR_RGBA2GRAY = 11,
COLOR_BGR2BGR565 = 12,
COLOR_RGB2BGR565 = 13,
COLOR_BGR5652BGR = 14,
COLOR_BGR5652RGB = 15,
COLOR_BGRA2BGR565 = 16,
COLOR_RGBA2BGR565 = 17,
COLOR_BGR5652BGRA = 18,
COLOR_BGR5652RGBA = 19,
COLOR_GRAY2BGR565 = 20,
COLOR_BGR5652GRAY = 21,
COLOR_BGR2BGR555 = 22,
COLOR_RGB2BGR555 = 23,
COLOR_BGR5552BGR = 24,
COLOR_BGR5552RGB = 25,
COLOR_BGRA2BGR555 = 26,
COLOR_RGBA2BGR555 = 27,
COLOR_BGR5552BGRA = 28,
COLOR_BGR5552RGBA = 29,
COLOR_GRAY2BGR555 = 30,
COLOR_BGR5552GRAY = 31,
COLOR_BGR2XYZ = 32,
COLOR_RGB2XYZ = 33,
COLOR_XYZ2BGR = 34,
COLOR_XYZ2RGB = 35,
COLOR_BGR2YCrCb = 36,
COLOR_RGB2YCrCb = 37,
COLOR_YCrCb2BGR = 38,
COLOR_YCrCb2RGB = 39,
COLOR_BGR2HSV = 40,
COLOR_RGB2HSV = 41,
COLOR_BGR2Lab = 44,
COLOR_RGB2Lab = 45,
COLOR_BGR2Luv = 50,
COLOR_RGB2Luv = 51,
COLOR_BGR2HLS = 52,
COLOR_RGB2HLS = 53,
COLOR_HSV2BGR = 54,
COLOR_HSV2RGB = 55,
COLOR_Lab2BGR = 56,
COLOR_Lab2RGB = 57,
COLOR_Luv2BGR = 58,
COLOR_Luv2RGB = 59,
COLOR_HLS2BGR = 60,
COLOR_HLS2RGB = 61,
COLOR_BGR2HSV_FULL = 66,
COLOR_RGB2HSV_FULL = 67,
COLOR_BGR2HLS_FULL = 68,
COLOR_RGB2HLS_FULL = 69,
COLOR_HSV2BGR_FULL = 70,
COLOR_HSV2RGB_FULL = 71,
COLOR_HLS2BGR_FULL = 72,
COLOR_HLS2RGB_FULL = 73,
COLOR_LBGR2Lab = 74,
COLOR_LRGB2Lab = 75,
COLOR_LBGR2Luv = 76,
COLOR_LRGB2Luv = 77,
COLOR_Lab2LBGR = 78,
COLOR_Lab2LRGB = 79,
COLOR_Luv2LBGR = 80,
COLOR_Luv2LRGB = 81,
COLOR_BGR2YUV = 82,
COLOR_RGB2YUV = 83,
COLOR_YUV2BGR = 84,
COLOR_YUV2RGB = 85,
COLOR_YUV2RGB_NV12 = 90,
COLOR_YUV2BGR_NV12 = 91,
COLOR_YUV2RGB_NV21 = 92,
COLOR_YUV2BGR_NV21 = 93,
COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
COLOR_YUV2RGBA_NV12 = 94,
COLOR_YUV2BGRA_NV12 = 95,
COLOR_YUV2RGBA_NV21 = 96,
COLOR_YUV2BGRA_NV21 = 97,
COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
COLOR_YUV2RGB_YV12 = 98,
COLOR_YUV2BGR_YV12 = 99,
COLOR_YUV2RGB_IYUV = 100,
COLOR_YUV2BGR_IYUV = 101,
COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
COLOR_YUV2RGBA_YV12 = 102,
COLOR_YUV2BGRA_YV12 = 103,
COLOR_YUV2RGBA_IYUV = 104,
COLOR_YUV2BGRA_IYUV = 105,
COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
COLOR_YUV2GRAY_420 = 106,
COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
COLOR_YUV2RGB_UYVY = 107,
COLOR_YUV2BGR_UYVY = 108,
COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
COLOR_YUV2RGBA_UYVY = 111,
COLOR_YUV2BGRA_UYVY = 112,
COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
COLOR_YUV2RGB_YUY2 = 115,
COLOR_YUV2BGR_YUY2 = 116,
COLOR_YUV2RGB_YVYU = 117,
COLOR_YUV2BGR_YVYU = 118,
COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
COLOR_YUV2RGBA_YUY2 = 119,
COLOR_YUV2BGRA_YUY2 = 120,
COLOR_YUV2RGBA_YVYU = 121,
COLOR_YUV2BGRA_YVYU = 122,
COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
COLOR_YUV2GRAY_UYVY = 123,
COLOR_YUV2GRAY_YUY2 = 124,
COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
COLOR_RGBA2mRGBA = 125,
COLOR_mRGBA2RGBA = 126,
COLOR_RGB2YUV_I420 = 127,
COLOR_BGR2YUV_I420 = 128,
COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
COLOR_RGBA2YUV_I420 = 129,
COLOR_BGRA2YUV_I420 = 130,
COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
COLOR_RGB2YUV_YV12 = 131,
COLOR_BGR2YUV_YV12 = 132,
COLOR_RGBA2YUV_YV12 = 133,
COLOR_BGRA2YUV_YV12 = 134,
COLOR_COLORCVT_MAX = 143
};
MNN_PUBLIC VARP cvtColor(VARP src, int code, int dstCn = 0);
MNN_PUBLIC VARP cvtColorTwoPlane(VARP src1, VARP src2, int code);
MNN_PUBLIC VARP demosaicing(VARP src, int code, int dstCn = 0);
} // CV
} // MNN
#endif // COLOR_HPP
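
A short cvtColor sketch for the conversion codes above, reusing imread from imgcodecs.hpp; the file name is a placeholder.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto bgr  = imread("input.jpg", IMREAD_COLOR);
    auto gray = cvtColor(bgr, COLOR_BGR2GRAY); // 3-channel BGR -> 1-channel gray
    auto rgba = cvtColor(bgr, COLOR_BGR2RGBA); // add alpha and swap R/B
    return 0;
}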


@ -1,49 +0,0 @@
//
// draw.hpp
// MNN
//
// Created by MNN on 2021/08/26.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef DRAW_HPP
#define DRAW_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>
#include "../types.hpp"
namespace MNN {
namespace CV {
enum LineTypes {
FILLED = -1,
LINE_4 = 4,
LINE_8 = 8,
LINE_AA = 16
};
MNN_PUBLIC void arrowedLine(VARP& img, Point pt1, Point pt2, const Scalar& color,
int thickness=1, int line_type=8, int shift=0, double tipLength=0.1);
MNN_PUBLIC void circle(VARP& img, Point center, int radius, const Scalar& color,
int thickness=1, int line_type=8, int shift=0);
MNN_PUBLIC void ellipse(VARP& img, Point center, Size axes, double angle,
double start_angle, double end_angle, const Scalar& color,
int thickness=1, int line_type=8, int shift=0);
MNN_PUBLIC void line(VARP& img, Point pt1, Point pt2, const Scalar& color,
int thickness = 1, int lineType = LINE_8, int shift = 0);
MNN_PUBLIC void rectangle(VARP& img, Point pt1, Point pt2, const Scalar& color,
int thickness = 1, int lineType = LINE_8, int shift = 0);
MNN_PUBLIC void drawContours(VARP& img, std::vector<std::vector<Point>> _contours, int contourIdx, const Scalar& color,
int thickness = 1, int lineType = LINE_8);
MNN_PUBLIC void fillPoly(VARP& img, std::vector<std::vector<Point>> pts, const Scalar& color,
int line_type = LINE_8, int shift = 0, Point offset = {0, 0});
} // CV
} // MNN
#endif // DRAW_HPP
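
A minimal drawing sketch for the API above; coordinates, colors, and file names are illustrative, and Point/Scalar are the typedefs from types.hpp.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto img = imread("input.jpg"); // placeholder file
    rectangle(img, {20, 20}, {120, 90}, Scalar(0, 255, 0), 2); // 2px outline
    circle(img, {70, 55}, 10, Scalar(0, 0, 255), FILLED);      // filled disk
    line(img, {0, 0}, {200, 100}, Scalar(255, 0, 0));
    imwrite("annotated.jpg", img);
    return 0;
}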


@ -1,74 +0,0 @@
//
// filter.hpp
// MNN
//
// Created by MNN on 2021/08/18.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef FILTER_HPP
#define FILTER_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/MathOp.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>
#include "../types.hpp"
namespace MNN {
namespace CV {
MNN_PUBLIC VARP bilateralFilter(VARP src, int d, double sigmaColor, double sigmaSpace,
int borderType = REFLECT);
MNN_PUBLIC VARP blur(VARP src, Size ksize, int borderType = REFLECT);
MNN_PUBLIC VARP boxFilter(VARP src, int ddepth, Size ksize,
bool normalize = true, int borderType = REFLECT);
MNN_PUBLIC VARP dilate(VARP src, VARP kernel,
int iterations = 1, int borderType = CONSTANT);
MNN_PUBLIC VARP erode(VARP src, VARP kernel,
int iterations = 1, int borderType = CONSTANT);
MNN_PUBLIC VARP filter2D(VARP src, int ddepth, VARP kernel,
double delta = 0, int borderType = REFLECT);
MNN_PUBLIC VARP GaussianBlur(VARP src, Size ksize, double sigmaX,
double sigmaY = 0, int borderType = REFLECT);
MNN_PUBLIC std::pair<VARP, VARP> getDerivKernels(int dx, int dy, int ksize,
bool normalize = false);
MNN_PUBLIC VARP getGaborKernel(Size ksize, double sigma, double theta, double lambd,
double gamma, double psi = MNN_PI * 0.5);
MNN_PUBLIC VARP getGaussianKernel(int n, double sigma);
MNN_PUBLIC VARP getStructuringElement(int shape, Size ksize);
MNN_PUBLIC VARP Laplacian(VARP src, int ddepth, int ksize = 1,
double scale = 1, double delta = 0, int borderType = REFLECT);
MNN_PUBLIC VARP pyrDown(VARP src, Size dstsize = {}, int borderType = REFLECT);
MNN_PUBLIC VARP pyrUp(VARP src, Size dstsize = {}, int borderType = REFLECT);
MNN_PUBLIC VARP Scharr(VARP src, int ddepth, int dx, int dy,
double scale = 1, double delta = 0, int borderType = REFLECT);
MNN_PUBLIC VARP sepFilter2D(VARP src, int ddepth, VARP& kernelX, VARP& kernelY,
double delta = 0, int borderType = REFLECT);
MNN_PUBLIC VARP Sobel(VARP src, int ddepth, int dx, int dy, int ksize = 3,
double scale = 1, double delta = 0, int borderType = REFLECT);
MNN_PUBLIC std::pair<VARP, VARP> spatialGradient(VARP src, int ksize = 3,
int borderType = REFLECT);
MNN_PUBLIC VARP sqrBoxFilter(VARP src, int ddepth, Size ksize,
bool normalize = true, int borderType = REFLECT);
} // CV
} // MNN
#endif // FILTER_HPP
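
A minimal filtering sketch over the API above; kernel sizes and sigma are illustrative.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto img    = imread("input.jpg");            // placeholder file
    auto smooth = GaussianBlur(img, {5, 5}, 1.5); // 5x5 kernel, sigmaX = 1.5
    auto gx     = Sobel(img, -1, 1, 0);           // first derivative along x
    auto box    = blur(img, {3, 3});              // normalized box filter
    return 0;
}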


@ -1,75 +0,0 @@
//
// geometric.hpp
// MNN
//
// Created by MNN on 2021/08/19.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef GEOMETRIC_HPP
#define GEOMETRIC_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
#include <MNN/ImageProcess.hpp>
#include "../types.hpp"
namespace MNN {
namespace CV {
enum InterpolationFlags {
INTER_NEAREST = 0,
INTER_LINEAR = 1,
INTER_CUBIC = 2,
INTER_AREA = 3,
INTER_LANCZOS4 = 4,
INTER_LINEAR_EXACT = 5,
INTER_NEAREST_EXACT = 6,
INTER_MAX = 7,
WARP_FILL_OUTLIERS = 8,
WARP_INVERSE_MAP = 16
};
enum BorderTypes {
BORDER_CONSTANT = 0,
BORDER_REPLICATE = 1,
BORDER_REFLECT = 2,
BORDER_WRAP = 3,
BORDER_REFLECT_101 = 4,
BORDER_TRANSPARENT = 5,
BORDER_REFLECT101 = BORDER_REFLECT_101,
BORDER_DEFAULT = BORDER_REFLECT_101,
BORDER_ISOLATED = 16
};
MNN_PUBLIC std::pair<VARP, VARP> convertMaps(VARP map1, VARP map2, int dstmap1type,
bool interpolation = false);
MNN_PUBLIC Matrix getAffineTransform(const Point src[], const Point dst[]);
MNN_PUBLIC Matrix getPerspectiveTransform(const Point src[], const Point dst[]);
MNN_PUBLIC VARP getRectSubPix(VARP image, Size patchSize, Point center);
MNN_PUBLIC Matrix getRotationMatrix2D(Point center, double angle, double scale);
MNN_PUBLIC Matrix invertAffineTransform(Matrix M);
MNN_PUBLIC VARP remap(VARP src, VARP map1, VARP map2, int interpolation, int borderMode = BORDER_CONSTANT, int borderValue = 0);
MNN_PUBLIC VARP resize(VARP src, Size dsize, double fx = 0, double fy = 0,
int interpolation = INTER_LINEAR, int code = -1,
std::vector<float> mean = {}, std::vector<float> norm = {});
MNN_PUBLIC VARP warpAffine(VARP src, Matrix M, Size dsize,
int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT, int borderValue = 0,
int code = -1, std::vector<float> mean = {}, std::vector<float> norm = {});
MNN_PUBLIC VARP warpPerspective(VARP src, Matrix M, Size dsize,
int flags = INTER_LINEAR, int borderMode = BORDER_CONSTANT,
int borderValue = 0);
MNN_PUBLIC VARP undistortPoints(VARP src, VARP cameraMatrix, VARP distCoeffs);
} // CV
} // MNN
#endif // GEOMETRIC_HPP
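
A minimal geometric-transform sketch over the API above, using the Point typedef from types.hpp; sizes and angles are illustrative.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto img     = imread("input.jpg");     // placeholder file
    auto resized = resize(img, {224, 224}); // bilinear by default
    Matrix M     = getRotationMatrix2D(Point(112, 112), 45.0, 1.0);
    auto rotated = warpAffine(resized, M, {224, 224});
    return 0;
}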


@ -1,24 +0,0 @@
//
// histograms.hpp
// MNN
//
// Created by MNN on 2022/07/26.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef HISTOGRAMS_HPP
#define HISTOGRAMS_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
namespace MNN {
namespace CV {
using namespace Express;
MNN_PUBLIC VARP calcHist(VARPS images, const std::vector<int>& channels, VARP mask,
const std::vector<int>& histSize, const std::vector<float>& ranges, bool accumulate = false);
} // CV
} // MNN
#endif // HISTOGRAMS_HPP
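
A hedged sketch of calcHist above, assuming a null VARP is an acceptable empty mask; the bin count and range are illustrative.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto img = imread("input.jpg");      // placeholder file
    VARP mask((Variable*)nullptr);       // empty mask (assumed acceptable)
    // 32-bin histogram of channel 0 over the value range [0, 256)
    auto hist = calcHist({img}, {0}, mask, {32}, {0.f, 256.f});
    return 0;
}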


@ -1,20 +0,0 @@
//
// imgproc.hpp
// MNN
//
// Created by MNN on 2021/08/13.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef IMGPROC_HPP
#define IMGPROC_HPP
#include "filter.hpp"
#include "geometric.hpp"
#include "draw.hpp"
#include "miscellaneous.hpp"
#include "color.hpp"
#include "structural.hpp"
#include "histograms.hpp"
#endif // IMGPROC_HPP


@ -1,49 +0,0 @@
//
// miscellaneous.hpp
// MNN
//
// Created by MNN on 2021/08/20.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MISCELLANEOUS_HPP
#define MISCELLANEOUS_HPP
#include <MNN/MNNDefine.h>
#include <MNN/expr/Expr.hpp>
namespace MNN {
namespace CV {
using namespace Express;
enum AdaptiveThresholdTypes {
ADAPTIVE_THRESH_MEAN_C = 0,
ADAPTIVE_THRESH_GAUSSIAN_C = 1
};
enum ThresholdTypes {
THRESH_BINARY = 0,
THRESH_BINARY_INV = 1,
THRESH_TRUNC = 2,
THRESH_TOZERO = 3,
THRESH_TOZERO_INV = 4,
THRESH_MASK = 7,
THRESH_OTSU = 8,
THRESH_TRIANGLE = 16
};
MNN_PUBLIC VARP adaptiveThreshold(VARP src, double maxValue, int adaptiveMethod, int thresholdType, int blockSize, double C);
MNN_PUBLIC VARP blendLinear(VARP src1, VARP src2, VARP weight1, VARP weight2);
MNN_PUBLIC void distanceTransform(VARP src, VARP& dst, VARP& labels, int distanceType, int maskSize, int labelType = 0);
MNN_PUBLIC int floodFill(VARP image, std::pair<int, int> seedPoint, float newVal);
MNN_PUBLIC VARP integral(VARP src, int sdepth = -1);
MNN_PUBLIC VARP threshold(VARP src, double thresh, double maxval, int type);
} // CV
} // MNN
#endif // MISCELLANEOUS_HPP
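
A minimal thresholding sketch for the API above; the threshold values and block size are illustrative.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto gray = cvtColor(imread("input.jpg"), COLOR_BGR2GRAY);
    auto bin  = threshold(gray, 127, 255, THRESH_BINARY);
    auto ada  = adaptiveThreshold(gray, 255, ADAPTIVE_THRESH_MEAN_C,
                                  THRESH_BINARY, 11, 2.0);
    return 0;
}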


@ -1,56 +0,0 @@
//
// structural.hpp
// MNN
//
// Created by MNN on 2021/12/01.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef STRUCTURAL_HPP
#define STRUCTURAL_HPP
#include <MNN/MNNDefine.h>
#include "../types.hpp"
namespace MNN {
namespace CV {
enum RetrievalModes {
RETR_EXTERNAL = 0,
RETR_LIST = 1,
RETR_CCOMP = 2,
RETR_TREE = 3,
RETR_FLOODFILL = 4
};
enum ContourApproximationModes {
CHAIN_APPROX_NONE = 1,
CHAIN_APPROX_SIMPLE = 2,
CHAIN_APPROX_TC89_L1 = 3,
CHAIN_APPROX_TC89_KCOS = 4
};
class RotatedRect
{
public:
//! default constructor
RotatedRect() {}
//! the rectangle mass center
Point2f center;
//! the width and height of the rectangle
Size2f size;
//! the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle.
float angle;
};
typedef std::vector<Point> POINTS;
MNN_PUBLIC std::vector<VARP> findContours(VARP image, int mode, int method, Point offset = {0, 0});
MNN_PUBLIC double contourArea(VARP _contour, bool oriented = false);
MNN_PUBLIC std::vector<int> convexHull(VARP _points, bool clockwise = false, bool returnPoints = true);
MNN_PUBLIC RotatedRect minAreaRect(VARP _points);
MNN_PUBLIC Rect2i boundingRect(VARP points);
MNN_PUBLIC int connectedComponentsWithStats(VARP image, VARP& labels, VARP& statsv, VARP& centroids, int connectivity = 8);
MNN_PUBLIC VARP boxPoints(RotatedRect box);
} // CV
} // MNN
#endif // STRUCTURAL_HPP
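
A minimal contour-analysis sketch over the API above, chaining the threshold helpers from miscellaneous.hpp; the input file is a placeholder.

#include "cv.hpp" // umbrella header from this diff; path assumed
using namespace MNN::CV;
int main() {
    auto gray = cvtColor(imread("input.jpg"), COLOR_BGR2GRAY);
    auto bin  = threshold(gray, 127, 255, THRESH_BINARY);
    auto contours = findContours(bin, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    for (auto& c : contours) {
        Rect2i box  = boundingRect(c); // axis-aligned bounding box
        double area = contourArea(c);
    }
    return 0;
}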


@ -1,454 +0,0 @@
//
// types.hpp
// MNN
//
// Created by MNN on 2021/08/18.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef TYPES_HPP
#define TYPES_HPP
#include <MNN/expr/Expr.hpp>
namespace MNN {
namespace CV {
using namespace Express;
#define MNN_PI 3.1415926535897932384626433832795
typedef signed char schar;
typedef unsigned char uchar;
// Size Start
template<typename _Tp> class Size_
{
public:
typedef _Tp value_type;
//! default constructor
Size_();
Size_(_Tp _width, _Tp _height);
Size_(const Size_& sz);
Size_(Size_&& sz);
Size_& operator = (const Size_& sz);
Size_& operator = (Size_&& sz);
//! the area (width*height)
_Tp area() const;
//! aspect ratio (width/height)
double aspectRatio() const;
//! true if empty
bool empty() const;
//! conversion to another data type.
template<typename _Tp2> operator Size_<_Tp2>() const;
_Tp width; //!< the width
_Tp height; //!< the height
};
typedef Size_<int> Size2i;
typedef Size_<int64_t> Size2l;
typedef Size_<float> Size2f;
typedef Size_<double> Size2d;
typedef Size2i Size;
template<typename _Tp> inline
Size_<_Tp>::Size_()
: width(0), height(0) {}
template<typename _Tp> inline
Size_<_Tp>::Size_(_Tp _width, _Tp _height)
: width(_width), height(_height) {}
template<typename _Tp> inline
Size_<_Tp>::Size_(const Size_& sz)
: width(sz.width), height(sz.height) {}
template<typename _Tp> inline
Size_<_Tp>::Size_(Size_&& sz)
: width(std::move(sz.width)), height(std::move(sz.height)) {}
template<typename _Tp> template<typename _Tp2> inline
Size_<_Tp>::operator Size_<_Tp2>() const
{
return Size_<_Tp2>(static_cast<_Tp2>(width), static_cast<_Tp2>(height));
}
template<typename _Tp> inline
Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz)
{
width = sz.width; height = sz.height;
return *this;
}
template<typename _Tp> inline
Size_<_Tp>& Size_<_Tp>::operator = (Size_<_Tp>&& sz)
{
width = std::move(sz.width); height = std::move(sz.height);
return *this;
}
template<typename _Tp> inline
_Tp Size_<_Tp>::area() const
{
return width * height;
}
template<typename _Tp> inline
bool Size_<_Tp>::empty() const
{
return width <= 0 || height <= 0;
}
template<typename _Tp> static inline
Size_<_Tp>& operator *= (Size_<_Tp>& a, _Tp b)
{
a.width *= b;
a.height *= b;
return a;
}
template<typename _Tp> static inline
Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b)
{
Size_<_Tp> tmp(a);
tmp *= b;
return tmp;
}
template<typename _Tp> static inline
Size_<_Tp>& operator /= (Size_<_Tp>& a, _Tp b)
{
a.width /= b;
a.height /= b;
return a;
}
template<typename _Tp> static inline
Size_<_Tp> operator / (const Size_<_Tp>& a, _Tp b)
{
Size_<_Tp> tmp(a);
tmp /= b;
return tmp;
}
template<typename _Tp> static inline
Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b)
{
a.width += b.width;
a.height += b.height;
return a;
}
template<typename _Tp> static inline
Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b)
{
Size_<_Tp> tmp(a);
tmp += b;
return tmp;
}
template<typename _Tp> static inline
Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b)
{
a.width -= b.width;
a.height -= b.height;
return a;
}
template<typename _Tp> static inline
Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b)
{
Size_<_Tp> tmp(a);
tmp -= b;
return tmp;
}
template<typename _Tp> static inline
bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b)
{
return a.width == b.width && a.height == b.height;
}
template<typename _Tp> static inline
bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b)
{
return !(a == b);
}
// Size End
// Point Start
template<typename _Tp> class Point_
{
public:
typedef _Tp value_type;
//! default constructor
Point_();
Point_(_Tp _x, _Tp _y);
Point_(const Point_& pt);
Point_(Point_&& pt);
Point_(const Size_<_Tp>& sz);
Point_& operator = (const Point_& pt);
Point_& operator = (Point_&& pt);
template<typename _Tp2> operator Point_<_Tp2>() const;
_Tp x; //!< x coordinate of the point
_Tp y; //!< y coordinate of the point
};
typedef Point_<int> Point2i;
typedef Point_<int64_t> Point2l;
typedef Point_<float> Point2f;
typedef Point_<double> Point2d;
template<typename _Tp> inline
Point_<_Tp>::Point_()
: x(0), y(0) {}
template<typename _Tp> inline
Point_<_Tp>::Point_(_Tp _x, _Tp _y)
: x(_x), y(_y) {}
template<typename _Tp> inline
Point_<_Tp>::Point_(const Point_& pt)
: x(pt.x), y(pt.y) {}
template<typename _Tp> inline
Point_<_Tp>::Point_(Point_&& pt)
: x(std::move(pt.x)), y(std::move(pt.y)) {}
template<typename _Tp> inline
Point_<_Tp>::Point_(const Size_<_Tp>& sz)
: x(sz.width), y(sz.height) {}
template<typename _Tp> inline
Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt)
{
x = pt.x; y = pt.y;
return *this;
}
template<typename _Tp> inline
Point_<_Tp>& Point_<_Tp>::operator = (Point_&& pt)
{
x = std::move(pt.x); y = std::move(pt.y);
return *this;
}
template<typename _Tp> template<typename _Tp2> inline
Point_<_Tp>::operator Point_<_Tp2>() const
{
return Point_<_Tp2>(static_cast<_Tp2>(x), static_cast<_Tp2>(y));
}
template<typename _Tp> static inline
Point_<_Tp>& operator += (Point_<_Tp>& a, const Point_<_Tp>& b)
{
a.x += b.x;
a.y += b.y;
return a;
}
template<typename _Tp> static inline
Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b)
{
return Point_<_Tp>( static_cast<_Tp>(a.x - b.x), static_cast<_Tp>(a.y - b.y) );
}
template<typename _Tp> static inline
bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b)
{
return a.x != b.x || a.y != b.y;
}
// Point End
// Rect Start
template<typename _Tp> class Rect_
{
public:
typedef _Tp value_type;
//! default constructor
Rect_();
Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height);
Rect_(const Rect_& r);
Rect_(Rect_&& r);
Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz);
Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2);
Rect_& operator = ( const Rect_& r );
Rect_& operator = ( Rect_&& r );
//! the top-left corner
Point_<_Tp> tl() const;
//! the bottom-right corner
Point_<_Tp> br() const;
//! size (width, height) of the rectangle
Size_<_Tp> size() const;
//! area (width*height) of the rectangle
_Tp area() const;
//! true if empty
bool empty() const;
_Tp x; //!< x coordinate of the top-left corner
_Tp y; //!< y coordinate of the top-left corner
_Tp width; //!< width of the rectangle
_Tp height; //!< height of the rectangle
};
typedef Rect_<int> Rect2i;
typedef Rect_<float> Rect2f;
typedef Rect_<double> Rect2d;
template<typename _Tp> inline
Rect_<_Tp>::Rect_()
: x(0), y(0), width(0), height(0) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height)
: x(_x), y(_y), width(_width), height(_height) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(const Rect_<_Tp>& r)
: x(r.x), y(r.y), width(r.width), height(r.height) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(Rect_<_Tp>&& r)
: x(std::move(r.x)), y(std::move(r.y)), width(std::move(r.width)), height(std::move(r.height)) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz)
: x(org.x), y(org.y), width(sz.width), height(sz.height) {}
template<typename _Tp> inline
Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2)
{
x = std::min(pt1.x, pt2.x);
y = std::min(pt1.y, pt2.y);
width = std::max(pt1.x, pt2.x) - x;
height = std::max(pt1.y, pt2.y) - y;
}
template<typename _Tp> inline
Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r )
{
x = r.x;
y = r.y;
width = r.width;
height = r.height;
return *this;
}
template<typename _Tp> inline
Rect_<_Tp>& Rect_<_Tp>::operator = ( Rect_<_Tp>&& r )
{
x = std::move(r.x);
y = std::move(r.y);
width = std::move(r.width);
height = std::move(r.height);
return *this;
}
template<typename _Tp> inline
Point_<_Tp> Rect_<_Tp>::tl() const
{
return Point_<_Tp>(x,y);
}
template<typename _Tp> inline
Point_<_Tp> Rect_<_Tp>::br() const
{
return Point_<_Tp>(x + width, y + height);
}
template<typename _Tp> inline
Size_<_Tp> Rect_<_Tp>::size() const
{
return Size_<_Tp>(width, height);
}
template<typename _Tp> inline
_Tp Rect_<_Tp>::area() const
{
const _Tp result = width * height;
return result;
}
template<typename _Tp> inline
bool Rect_<_Tp>::empty() const
{
return width <= 0 || height <= 0;
}
// Rect End
// Scalar Start
template<typename _Tp> class Scalar_ {
public:
//! default constructor
Scalar_();
Scalar_(_Tp _r, _Tp _g, _Tp _b) {
val[0] = _r;
val[1] = _g;
val[2] = _b;
val[3] = 255;
};
Scalar_(_Tp _r, _Tp _g, _Tp _b, _Tp _a) {
val[0] = _r;
val[1] = _g;
val[2] = _b;
val[3] = _a;
};
_Tp val[4];
};
typedef Scalar_<double> Scalar;
// Scalar End
static void getVARPSize(VARP var, int* height, int* width, int* channel) {
auto info = var->getInfo();
if (!info) {
return;
}
auto dims = info->dim;
int num = dims.size();
if (num < 2) return;
if (num == 2) {
*height = dims[0];
*width = dims[1];
*channel = 1;
} else if (num == 3) {
*height = dims[0];
*width = dims[1];
*channel = dims[2];
} else if (info->order == NHWC) {
*channel = dims[num - 1];
*width = dims[num - 2];
*height = dims[num - 3];
} else { // NCHW
*width = dims[num - 1];
*height = dims[num - 2];
*channel = dims[num - 3];
}
}
static int getVARPHeight(VARP var) {
int h = 0, w = 0, c = 0; // zero-init: getVARPSize may return early without writing
getVARPSize(var, &h, &w, &c);
return h;
}
static int getVARPWidth(VARP var) {
int h = 0, w = 0, c = 0;
getVARPSize(var, &h, &w, &c);
return w;
}
static int getVARPChannel(VARP var) {
int h = 0, w = 0, c = 0;
getVARPSize(var, &h, &w, &c);
return c;
}
static int getVARPByte(VARP var) {
return var->getInfo()->type.bytes();
}
} // CV
} // MNN
#endif // TYPES_HPP
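
The getVARPSize helper above resolves height/width/channel across HW, HWC, NHWC, and NCHW layouts; a short sketch of the intended call pattern (the image source is a placeholder).

#include "cv.hpp" // umbrella header from this diff; path assumed
int main() {
    auto img = MNN::CV::imread("input.jpg");
    int h = 0, w = 0, c = 0;
    MNN::CV::getVARPSize(img, &h, &w, &c); // e.g. h=480, w=640, c=3 for an HWC image
    return 0;
}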


@ -1,155 +0,0 @@
//
// Executor.hpp
// MNN
//
// Created by MNN on 2019/07/25.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_Executor_hpp
#define MNN_Executor_hpp
#include <MNN/ErrorCode.hpp>
#include <MNN/expr/Expr.hpp>
#include <MNN/Tensor.hpp>
#include <MNN/Interpreter.hpp>
#include <vector>
#include <mutex>
#include <set>
#include <MNN/MNNForwardType.h>
namespace MNN {
class Backend;
class Execution;
class Runtime;
struct Op;
namespace Express {
struct RuntimeAttr;
struct ExecutorAttr;
class MNN_PUBLIC Executor {
public:
class ComputeCache;
struct DebugTools;
/**Internal Usage Begin*/
struct Requirement {
std::vector<bool> contentNeedContent;
std::vector<bool> shapeNeedContent;
};
~Executor();
Requirement getRequirement(Expr* expr) const;
ErrorCode computeInfo(Expr* expr);
void makeCache(const std::vector<EXPRP>& expr, bool forceCPU = false);
/**Internal Usage End*/
bool lazyEval = true;
enum LazyMode {
LAZY_FULL = 0,
// Don't compute content until the user needs it.
LAZY_CONTENT = 1 << 0,
// An Expr is computed only once; this reduces the cost of creating exprs.
LAZY_COMPUTE_ONCE = 1 << 1,
};
uint32_t getLazyMode() const {
return mLazyMode;
}
void setLazyComputeMode(uint32_t mode);
void setGlobalExecutorConfig(MNNForwardType type, const BackendConfig& config, int numberThread);
int getCurrentRuntimeStatus(RuntimeStatus statusEnum);
enum GCFlag {
FULL,
PART
};
void gc(GCFlag flag = FULL);
static std::shared_ptr<Executor> getGlobalExecutor();
static std::shared_ptr<Executor> newExecutor(MNNForwardType type,
const BackendConfig& config,
int numberThread);
void resetProfile();
void dumpProfile();
struct SubGraph;
bool registerSubGraph(const std::string& submoduleName, VARPS outputs, VARPS inputs);
std::shared_ptr<SubGraph> findSubGraph(const std::string& submoduleName);
static RuntimeInfo getRuntime();
void setCallBack(TensorCallBackWithInfo&& before, TensorCallBackWithInfo&& after);
const DebugTools* getDebugTools() const {
return mDebug.get();
}
ExecutorAttr* getAttr() const;
class MNN_PUBLIC RuntimeManager {
public:
~RuntimeManager();
/**
* @param config : schedule config.
*/
static RuntimeManager* createRuntimeManager(const ScheduleConfig& config);
/**
* @param rtmgr : the rtmgr to destroy
*/
static void destroy(RuntimeManager* rtmgr);
/**
* Deprecated; the same as createRuntimeManager(configs[0]).
* @param configs : schedule configs.
*/
static RuntimeManager* createRuntimeManager(std::vector<ScheduleConfig>& configs);
/**
* @brief set cache file. If the file does not exist, it is created; if it exists, it is loaded.
* When to use: when choosing the GPU backend or the AUTO backend.
* Calling position: call after createRuntimeManager.
*/
void setCache(std::string cacheName);
/**
* @brief set the path of external files or directory
* @param path -- The path of a file or directory on disk
* @param type -- Type of the external path (see "enum ExternalPathType" in Interpreter.hpp)
*/
void setExternalPath(std::string path, int type);
/**
* @brief set external file.
*/
void setExternalFile(std::string fileName);
/**
* @brief update the cache file.
* When to use: together with the setCache API; call after the first inference and whenever the input shape changes.
* Calling position: call after inference is done.
*/
void updateCache();
std::vector<bool> isBackendSupport(const std::vector<MNNForwardType> type);
friend class Executor;
void setMode(Interpreter::SessionMode mode);
void setHint(Interpreter::HintMode mode, int value);
void setHintPtr(Interpreter::HintMode mode, void* value);
bool getInfo(Interpreter::SessionInfoCode code, void* ptr);
BackendConfig* getBnConfig();
const RuntimeAttr* getInside() const {
return mInside;
}
private:
std::mutex mLock;
RuntimeAttr* mInside;
friend class StaticModule;
RuntimeManager();
};
static bool getComputeInfo(EXPRP expr, Interpreter::SessionInfoCode code, void* ptr);
private:
std::shared_ptr<Runtime> _getOrCreateRuntime(MNNForwardType type, const BackendConfig* config, int numberThread, bool reset = true);
Executor(std::shared_ptr<Runtime> backend, MNNForwardType type, int numberThread);
void _makeCache(const std::vector<EXPRP>& outputs, bool forceCPU);
RuntimeInfo mRuntimeInfo;
std::shared_ptr<DebugTools> mDebug;
std::map<std::string, std::shared_ptr<SubGraph>> mSubGraph;
uint32_t mLazyMode = 0;
std::shared_ptr<ExecutorAttr> mAttr;
std::mutex mMutex;
};
} // namespace Express
} // namespace MNN
#endif
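
A hedged sketch of the RuntimeManager lifecycle described by the comments above; the backend choice and cache path are illustrative.

#include <MNN/expr/Executor.hpp>
using namespace MNN;
using namespace MNN::Express;
int main() {
    ScheduleConfig config;
    config.type = MNN_FORWARD_AUTO;
    std::shared_ptr<Executor::RuntimeManager> rtmgr(
        Executor::RuntimeManager::createRuntimeManager(config),
        Executor::RuntimeManager::destroy);
    rtmgr->setCache("/tmp/mnn.cache"); // create the cache file or load it
    // ... run the first inference ...
    rtmgr->updateCache();              // persist newly generated cache entries
    return 0;
}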


@ -1,33 +0,0 @@
//
// ExecutorScope.hpp
// MNN
//
// Created by MNN on 2020/10/26.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_EXPR_EXECUTOR_SCOPE_HPP_
#define MNN_EXPR_EXECUTOR_SCOPE_HPP_
#include <MNN/expr/Executor.hpp>
namespace MNN {
namespace Express {
struct MNN_PUBLIC ExecutorScope final {
public:
ExecutorScope() = delete;
explicit ExecutorScope(const ExecutorScope&) = delete;
explicit ExecutorScope(const std::shared_ptr<Executor>& current);
explicit ExecutorScope(const std::string& scope_name,
const std::shared_ptr<Executor>& current);
virtual ~ExecutorScope();
static const std::shared_ptr<Executor> Current();
};
} // namespace Express
} // namespace MNN
#endif // MNN_EXPR_EXECUTOR_SCOPE_HPP_
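
A minimal scoping sketch for ExecutorScope above; the thread count and backend are illustrative.

#include <MNN/expr/ExecutorScope.hpp>
using namespace MNN;
using namespace MNN::Express;
int main() {
    BackendConfig cfg;
    auto exe = Executor::newExecutor(MNN_FORWARD_CPU, cfg, 4); // 4 threads
    {
        ExecutorScope scope(exe);                // exe is current inside this block
        auto current = ExecutorScope::Current(); // == exe
        // expression evaluation here runs on exe
    }
    return 0;
}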


@ -1,279 +0,0 @@
//
// Expr.hpp
// MNN
//
// Created by MNN on 2019/06/10.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_Expr_hpp
#define MNN_Expr_hpp
#include <functional>
#include <string>
#include <vector>
#include <map>
#include <memory>
#include <MNN/HalideRuntime.h>
#include <MNN/MNNDefine.h>
namespace MNN {
struct BufferStorage;
struct OpT;
struct Op;
struct NetT;
class Tensor;
namespace Express {
class Variable;
class Expr;
class Executor;
typedef std::shared_ptr<Expr> EXPRP;
typedef std::weak_ptr<Expr> WeakEXPRP;
typedef std::vector<int> INTS;
enum Dimensionformat { NHWC, NC4HW4, NCHW };
class MNN_PUBLIC VARP {
public:
VARP() {
// Do nothing
}
VARP(std::shared_ptr<Variable> c) {
mContent = std::move(c);
}
VARP(Variable* c) {
mContent.reset(c);
}
Variable* get() const {
return mContent.get();
}
~ VARP() {
// Do nothing
}
VARP(const VARP& var) {
mContent = var.mContent;
}
VARP(VARP&& var) {
mContent = std::move(var.mContent);
}
VARP operator+(VARP var) const;
VARP operator-(VARP var) const;
VARP operator*(VARP var) const;
VARP operator/(VARP var) const;
VARP mean(INTS dims) const;
VARP sum(INTS dims) const;
bool operator==(const VARP& var) const {
return var.mContent == mContent;
}
bool operator<(const VARP& var) const {
return mContent < var.mContent;
}
bool operator<=(const VARP& var) const {
return mContent <= var.mContent;
}
VARP& operator=(const VARP& var) {
mContent = var.mContent;
return *this;
}
VARP& operator=(Variable* var) {
mContent.reset(var);
return *this;
}
Variable* operator->() const {
return mContent.get();
}
enum InputType {
INPUT = 0,
CONSTANT = 1,
TRAINABLE = 2,
};
bool fix(InputType type) const;
private:
friend class Variable;
std::shared_ptr<Variable> mContent;
};
inline bool operator==(Variable* src, VARP dst) {
return src == dst.get();
}
inline bool operator!=(Variable* src, VARP dst) {
return src != dst.get();
}
// inline bool operator<(VARP src, VARP dst) {
// return src.get() < dst.get();
// }
typedef std::vector<VARP> VARPS;
class MNN_PUBLIC Variable {
public:
struct Info {
Dimensionformat order = NHWC;
INTS dim;
halide_type_t type;
size_t size;
void syncSize();
};
const std::string& name() const;
void setName(const std::string& name);
bool setDevicePtr(const void* devicePtr, int memoryType);
bool copyToDevicePtr(void* devicePtr, int memoryType);
std::pair<EXPRP, int> expr() const {
return std::make_pair(mFrom, mFromIndex);
}
// If computing info fails, returns nullptr
const Info* getInfo();
bool resize(INTS dims);
template <typename T>
const T* readMap() {
return (const T*)readInternal();
}
template <typename T>
T* writeMap() {
return (T*)writeInternal();
}
void writeScaleMap(float scaleValue, float zeroPoint) {
writeScaleInternal(scaleValue, zeroPoint);
}
// Deprecated
void unMap();
bool input(VARP src);
static void replace(VARP dst, VARP src);
static VARP create(EXPRP expr, int index = 0);
static std::vector<VARP> load(const char* fileName);
static std::map<std::string, VARP> loadMap(const char* fileName);
static std::vector<VARP> load(const uint8_t* buffer, size_t length);
static std::map<std::string, VARP> loadMap(const uint8_t* buffer, size_t length);
static std::pair<std::map<std::string, VARP>, std::map<std::string, VARP>> getInputAndOutput(const std::map<std::string, VARP>& allVariable);
static std::vector<VARP> mapToSequence(const std::map<std::string, VARP>& source);
static std::vector<EXPRP> getExecuteOrder(const std::vector<VARP>& output);
static void save(const std::vector<VARP>& vars, const char* fileName);
static std::vector<int8_t> save(const std::vector<VARP>& vars);
static void save(const std::vector<VARP>& vars, NetT* dest);
// Pack several Variables to compute in one pipeline
static void prepareCompute(const std::vector<VARP>& vars, bool forceCPU = false);
static void compute(const std::vector<VARP>& vars, bool forceCPU = false);
size_t linkNumber() const;
const std::vector<WeakEXPRP>& toExprs() const;
void setExpr(EXPRP expr, int index) {
mFrom = expr;
mFromIndex = index;
}
// Can't modify the tensor from this interface
const Tensor* getTensor() const;
private:
Variable(EXPRP expr, int index) {
mFrom = expr;
mFromIndex = index;
}
void* readInternal(bool forShape = false);
void* writeInternal(bool inform=true);
void informDirty();
void writeScaleInternal(float scaleValue, float zeroPoint, bool inform = true);
friend class Expr;
EXPRP mFrom;
int mFromIndex;
};
class MNN_PUBLIC Expr {
public:
struct Inside;
enum MemoryType {
COPY,
MOVE,
REF
};
static EXPRP create(Tensor* tensor, bool own = false);
static EXPRP create(Variable::Info&& info, const void* ptr, VARP::InputType type, MemoryType copy = COPY);
static EXPRP create(const OpT* op, std::vector<VARP> inputs, int outputSize = 1);
static EXPRP create(std::shared_ptr<BufferStorage> extra, std::vector<VARP>&& inputs, int outputSize = 1);
static EXPRP create(std::unique_ptr<OpT>&& op, std::vector<VARP> inputs, int outputSize = 1) {
return create(op.get(), inputs, outputSize);
}
void setName(const std::string& name);
const Op* get() const {
return mOp;
}
const std::vector<VARP>& inputs() const {
return mInputs;
}
int outputSize() const {
return (int)mOutputNames.size();
}
static void replace(EXPRP oldExpr, EXPRP newExpr);
bool requireInfo();
void visitOutputs(const std::function<bool(EXPRP, int)>& visit);
static void visit(EXPRP expr, const std::function<bool(EXPRP)>& before, const std::function<bool(EXPRP)>& after);
const std::vector<WeakEXPRP>& outputs() const {
return mTo;
}
~Expr();
const std::string& name() const {
return mName;
}
const std::string& outputName(int index) {
return mOutputNames[index];
}
VARP::InputType inputType() const {return mType;}
/** Internal Usage Begin */
Variable::Info* outputInfo(int index) const;
std::shared_ptr<BufferStorage> extra() const {
return mStorage;
}
bool setInfoDirty();
std::shared_ptr<Inside> inside() const {
return mInside;
}
bool valid() const {
return mValid;
}
bool visited() const {
return mVisited;
}
void setVisited(bool visited) {
mVisited = visited;
}
/** Internal Usage End */
private:
static void _addLinkForInputs(EXPRP expr);
Expr(int outputSize);
Expr(Tensor* tensor, bool own = false);
friend class Variable;
friend class VARP;
VARP::InputType mType;
const Op* mOp;
std::vector<VARP> mInputs;
std::vector<std::string> mOutputNames;
bool mValid = true;
std::shared_ptr<BufferStorage> mStorage;
std::string mName;
std::shared_ptr<Inside> mInside = nullptr;
bool mVisited = false;
std::vector<WeakEXPRP> mTo;
bool mCanDecompose = true;
friend class ExprModule;
};
} // namespace Express
} // namespace MNN
#endif /* Expr_hpp */
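
A minimal lazy-evaluation sketch of the VARP/Variable API above; _Input comes from NeuralNetWorkOp.hpp (declared later in this diff), and the values are illustrative.

#include <MNN/expr/ExprCreator.hpp>
using namespace MNN::Express;
int main() {
    auto x = _Input({1, 4}, NCHW);    // placeholder variable
    float* in = x->writeMap<float>(); // host pointer for writing input
    for (int i = 0; i < 4; ++i) {
        in[i] = (float)i;
    }
    auto y = x * x + x;                     // builds the expression graph lazily
    const float* out = y->readMap<float>(); // triggers computation: out[i] = i*i + i
    return 0;
}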


@ -1,16 +0,0 @@
//
// ExprCreator.hpp
// MNN
//
// Created by MNN on 2019/06/27.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_ExprCreator_hpp
#define MNN_ExprCreator_hpp
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/MathOp.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>
#endif


@ -1,147 +0,0 @@
//
// MathOp.hpp
// MNN
//
// Created by MNN on 2019/06/27.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_MathOp_HPP
#define MNN_MathOp_HPP
namespace MNN {
namespace Express {
//BinaryOPs
MNN_PUBLIC VARP _Add(VARP x, VARP y);
MNN_PUBLIC VARP _Subtract(VARP x, VARP y);
MNN_PUBLIC VARP _Multiply(VARP x, VARP y);
MNN_PUBLIC VARP _Divide(VARP x, VARP y);
MNN_PUBLIC VARP _Pow(VARP x, VARP y);
MNN_PUBLIC VARP _Minimum(VARP x, VARP y);
MNN_PUBLIC VARP _Maximum(VARP x, VARP y);
MNN_PUBLIC VARP _BiasAdd(VARP value, VARP bias);
MNN_PUBLIC VARP _Greater(VARP x, VARP y);
MNN_PUBLIC VARP _GreaterEqual(VARP x, VARP y);
MNN_PUBLIC VARP _Less(VARP x, VARP y);
MNN_PUBLIC VARP _FloorDiv(VARP x, VARP y);
MNN_PUBLIC VARP _SquaredDifference(VARP x, VARP y);
MNN_PUBLIC VARP _Equal(VARP x, VARP y);
MNN_PUBLIC VARP _LessEqual(VARP x, VARP y);
MNN_PUBLIC VARP _FloorMod(VARP x, VARP y);
MNN_PUBLIC VARP _Atan2(VARP x, VARP y);
MNN_PUBLIC VARP _LogicalOr(VARP x, VARP y);
MNN_PUBLIC VARP _NotEqual(VARP x, VARP y);
MNN_PUBLIC VARP _BitwiseAnd(VARP x, VARP y);
MNN_PUBLIC VARP _BitwiseOr(VARP x, VARP y);
MNN_PUBLIC VARP _BitwiseXor(VARP x, VARP y);
//UnaryOPs
MNN_PUBLIC VARP _Sign(VARP a);
MNN_PUBLIC VARP _Abs(VARP x);
MNN_PUBLIC VARP _Negative(VARP x);
MNN_PUBLIC VARP _Floor(VARP x);
MNN_PUBLIC VARP _Round(VARP x);
MNN_PUBLIC VARP _Ceil(VARP x);
MNN_PUBLIC VARP _Square(VARP x);
MNN_PUBLIC VARP _Sqrt(VARP x);
MNN_PUBLIC VARP _Rsqrt(VARP x);
MNN_PUBLIC VARP _Exp(VARP x);
MNN_PUBLIC VARP _Log(VARP x);
MNN_PUBLIC VARP _Sin(VARP x);
MNN_PUBLIC VARP _Sinh(VARP x);
MNN_PUBLIC VARP _Cos(VARP x);
MNN_PUBLIC VARP _Cosh(VARP x);
MNN_PUBLIC VARP _Tan(VARP x);
MNN_PUBLIC VARP _Asin(VARP x);
MNN_PUBLIC VARP _Asinh(VARP x);
MNN_PUBLIC VARP _Acos(VARP x);
MNN_PUBLIC VARP _Acosh(VARP x);
MNN_PUBLIC VARP _Atan(VARP x);
MNN_PUBLIC VARP _Atanh(VARP x);
MNN_PUBLIC VARP _Reciprocal(VARP x);
MNN_PUBLIC VARP _Log1p(VARP x);
MNN_PUBLIC VARP _Gelu(VARP x);
MNN_PUBLIC VARP _Tanh(VARP x);
MNN_PUBLIC VARP _Sigmoid(VARP x);
MNN_PUBLIC VARP _Erf(VARP x);
MNN_PUBLIC VARP _Erfc(VARP x);
MNN_PUBLIC VARP _Erfinv(VARP x);
MNN_PUBLIC VARP _Expm1(VARP x);
MNN_PUBLIC VARP _Hardswish(VARP x);
MNN_PUBLIC VARP _Silu(VARP x);
//ReduceOPs
MNN_PUBLIC VARP _ReduceSum(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceMean(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceMax(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceMin(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceProd(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceAny(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceAll(VARP input_variable, INTS axis = {}, bool keepDims = false);
MNN_PUBLIC VARP _ReduceSumMutable(VARP input_variable, VARP axis, bool keepDims = false);
MNN_PUBLIC VARP _ReduceMeanMutable(VARP input_variable, VARP axis, bool keepDims = false);
MNN_PUBLIC VARP _ReduceMaxMutable(VARP input_variable, VARP axis, bool keepDims = false);
MNN_PUBLIC VARP _ReduceMinMutable(VARP input_variable, VARP axis, bool keepDims = false);
MNN_PUBLIC VARP _ReduceProdMutable(VARP input_variable, VARP axis, bool keepDims = false);
MNN_PUBLIC VARP _ReduceAnyMutable(VARP input_variable, VARP axis, bool keepDims = false);
MNN_PUBLIC VARP _ReduceAllMutable(VARP input_variable, VARP axis, bool keepDims = false);
//EltwiseOPs
MNN_PUBLIC VARP _Prod(VARP a, VARP b, std::vector<float> coeff);
MNN_PUBLIC VARP _Sum(VARP a, VARP b, std::vector<float> coeff);
MNN_PUBLIC VARP _Max(VARP a, VARP b, std::vector<float> coeff);
MNN_PUBLIC VARP _Sub(VARP a, VARP b, std::vector<float> coeff);
MNN_PUBLIC VARP _EltwiseProdInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale);
MNN_PUBLIC VARP _EltwiseSumInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale);
MNN_PUBLIC VARP _EltwiseSubInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale);
MNN_PUBLIC VARP _EltwiseMaxInt8(VARP x, VARP y,
std::vector<int8_t> x_weight, std::vector<int32_t> x_bias, std::vector<float> x_scale, std::vector<float> x_tensorScale,
std::vector<int8_t> y_weight, std::vector<int32_t> y_bias, std::vector<float> y_scale, std::vector<float> y_tensorScale,
std::vector<int8_t> output_weight, std::vector<int32_t> output_bias, std::vector<float> output_scale, std::vector<float> output_tensorScale);
MNN_PUBLIC VARP _Mod(VARP x, VARP y);
//OtherOPs
template<typename T>
VARP _Cast(VARP x) {
return _Cast(x, halide_type_of<T>());
}
MNN_PUBLIC VARP _Cast(VARP x, halide_type_t dtype);
MNN_PUBLIC VARP _MatMul(VARP a, VARP b, bool tranposeA = false, bool tranposeB = false);
MNN_PUBLIC VARP _Normalize(VARP x, int32_t acrossSpatial, int32_t channelShared, float eps, std::vector<float> scale);
MNN_PUBLIC VARP _ArgMax(VARP input, int axis = 0);
MNN_PUBLIC VARP _ArgMin(VARP input, int axis = 0);
MNN_PUBLIC VARP _BatchMatMul(VARP x, VARP y, bool adj_x = false, bool adj_y = false);
MNN_PUBLIC VARP _UnravelIndex(VARP indices, VARP dims);
MNN_PUBLIC VARP _ScatterNd(VARP indices, VARP updates, VARP shape);
MNN_PUBLIC VARP _ScatterNd(VARP indices, VARP updates, VARP shape, VARP input);
MNN_PUBLIC VARP _ScatterNd(VARP indices, VARP updates, VARP shape, int reduction);
MNN_PUBLIC VARP _ScatterNd(VARP indices, VARP updates, VARP shape, VARP input, int reduction);
MNN_PUBLIC VARP _ScatterElements(VARP data, VARP indices, VARP updates, int reduction = -1);
MNN_PUBLIC VARP _ScatterElements(VARP data, VARP indices, VARP updates, VARP axis, int reduction = -1);
MNN_PUBLIC VARP _OneHot(VARP indices, VARP depth, VARP onValue, VARP offValue, int axis = -1);
MNN_PUBLIC VARP _BroadcastTo(VARP a, VARP shape);
MNN_PUBLIC VARP _LinSpace(VARP start, VARP stop, VARP num);
MNN_PUBLIC VARP _RandomUnifom(VARP shape, halide_type_t dtype, float low = 0.0f, float high = 1.0f, int seed0 = 0, int seed1 = 0);
MNN_PUBLIC VARP _CumSum(VARP x, int axis, bool exclusive = false, bool reverse = false);
MNN_PUBLIC VARP _CumProd(VARP x, int axis);
MNN_PUBLIC VARPS _Svd(VARP x);
MNN_PUBLIC VARP _Histogram(VARP x, int bin, int min, int max, int channel = -1);
#ifdef MNN_BUILD_AUDIO
MNN_PUBLIC VARP _Stft(VARP sample, VARP window, int n_fft, int hop_length, bool abse = true);
#endif
}; // namespace Express
}; // namespace MNN
#endif /* MathOp_HPP */
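
A short elementwise/reduce sketch for the operators above; _Const comes from NeuralNetWorkOp.hpp, and the shapes and values are illustrative.

#include <MNN/expr/ExprCreator.hpp>
using namespace MNN::Express;
int main() {
    auto a = _Const(2.0f, {2, 2}); // 2x2 tensor filled with 2.0
    auto b = _Const(3.0f, {2, 2});
    auto c = _Add(_Multiply(a, b), _Sqrt(b)); // elementwise 2*3 + sqrt(3)
    auto s = _ReduceSum(c);                   // sum over all axes
    const float* v = s->readMap<float>();     // v[0] ~= 4 * (6 + 1.7321) ~= 30.93
    return 0;
}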


@ -1,136 +0,0 @@
//
// Module.hpp
// MNN
//
// Created by MNN on 2019/11/25.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_Train_Module_hpp
#define MNN_Train_Module_hpp
#include <vector>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/Executor.hpp>
#include <MNN/MNNForwardType.h>
namespace MNN {
class Session;
namespace Express {
struct SubGraph;
class MNN_PUBLIC Module {
public:
Module() = default;
virtual ~Module() = default;
virtual std::vector<Express::VARP> onForward(const std::vector<Express::VARP>& inputs) = 0;
Express::VARP forward(Express::VARP input);
std::vector<Express::VARP> parameters() const;
bool loadParameters(const std::vector<Express::VARP>& parameters);
void setIsTraining(const bool isTraining);
bool getIsTraining();
void clearCache();
const std::string& name() const {
return mName;
};
void setName(std::string name) {
mName = std::move(name);
}
const std::string type() const {
return mType;
}
void setType(std::string type) {
mType = std::move(type);
}
// Return the parameter index
int addParameter(Express::VARP parameter);
void setParameter(Express::VARP parameter, int index);
static Module* createEmpty(const std::vector<Express::VARP>& parameters);
struct BackendInfo {
MNNForwardType type = MNN_FORWARD_CPU;
BackendConfig* config = nullptr;
};
struct Config {
// Load module as dynamic, default static
bool dynamic = false;
// for static mode: if the shape is mutable, set true; otherwise set false to avoid calling resizeSession frequently
bool shapeMutable = true;
// Pre-rearrange weights or not. Disabled by default.
// The weights will be rearranged in a general way, so the best implementation
// may not be adopted if `rearrange` is enabled.
bool rearrange = false;
BackendInfo* backend = nullptr;
// base module
const Module* base = nullptr;
};
static Module* load(const std::vector<std::string>& inputs, const std::vector<std::string>& outputs, const uint8_t* buffer, size_t length, const Config* config = nullptr);
static Module* load(const std::vector<std::string>& inputs, const std::vector<std::string>& outputs, const char* fileName, const Config* config = nullptr);
// Shared RuntimeManager
static Module* load(const std::vector<std::string>& inputs, const std::vector<std::string>& outputs, const char* fileName, const std::shared_ptr<MNN::Express::Executor::RuntimeManager> rtMgr, const Config* config = nullptr);
static Module* load(const std::vector<std::string>& inputs, const std::vector<std::string>& outputs, const uint8_t* buffer, size_t length, const std::shared_ptr<MNN::Express::Executor::RuntimeManager> rtMgr, const Config* config = nullptr);
static Module* extract(std::vector<Express::VARP> inputs, std::vector<Express::VARP> outputs, bool fortrain, const std::map<std::string, SubGraph>& subGraph = {});
static Module* clone(const Module* module, const bool shareParams = false);
struct Info {
// Input info loaded from the model
std::vector<Variable::Info> inputs;
// The Module's defaultFormat, NCHW or NHWC
Dimensionformat defaultFormat;
// Runtime Info
std::shared_ptr<MNN::Express::Executor::RuntimeManager> runTimeManager;
// Input names, in order
std::vector<std::string> inputNames;
// Output names, in order
std::vector<std::string> outputNames;
// The version of MNNConvert that built the module
std::string version;
// The bizCode of the MNN model
std::string bizCode;
};
const Info* getInfo() const;
class CloneContext;
virtual Module* clone(CloneContext* ctx) const {
return nullptr;
}
void registerModel(const std::vector<std::shared_ptr<Module>>& children);
static void destroy(Module* m);
int traceOrOptimize(Interpreter::SessionMode stage);
std::vector<std::shared_ptr<Module>> getChildren() const { return mChildren; }
protected:
virtual int onOptimize(Interpreter::SessionMode stage) {
return 0;
}
virtual void onClearCache() {
}
Module* cloneBaseTo(CloneContext* ctx, Module* module) const;
std::vector<std::shared_ptr<Module>> mChildren;
std::vector<Express::VARP> mParameters;
private:
void _collectParameters(std::vector<Express::VARP>& result) const;
bool mIsTraining = true;
std::string mName;
std::string mType;
};
struct SubGraph {
std::vector<std::string> inputs;
std::vector<std::string> outputs;
std::shared_ptr<Module> m;
};
} // namespace Express
} // namespace MNN
#endif
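
A minimal sketch of driving the Module interface above: load a static-shape model, feed one input, and read one output. The file name "model.mnn" and the tensor names "input"/"output" are placeholders standing in for any converted MNN model.

#include <memory>
#include <MNN/expr/Module.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>

using namespace MNN::Express;

int main() {
    Module::Config cfg;
    cfg.shapeMutable = false; // fixed shapes avoid frequent resizing
    // Module::destroy is the matching deleter declared above.
    std::shared_ptr<Module> net(
        Module::load({"input"}, {"output"}, "model.mnn", &cfg),
        Module::destroy);
    if (net == nullptr) return 1;

    VARP x = _Input({1, 3, 224, 224}, NC4HW4);
    float* host = x->writeMap<float>(); // fill input data here
    (void)host;
    auto outputs = net->onForward({x});
    const float* y = outputs[0]->readMap<float>();
    return y == nullptr ? 1 : 0;
}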

View File

@ -1,182 +0,0 @@
//
// NeuralNetWorkOp.hpp
// MNN
//
// Created by MNN on 2019/06/27.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_NeuralNetWorkOp_HPP
#define MNN_NeuralNetWorkOp_HPP
#include <MNN/ImageProcess.hpp>
namespace MNN {
namespace Express {
enum PaddingMode {CAFFE, VALID, SAME};
enum PoolingMode {MAXPOOL, AVEPOOL};
enum PadValueMode {CONSTANT, REFLECT, SYMMETRIC, EDGE};
MNN_PUBLIC VARP _Input(INTS shape = {}, Dimensionformat data_format = NC4HW4, halide_type_t dtype = halide_type_of<float>()) ;
MNN_PUBLIC VARP _Clone(VARP source, bool deepCopy = false);
MNN_PUBLIC VARP _Scalar(const void* ptr, halide_type_t type);
template <typename T>
VARP _Scalar(T value) {
return _Scalar(&value, halide_type_of<T>());
}
MNN_PUBLIC VARP _Const(float value, INTS shape = {}, Dimensionformat format = NHWC);
MNN_PUBLIC VARP _Const(const void* ptr, INTS shape = {}, Dimensionformat format = NHWC,
halide_type_t type = halide_type_of<float>());
MNN_PUBLIC VARP _TrainableParam(float value, INTS dims, Dimensionformat format);
MNN_PUBLIC VARP _TrainableParam(const void* ptr, INTS dims, Dimensionformat format,
halide_type_t type = halide_type_of<float>());
MNN_PUBLIC VARP _InnerProduct(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS outputShape);
MNN_PUBLIC VARP _Conv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1},
INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0});
MNN_PUBLIC VARP _Conv(float weight, float bias, VARP x, INTS channel, INTS kernelSize, PaddingMode pad = VALID,
INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1);
MNN_PUBLIC VARP _Conv(std::vector<int8_t>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}, bool relu = false, bool relu6 = false, int nbits = 8);
MNN_PUBLIC VARP _Conv(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}, bool relu = false, bool relu6 = false);
MNN_PUBLIC VARP _Deconv(VARP weight, VARP bias, VARP x, PaddingMode pad = VALID, INTS stride = {1, 1},
INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0});
MNN_PUBLIC VARP _Deconv(std::vector<float>&& weight, std::vector<float>&& bias, VARP x, INTS channel, INTS kernelSize,
PaddingMode pad, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0}, bool relu = false, bool relu6 = false);
MNN_PUBLIC VARP _MaxPool(VARP x, INTS kernel, INTS stride = {1, 1}, PaddingMode pad = VALID, INTS pads= {0, 0});
MNN_PUBLIC VARP _AvePool(VARP x, INTS kernel, INTS stride = {1, 1}, PaddingMode pad = VALID, INTS pads= {0, 0});
MNN_PUBLIC VARP _Reshape(VARP x, INTS shape, Dimensionformat original_format = NCHW);
MNN_PUBLIC VARP _Reshape(VARP x, VARP shape);
MNN_PUBLIC VARP _Scale(VARP x, int channels, std::vector<float>&& scales, std::vector<float>&& bias);
MNN_PUBLIC VARP _Relu(VARP x, float slope = 0.0f);
MNN_PUBLIC VARP _Relu6(VARP x, float minValue = 0.0f, float maxValue = 6.0f);
MNN_PUBLIC VARP _PRelu(VARP x, std::vector<float> &&slopes);
MNN_PUBLIC VARP _Softmax(VARP logits, int axis = -1);
MNN_PUBLIC VARP _Softplus(VARP features);
MNN_PUBLIC VARP _Softsign(VARP features);
MNN_PUBLIC std::vector<VARP> _Split(VARP value, INTS size_splits, int axis = 0);
MNN_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes);
MNN_PUBLIC VARP _StridedSlice(VARP input, VARP begin, VARP end, VARP strided,
int32_t beginMask, int32_t endMask, int32_t ellipsisMask,
int32_t newAxisMask, int32_t shrinkAxisMask);
MNN_PUBLIC VARP _StridedSliceWrite(VARP input, VARP begin, VARP end, VARP strided, VARP write,
int32_t beginMask, int32_t endMask, int32_t ellipsisMask,
int32_t newAxisMask, int32_t shrinkAxisMask);
MNN_PUBLIC VARP _Concat(VARPS values, int axis);
MNN_PUBLIC VARP _Convert(VARP input, Dimensionformat format);
MNN_PUBLIC VARP _Transpose(VARP x, INTS perm);
MNN_PUBLIC VARP _Transpose(VARP x, VARP perm);
MNN_PUBLIC VARP _ChannelShuffle(VARP x, int group);
MNN_PUBLIC VARP _ChangeInputFormat(VARP input, Dimensionformat format);
MNN_PUBLIC VARP _Conv2DBackPropFilter(VARP input, VARP inputGrad, INTS kernelSize, PaddingMode pad = VALID, INTS stride = {1, 1}, INTS dilate = {1, 1}, int group = 1, INTS pads = {0, 0});
MNN_PUBLIC VARP _PoolGrad(VARP originInput, VARP originOutput, VARP inputGrad, INTS kernel, INTS stride, PoolingMode type, PaddingMode pad = VALID, INTS pads= {0, 0});
// FIXME: move this API to Array Ops
MNN_PUBLIC VARP _Reverse(VARP x, VARP axis);
MNN_PUBLIC VARP _ReverseSequence(VARP x, VARP y, int batchDim, int seqDim);
// FIXME: move this API to Image Ops
MNN_PUBLIC VARP _Crop(VARP images, VARP size, int axis, INTS offset);
MNN_PUBLIC VARP _Resize(VARP images, float xScale, float yScale);
MNN_PUBLIC VARP _Pad(VARP x, VARP paddings, PadValueMode mode = CONSTANT);
MNN_PUBLIC VARP _ExpandDims(VARP input, int axis);
MNN_PUBLIC VARP _ExpandDims(VARP input, VARP axis);
MNN_PUBLIC VARP _Shape(VARP input, bool nchw = false);
MNN_PUBLIC VARP _Stack(VARPS values, int axis=0);
enum InterpolationMethod {BILINEAR, NEAREST};
MNN_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP box_ind, VARP crop_size,
InterpolationMethod method, float extrapolation_value = 0.0);
MNN_PUBLIC VARP _Fill(VARP dims, VARP value);
MNN_PUBLIC VARP _Tile(VARP input, VARP multiples);
MNN_PUBLIC VARP _Gather(VARP params, VARP indices);
MNN_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = nullptr);
MNN_PUBLIC VARP _Squeeze(VARP input, INTS axis = {});
MNN_PUBLIC VARP _Unsqueeze(VARP input, INTS axis = {});
MNN_PUBLIC VARP _BatchToSpaceND(VARP input, VARP block_shape, VARP crops);
MNN_PUBLIC VARP _GatherND(VARP params, VARP indices);
MNN_PUBLIC VARP _GatherElements(VARP params, VARP indices);
MNN_PUBLIC VARP _GatherElements(VARP params, VARP indices, VARP axis);
MNN_PUBLIC VARP _Selu(VARP features, float scale, float alpha);
MNN_PUBLIC VARP _Size(VARP input);
MNN_PUBLIC VARP _Elu(VARP features, float alpha=1.0);
MNN_PUBLIC VARP _Threshold(VARP features, float alpha=1.0);
MNN_PUBLIC VARP _MatrixBandPart(VARP input, VARP num_lower, VARP num_upper);
MNN_PUBLIC std::vector<VARP> _Moments(VARP x, INTS axis, VARP shift, bool keepDims);
MNN_PUBLIC VARP _SetDiff1D(VARP x, VARP y);
MNN_PUBLIC VARP _SpaceToDepth(VARP input, int block_size);
MNN_PUBLIC VARP _SpaceToBatchND(VARP input, VARP block_shape, VARP paddings);
MNN_PUBLIC VARP _ZerosLike(VARP input);
MNN_PUBLIC std::vector<VARP> _Unstack(VARP value, int axis=0);
MNN_PUBLIC VARP _Rank(VARP input);
MNN_PUBLIC VARP _Range(VARP start, VARP limit, VARP delta);
MNN_PUBLIC VARP _DepthToSpace(VARP input, int block_size);
MNN_PUBLIC VARP _PriorBox(VARP feature, VARP image,
std::vector<float> min_size, std::vector<float> max_size, std::vector<float> aspect_ratio,
bool flip, bool clip, std::vector<float> variance,
unsigned int img_h, unsigned int img_w, float step_h, float step_w, float offset = 0.5);
MNN_PUBLIC VARP _Permute(VARP input, INTS dims);
MNN_PUBLIC VARP _DetectionOutput(VARP location, VARP confidence, VARP priorbox,
unsigned int num_classes, bool share_location, int background_label_id,
float nms_threshhold, int nms_topk, int code_type,
bool variance_encoded_in_target,
int keep_top_k, float confidence_threshold, float visualize_threshold);
MNN_PUBLIC std::vector<VARP> _DetectionPostProcess(VARP encode_boxes, VARP class_predictions, VARP anchors,
int num_classes, int max_detections,
int max_class_per_detection, int detections_per_class,
float nms_threshold, float iou_threshold,
bool use_regular_nms, std::vector<float> centersize_encoding);
MNN_PUBLIC VARP _Interp(VARPS xs, float widthScale, float heightScale, int outputWidth, int outputHeight, int resizeType, bool alignCorners);
MNN_PUBLIC VARP _ZeroGrad(VARP x);
// Int8 Inference
MNN_PUBLIC VARP _Conv(std::vector<int8_t>&& weight, std::vector<int>&& bias, std::vector<float>&& scale, VARP x, INTS channel, INTS kernelSize,
PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu, int nbits = 8);
MNN_PUBLIC VARP _Conv(std::vector<int8_t>&& weight, std::vector<int>&& bias, std::vector<float>&& scale,
VARP x, INTS channel, INTS kernelSize,
PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu,
int8_t inputZeroPoint, int8_t outputZeroPoint,
int8_t minValue, int8_t maxValue, bool accumulateToInt16);
MNN_PUBLIC VARP _Conv(std::vector<int8_t>&& weight, std::vector<float>&& bias, std::vector<float>&& weightScale,
VARP x, INTS channel, INTS kernelSize,
PaddingMode pad, INTS stride, INTS dilate, int group, INTS pads, bool relu,
float scaleIn, float scaleOut,
int8_t inputZeroPoint, int8_t outputZeroPoint,
int8_t minValue, int8_t maxValue, float weightClampValue, bool accumulateToInt16);
MNN_PUBLIC VARP _CosineSimilarity(VARP input0, VARP input1, VARP inputDim);
enum GridSamplePaddingMode {GRID_SAMPLE_PADDING_ZEROS, GRID_SAMPLE_PADDING_BORDER, GRID_SAMPLE_PADDING_REFLECTION};
MNN_PUBLIC VARP _GridSample(VARP input, VARP grid, InterpolationMethod mode=BILINEAR, GridSamplePaddingMode paddingMode=GRID_SAMPLE_PADDING_ZEROS, bool alignCorners=false);
MNN_PUBLIC VARP _FloatToInt8(VARP x, VARP scale, char minValue, char maxValue);
MNN_PUBLIC VARP _FloatToInt8(VARP x, VARP scale, int8_t minValue, int8_t maxValue, int8_t zeroPoint);
MNN_PUBLIC VARP _Int8ToFloat(VARP x, VARP scale);
MNN_PUBLIC VARP _Int8ToFloat(VARP x, VARP scale, int8_t zeroPoint);
MNN_PUBLIC VARP _Select(VARP select, VARP input0, VARP input1);
MNN_PUBLIC std::vector<VARP> _TopKV2(VARP input0, VARP input1);
MNN_PUBLIC VARP _ImageProcess(VARP input, CV::ImageProcess::Config config, CV::Matrix matrix, int oh, int ow, int oc, int dtype, uint8_t padVal = 0);
MNN_PUBLIC VARP _Where(VARP x);
MNN_PUBLIC VARP _Sort(VARP x, int axis = -1, bool arg = false, bool descend = false);
MNN_PUBLIC VARP _Raster(const std::vector<VARP>& vars, const std::vector<int>& regions, const std::vector<int>& shape);
MNN_PUBLIC VARP _RasterRaw(const std::vector<VARP>& vars, const std::vector<int>& region, const std::vector<int>& shape, halide_type_t dataType, Dimensionformat format);
MNN_PUBLIC VARP _Nms(VARP boxes, VARP scores, int maxDetections, float iouThreshold = -1, float scoreThreshold = -1);
MNN_PUBLIC VARP _Im2Col(VARP x, INTS kernelSize, INTS dilate, INTS pads, INTS stride);
MNN_PUBLIC VARP _Col2Im(VARP x, VARP outputShape, INTS kernelSize, INTS dilate, INTS pads, INTS stride);
/**
ONNX's Loop operator
*/
MNN_PUBLIC VARPS _Loop(VARPS x, const std::string& submoduleName);
MNN_PUBLIC VARP _ROIPooling(VARP input, VARP roi, int pooledHeight, int pooledWidth, float spatialScale, bool outputGrad = false, VARP backwardDiff = nullptr);
MNN_PUBLIC VARP _ROIAlign(VARP input, VARP roi, int pooledHeight, int pooledWidth, float spatialScale, int samplingRatio, bool aligned, PoolingMode poolType, bool outputGrad = false, VARP backwardDiff = nullptr);
} // namespace Express
} // namespace MNN
#endif /* NeuralNetWorkOp_HPP */
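
A sketch composing the layer ops above into a tiny graph (conv, relu, pool, softmax). The zero-valued weights are placeholders; a real network would carry trained values.

#include <vector>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>

using namespace MNN::Express;

int main() {
    VARP x = _Input({1, 3, 32, 32}, NC4HW4);
    // channel = {inputChannel, outputChannel}; weight holds 8*3*3*3 floats.
    std::vector<float> w(8 * 3 * 3 * 3, 0.0f), b(8, 0.0f);
    VARP conv = _Conv(std::move(w), std::move(b), x, {3, 8}, {3, 3}, SAME);
    VARP act  = _Relu(conv);
    VARP pool = _MaxPool(act, {2, 2}, {2, 2});
    VARP flat = _Reshape(_Convert(pool, NCHW), {1, -1});
    VARP prob = _Softmax(flat, -1);
    return prob->readMap<float>() == nullptr ? 1 : 0;
}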

View File

@ -1,64 +0,0 @@
//
// Optimizer.hpp
// MNN
//
// Created by MNN on 2019/08/20.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef Optimizer_hpp
#define Optimizer_hpp
#include <MNN/expr/Expr.hpp>
#include <MNN/MNNForwardType.h>
namespace MNN {
namespace Express {
class MNN_PUBLIC Optimizer {
public:
enum Device {
CPU = 0,
GPU = 1,
OTHER = 2,
AUTO = 3
};
struct Config {
Device device = CPU;
MNNForwardType forwardType = MNN_FORWARD_ALL;
int numThread = 4;
};
static std::shared_ptr<Optimizer> create(Config config);
struct Cost {
float compute; // MFlops
float memory; // MB
};
class Parameters {
public:
Parameters(int n);
virtual ~Parameters();
float* get() const {
return mValue;
}
int size() const {
return mSize;
}
private:
float* mValue;
int mSize;
};
virtual std::shared_ptr<Parameters> onGetParameters(const std::vector<VARP>& outputs) {
return nullptr;
}
// Given parameters, measure the cost; the parameters must be those returned by onGetParameters
virtual Cost onMeasure(const std::vector<VARP>& outputs, std::shared_ptr<Parameters> parameters = nullptr) = 0;
// Modify the outputs directly; the parameters must be those returned by onGetParameters
virtual bool onExecute(const std::vector<VARP>& outputs, std::shared_ptr<Parameters> parameters = nullptr) = 0;
Optimizer() = default;
virtual ~Optimizer() = default;
};
} // namespace Express
} // namespace MNN
#endif
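
A sketch of the intended call sequence for the interface above, assuming `outputs` holds the outputs of some Express graph; create() may return nullptr when no optimizer exists for the requested device.

#include <memory>
#include <vector>
#include <MNN/expr/Optimizer.hpp>

using namespace MNN::Express;

void optimizeGraph(const std::vector<VARP>& outputs) {
    Optimizer::Config config;
    config.device = Optimizer::GPU;
    std::shared_ptr<Optimizer> opt = Optimizer::create(config);
    if (opt == nullptr) return; // no optimizer available for this device
    auto params = opt->onGetParameters(outputs);
    Optimizer::Cost cost = opt->onMeasure(outputs, params); // MFlops / MB
    if (cost.compute > 0.0f) {
        opt->onExecute(outputs, params); // rewrites the outputs in place
    }
}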

View File

@ -1,112 +0,0 @@
//
// RuntimeScope.hpp
// MNN
//
// Created by MNN on 2020/10/26.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_EXPR_SCOPE_HPP_
#define MNN_EXPR_SCOPE_HPP_
#include <cstdio>
#include <vector>
#include <string>
#include <mutex>
#include <MNN/Interpreter.hpp>
namespace MNN {
namespace Express {
template <typename T>
class Scope {
public:
Scope();
virtual ~Scope() = default;
struct ScopedContent {
std::string scope_name;
T content;
};
void EnterScope(const ScopedContent& current);
void EnterScope(const T& current);
void EnterScope(const std::string& scope_name, const T& current);
void ExitScope();
const ScopedContent& Current() const;
const T Content() const;
int ScopedLevel() const { return scoped_level_; }
private:
std::string MakeScopeName(const std::string& prefix, int level) const;
mutable std::mutex mutex_;
int scoped_level_ = 0;
std::vector<ScopedContent> scoped_contents_;
};
template <typename T>
Scope<T>::Scope() : scoped_level_(0) {
}
template <typename T>
void Scope<T>::EnterScope(const ScopedContent& current) {
std::lock_guard<std::mutex> lock(mutex_);
++scoped_level_;
scoped_contents_.push_back(current);
}
template <typename T>
void Scope<T>::EnterScope(const T& current) {
EnterScope("scope", current);
}
template <typename T>
void Scope<T>::EnterScope(const std::string& scope_name,
const T& current) {
std::lock_guard<std::mutex> lock(mutex_);
int scoped_level = ScopedLevel();
std::string name = MakeScopeName(scope_name, scoped_level++);
ScopedContent content{name, current};
++scoped_level_;
scoped_contents_.push_back(content);
}
template <typename T>
void Scope<T>::ExitScope() {
std::lock_guard<std::mutex> lock(mutex_);
--scoped_level_;
scoped_contents_.resize(scoped_level_);
}
template <typename T>
const typename Scope<T>::ScopedContent& Scope<T>::Current() const {
std::lock_guard<std::mutex> lock(mutex_);
MNN_CHECK(scoped_contents_.size() > 0, "Scope level should not be 0.");
return scoped_contents_.back();
}
template <typename T>
const T Scope<T>::Content() const {
std::lock_guard<std::mutex> lock(mutex_);
if (scoped_contents_.empty()) {
return nullptr;
}
return scoped_contents_.back().content;
}
template <typename T>
std::string Scope<T>::MakeScopeName(const std::string& prefix,
int level) const {
char s[16];
snprintf(s, 16, "%d", level);
return prefix + "/" + std::string(s);
}
} // namespace Express
} // namespace MNN
#endif // MNN_EXPR_SCOPE_HPP_
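
A sketch of using the Scope template above with a pointer payload; Content() returns nullptr when no scope is active, so T must be comparable to nullptr. The include path is assumed from the file comment, and RuntimeAttr is a hypothetical payload type.

#include "RuntimeScope.hpp" // path assumed from the file comment above

struct RuntimeAttr { int numThread = 4; };

void example() {
    static MNN::Express::Scope<RuntimeAttr*> scope;
    RuntimeAttr attr;
    scope.EnterScope("inference", &attr); // stored under the name "inference/0"
    RuntimeAttr* current = scope.Content(); // -> &attr
    (void)current;
    scope.ExitScope(); // pops back to the outer (empty) scope
}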

View File

@ -1,139 +0,0 @@
//
// llm.hpp
//
// Created by MNN on 2023/08/25.
// ZhaodeWang
//
#ifndef LLM_hpp
#define LLM_hpp
#include <vector>
#include <memory>
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <streambuf>
#include <functional>
#include <unordered_map>
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/Module.hpp>
#include <MNN/expr/MathOp.hpp>
#include <MNN/expr/NeuralNetWorkOp.hpp>
namespace MNN {
namespace Transformer {
class Tokenizer;
class Pipeline;
class LlmConfig;
class DiskEmbedding;
enum TuneType {
// number of ops to encode per command-buffer commit (GPU tuning)
OP_ENCODER_NUMBER = 0,
};
struct KVMeta;
class MNN_PUBLIC Llm {
using PromptItem = std::pair<std::string, std::string>; // <role, content>
public:
enum Stage {
Prefill,
Decode
};
struct GenerateState {
// forward info
int prompt_len_ = 0;
int gen_seq_len_ = 0;
int all_seq_len_ = 0;
std::vector<int> history_ids_;
// time
int64_t vision_us_ = 0;
int64_t audio_us_ = 0;
int64_t prefill_us_ = 0;
int64_t decode_us_ = 0;
int current_token_ = 0;
std::vector<int> output_ids_;
std::ostream* os_ = nullptr;
std::string end_with_;
};
Llm(std::shared_ptr<LlmConfig> config);
virtual ~Llm();
static Llm* createLLM(const std::string& config_path);
void chat();
void reset();
void trace(bool start);
void tuning(TuneType type, std::vector<int> candidates);
virtual void load();
void switchMode(Stage stage);
void setKVCacheInfo(size_t add, size_t remove, int* reserve = nullptr, int n_reserve = 0);
MNN::Express::VARP forwardRaw(MNN::Express::VARP hiddenState, MNN::Express::VARP mask, MNN::Express::VARP inputPos);
virtual MNN::Express::VARP gen_attention_mask(int seq_len);
virtual MNN::Express::VARP embedding(const std::vector<int>& input_ids);
MNN::Express::VARP forward(const std::vector<int>& input_ids);
int sample(MNN::Express::VARP logits, const std::vector<int>& pre_ids, int offset = 0, int size = 0);
std::string apply_prompt_template(const std::string& user_content) const;
std::string apply_chat_template(const std::vector<PromptItem>& chat_prompts) const;
size_t getCurrentHistory() const;
void eraseHistory(size_t begin, size_t end);
void response(const std::string& user_content, std::ostream* os = &std::cout, const char* end_with = nullptr, int max_new_tokens = -1);
void response(const std::vector<PromptItem>& chat_prompts, std::ostream* os = &std::cout, const char* end_with = nullptr, int max_new_tokens = -1);
void generate_init(std::ostream* os = nullptr, const char* end_with = nullptr);
void generate(int max_token);
std::vector<int> generate(const std::vector<int>& input_ids, int max_new_tokens = -1);
bool stoped();
void print_speed();
// config functions
std::string dump_config();
bool set_config(const std::string& content);
// LoRA functions
size_t apply_lora(const std::string& lora_path);
Llm* create_lora(const std::string& lora_path);
bool release_module(size_t index);
bool select_module(size_t index);
// tokenizer functions
bool is_stop(int token_id);
std::string tokenizer_decode(int id);
virtual std::vector<int> tokenizer_encode(const std::string& query, bool use_template = true);
friend class Pipeline;
const GenerateState& getState() const {
return mState;
}
protected:
std::shared_ptr<KVMeta> mMeta;
std::shared_ptr<LlmConfig> config_;
std::shared_ptr<Tokenizer> tokenizer_;
std::shared_ptr<DiskEmbedding> disk_embedding_;
MNN::Express::VARP inputs_embeds_, attention_mask_, position_ids_;
std::shared_ptr<MNN::Express::Executor::RuntimeManager> runtime_manager_;
std::shared_ptr<MNN::Express::Executor::RuntimeManager> mllm_runtime_manager_;
std::vector<std::shared_ptr<MNN::Express::Module>> modules_;
std::vector<std::shared_ptr<MNN::Express::Module>> prefill_modules_, decode_modules_, current_modules_;
const MNN::Express::Module* base_module_ = nullptr;
void init_runtime();
virtual MNN::Express::VARP gen_position_ids(int seq_len);
bool mTracing = false;
GenerateState mState;
};
// Embedding start
class MNN_PUBLIC Embedding : public Llm {
public:
Embedding(std::shared_ptr<LlmConfig> config);
static Embedding* createEmbedding(const std::string& config_path, bool load = true);
static float dist(MNN::Express::VARP var0, MNN::Express::VARP var1);
virtual void load() override;
MNN::Express::VARP ids_embedding(const std::vector<int>& ids);
MNN::Express::VARP txt_embedding(const std::string& txt);
int dim() const;
private:
virtual MNN::Express::VARP gen_attention_mask(int seq_len) override;
virtual MNN::Express::VARP gen_position_ids(int seq_len) override;
};
// Embedding end
} // namespace Transformer
} // namespace MNN
#endif // LLM_hpp
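
A sketch of the synchronous text path through the Llm API above: create a model from a config file, load it, and stream a response. The config path "config.json" is a placeholder and the include path is assumed; a real config points at a converted model and tokenizer.

#include <iostream>
#include <memory>
#include "llm/llm.hpp" // include path assumed for the header above

using namespace MNN::Transformer;

int main() {
    std::unique_ptr<Llm> llm(Llm::createLLM("config.json"));
    if (llm == nullptr) return 1;
    llm->load(); // loads tokenizer, weights, and runtime
    llm->response("Hello, who are you?", &std::cout, "\n");
    const auto& state = llm->getState();
    std::cerr << "prefill: " << state.prefill_us_ << " us, decode: "
              << state.decode_us_ << " us" << std::endl;
    return 0;
}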

View File

@ -1,16 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>$(DEVELOPMENT_LANGUAGE)</string>
<key>CFBundleIdentifier</key>
<string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>$(PRODUCT_NAME)</string>
<key>CFBundlePackageType</key>
<string>FMWK</string>
</dict>
</plist>

View File

@ -15,8 +15,8 @@
"kind" : "remoteSourceControl",
"location" : "https://github.com/Yogayu/Chat.git",
"state" : {
"branch" : "main",
"revision" : "76b84fe18df1c2bc4b5c5f48ad7340feebb92f28"
"revision" : "e015bd9b36a737486c6ddc7c5af585ac5aee8015",
"version" : "1.0.0"
}
},
{
@ -40,10 +40,10 @@
{
"identity" : "mediapicker",
"kind" : "remoteSourceControl",
- "location" : "https://github.com/exyte/MediaPicker.git",
+ "location" : "https://github.com/Yogayu/MediaPicker.git",
"state" : {
- "revision" : "88769b1b69c2b5e5fa5b65522c08bc7b667a6cb8",
- "version" : "2.2.3"
+ "revision" : "a09658984bfd1b30541daf02a9d7ffc690e17862",
+ "version" : "2.0.0"
}
},
{