//
// Session.hpp
// MNN
//
// Created by MNN on 2018/07/30.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef Session_hpp
#define Session_hpp
#include <MNN/Tensor.hpp>
#include <map>
#include <memory>
#include <vector>
#include "Pipeline.hpp"
#include "Schedule.hpp"
#include "core/Backend.hpp"
#include "core/Macro.h"
#include "shape/SizeComputer.hpp"
#define MNN_DEFAULT_TUNING_NUMBER 5
namespace MNN {
struct Net;
/** Inference unit. Multiple sessions may share one net. */
class MNN_PUBLIC Session {
public:
struct ModeGroup {
Interpreter::SessionMode callBackMode = Interpreter::Session_Debug;
Interpreter::SessionMode inputMode = Interpreter::Session_Input_Inside;
Interpreter::SessionMode outputMode = Interpreter::Session_Output_Inside;
Interpreter::SessionMode backendMode = Interpreter::Session_Backend_Fix;
Interpreter::SessionMode resizeMode = Interpreter::Session_Resize_Direct;
Interpreter::SessionMode memoryUsageMode = Interpreter::Session_Memory_Collect;
Interpreter::SessionMode codegenMode = Interpreter::Session_Codegen_Disable;
int maxTuningNumber = MNN_DEFAULT_TUNING_NUMBER;
int geometryMask = 0xFFFF;
bool checkNetBuffer = true;
RuntimeHint runtimeHint;
void setHint(Interpreter::HintMode hint, int magic);
void setMode(Interpreter::SessionMode mode);
void setExternalPath(std::string path, int type);
};
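/* A minimal configuration sketch (illustrative values; the SessionMode
* constants are declared in MNN/Interpreter.hpp):
*
*     Session::ModeGroup modes;
*     modes.callBackMode    = Interpreter::Session_Release;      // skip per-op callbacks
*     modes.backendMode     = Interpreter::Session_Backend_Auto; // let the scheduler pick backends
*     modes.maxTuningNumber = 20; // widen the kernel auto-tuning search
*/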
Session(Schedule::ScheduleInfo&& info, const ModeGroup& mode,
RuntimeInfo&& runtime);
~Session();
Session* clone(RuntimeInfo&& runtime, std::shared_ptr<Schedule::ScheduleInfo> sharedConst);
static void createPipelineBackend(Schedule::PipelineInfo& iter, RuntimeInfo& runtime);
public:
/**
* @brief run inference.
* @return result code.
*/
ErrorCode run() const;
/**
* @brief infer with callbacks and sync option.
* @param enterCallback callback invoked before each op is executed.
* @param exitCallback callback invoked after each op is executed.
* @param sync whether to wait until all ops are done before returning.
* @return result code.
*/
ErrorCode runWithCallBack(const TensorCallBackWithInfo& enterCallback, const TensorCallBackWithInfo& exitCallback,
bool sync = false) const;
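/* A usage sketch for tracing op execution (TensorCallBackWithInfo is the
* std::function typedef from MNN/Interpreter.hpp; returning true from a
* callback keeps inference going):
*
*     TensorCallBackWithInfo before = [](const std::vector<Tensor*>&,
*                                        const OperatorInfo* info) {
*         MNN_PRINT("enter %s (%s)\n", info->name().c_str(), info->type().c_str());
*         return true;
*     };
*     TensorCallBackWithInfo after = [](const std::vector<Tensor*>&,
*                                       const OperatorInfo*) { return true; };
*     session->runWithCallBack(before, after, true); // sync: wait for all ops
*/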
bool getInfo(Interpreter::SessionInfoCode code, void* ptr) const;
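/* A usage sketch, assuming Interpreter::MEMORY (from SessionInfoCode in
* MNN/Interpreter.hpp) reports the session's memory footprint as a float in MB:
*
*     float memoryMB = 0.0f;
*     if (session->getInfo(Interpreter::MEMORY, &memoryMB)) {
*         MNN_PRINT("session memory: %.2f MB\n", memoryMB);
*     }
*/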
void openResizeCheck();
ErrorCode fixResizeCache();
public:
/**
* @brief resize tensors and buffers in response to input shape changes.
* @return result code.
*/
ErrorCode resize();
/**
* @brief set whether resize is needed.
* @param flag whether resize is needed.
*/
void setNeedResize(bool flag = true) {
mNeedResize = flag;
}
void setNeedMalloc(bool flag = true) {
mNeedMalloc = flag;
}
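/* A minimal sketch of the re-plan flow after an input shape changes
* (NO_ERROR comes from MNN's ErrorCode.hpp):
*
*     session->setNeedResize();
*     if (NO_ERROR != session->resize()) {
*         // shape computation or buffer allocation failed
*     }
*/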
Runtime* getCPURuntime() {
return mRuntime.second.get();
}
const RuntimeInfo& getRuntime() const {
return mRuntime;
}
public:
/**
* @brief get the backend that created the tensor.
* @param tensor given tensor.
* @return backend that created the tensor, or NULL if it was created by the default (CPU) backend.
*/
const Backend* getBackEnd(const Tensor* tensor) const;
/**
* @brief get input tensor for given op name.
* @param name given op name; if NULL, the first input tensor is returned.
* @return input tensor if found, NULL otherwise.
*/
Tensor* getInput(const char* name) const;
/**
* @brief get output tensor for given op name.
* @param name given op name; if NULL, the first output tensor is returned.
* @return output tensor if found, NULL otherwise.
*/
Tensor* getOutput(const char* name) const;
/**
* @brief get all output tensors.
* @return map from tensor name to output tensor.
*/
const std::map<std::string, Tensor*>& getOutputAll() const;
const std::map<std::string, Tensor*>& getInputAll() const;
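/* A minimal end-to-end sketch using the accessors above; srcData is a
* hypothetical float buffer already matching the input's byte size, and
* host<T>()/size() come from MNN/Tensor.hpp:
*
*     Tensor* input = session->getInput(nullptr); // first input tensor
*     ::memcpy(input->host<float>(), srcData, input->size());
*     session->run();
*     Tensor* output = session->getOutput(nullptr); // first output tensor
*     const float* result = output->host<float>();
*/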
/**
* @brief check whether the session is valid.
* @return true if the session is valid, false otherwise.
*/
inline bool valid() const {
return mValid;
}
/**
* @brief write the session's constant values back to the original model's constant blobs.
* @return error code.
*/
ErrorCode updateToModel(Net* net) const;
void waitAsyncResize();
bool hasAsyncWork();
bool loadCache(const void* buffer, size_t size);
std::pair<const void*, size_t> getCache();
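/* A sketch of persisting tuned-kernel caches across launches; "blob" is a
* hypothetical std::vector<char> read back from disk, and the cache layout
* is opaque to callers:
*
*     auto cache = session->getCache(); // {buffer, byte size}, may be {nullptr, 0}
*     // ... write cache.second bytes starting at cache.first to disk ...
*     // on a later launch, restore before resizing/running:
*     session->loadCache(blob.data(), blob.size());
*/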
Tensor* getTensor(int index) const;
Schedule::PipelineInfo& getPipelineInfo(int index) const;
protected:
const std::vector<std::shared_ptr<Pipeline>>& getPipelines() const {
return this->mPipelines;
}
private:
void _setUpTensorInfo(const Schedule::ScheduleInfo& info);
private:
RuntimeInfo mRuntime;
std::vector<std::shared_ptr<Pipeline>> mPipelines;
bool mNeedResize = true;
bool mValid = true;
bool mNeedMalloc = true;
Interpreter::SessionMode mCallBackMode;
Interpreter::SessionMode mMemoryUsageMode;
Interpreter::SessionMode mCodegenMode;
Schedule::ScheduleInfo mInfo;
ModeGroup mMode;
};
} // namespace MNN
#endif /* Session_hpp */