//
// CPUBackend.hpp
// MNN
//
// Created by MNN on 2018/07/06.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CPUBackend_hpp
#define CPUBackend_hpp
#include <map>
#include <memory>
#include "core/Backend.hpp"
#include "core/Execution.hpp"
#include "core/BufferAllocator.hpp"
#include "MNN_generated.h"
namespace MNN {
class CPURuntime : public Runtime {
public:
    friend class CPUBackend;
    CPURuntime(const Backend::Info& info);
    virtual ~CPURuntime();
    int onGetRuntimeStatus(RuntimeStatus statusEnum) const override;
    virtual Backend* onCreate(const BackendConfig* config) const override;
    virtual void onGabageCollect(int level) override; // [sic] spelling matches the Runtime virtual it overrides
    virtual float onGetMemoryInMB() override;
    virtual CompilerType onGetCompilerType() const override {
        return Compiler_Loop;
    }
    void onConcurrencyBegin() const;
    void onConcurrencyEnd() const;
    virtual bool onCheckInfo(Backend::Info& info) const override;

private:
    std::shared_ptr<EagerBufferAllocator> mStaticAllocator;
    int mThreadNumber;
    mutable int mTaskIndex;
    BackendConfig::MemoryMode mMemory;
    BackendConfig::PowerMode mPower;
    BackendConfig::PrecisionMode mPrecision;
    // Backend features
    // CPU features
    float mFlops = 0.0f;
    static Backend* (*gExtraCreate)(const Runtime* runtime);
    size_t mFlags = 0;
    int mAllocator = 0;
};
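
/*
 * Construction sketch (illustrative; the Info fields chosen below are
 * assumptions based on Backend::Info): the runtime owns thread/power/precision
 * policy and hands out CPUBackend instances via onCreate().
 *
 *   Backend::Info info;
 *   info.type      = MNN_FORWARD_CPU;
 *   info.numThread = 4;
 *   CPURuntime runtime(info);
 *   std::unique_ptr<Backend> backend(runtime.onCreate(nullptr)); // nullptr: default BackendConfig
 */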
struct CoreFunctions;
struct CoreInt8Functions;
class CPUResizeCache;
class CPUMemObj : public Backend::MemObj {
public:
    CPUMemObj(BufferAllocator* allocator, MemChunk chunk, int size)
        : mAllocator(allocator), mChunk(chunk), mSize(size) {}
    virtual ~CPUMemObj() {
        if (mAllocator) {
            mAllocator->free(mChunk);
        }
    }
    virtual MemChunk chunk() {
        return mChunk;
    }
    inline int getSize() const {
        return mSize;
    }

private:
    BufferAllocator* mAllocator;
    MemChunk mChunk;
    int mSize;
};
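
/*
 * Ownership sketch (illustrative): CPUMemObj ties a MemChunk to the allocator
 * that produced it, returning the chunk in the destructor. The alloc() call
 * below is an assumption about the BufferAllocator interface.
 *
 *   MemChunk chunk = allocator->alloc(bytes);
 *   Backend::MemObj* mem = new CPUMemObj(allocator, chunk, bytes);
 *   // ... use mem->chunk() ...
 *   delete mem; // frees the chunk via allocator->free(chunk)
 */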
class CPUBackend : public Backend {
public:
    CPUBackend(const CPURuntime* runtime, BackendConfig::PrecisionMode precision, BackendConfig::MemoryMode memory, MNNForwardType type = MNN_FORWARD_CPU, size_t flags = 0);
    virtual ~CPUBackend();
    // Return {sizeDivide, scheduleNumber}: the aligned per-task chunk size and the number of tasks to schedule
    std::pair<int, int> multiThreadDivide(int size) const;
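
    /*
     * Usage sketch (illustrative): divide `size` elements among worker tasks.
     * MNN_CONCURRENCY_BEGIN/END are the thread-pool macros from
     * core/Concurrency.h; how callers consume the pair is an assumption.
     *
     *   auto div = multiThreadDivide(size); // {sizeDivide, scheduleNumber}
     *   MNN_CONCURRENCY_BEGIN(tId, div.second) {
     *       int start    = (int)tId * div.first;
     *       int realSize = std::min(div.first, size - start);
     *       if (realSize > 0) {
     *           // process elements [start, start + realSize)
     *       }
     *   }
     *   MNN_CONCURRENCY_END();
     */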

public:
    virtual MemObj* onAcquire(const Tensor* nativeTensor, StorageType storageType) override;
    virtual bool onClearBuffer() override;
    virtual void onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const override;
    virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                const MNN::Op* op) override;
    virtual void onExecuteBegin() const override;
    virtual void onExecuteEnd() const override;
    virtual void onResizeBegin() override;
    virtual void onResizeEnd() override;
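
    /*
     * Lifecycle sketch (an assumption about caller order, based on the Backend
     * contract): the resize hooks bracket memory planning, the execute hooks
     * bracket inference.
     *
     *   backend->onResizeBegin();
     *   // each Execution's onResize() runs here, acquiring buffers
     *   backend->onResizeEnd();
     *   backend->onExecuteBegin();
     *   // each Execution's onExecute() runs here
     *   backend->onExecuteEnd();
     */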
    const CoreFunctions* functions() const {
        return mCoreFunctions;
    }
    // Return element size for the Tensor, considering pack
    int getTensorSize(const Tensor* tensor, bool multiBytes = false) const;
    const CoreInt8Functions* int8Functions() const {
        return mInt8CoreFunctions;
    }

public:
    class Creator {
    public:
        virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                    const MNN::Op* op, Backend* backend) const = 0;
    };
    static bool addCreator(OpType t, Creator* c);
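
    /*
     * Implementation sketch (names are hypothetical): a concrete Creator
     * builds the Execution for one op type and is registered once through
     * addCreator(), typically via the REGISTER_CPU_OP_CREATOR macro below.
     *
     *   class CPUMyOpCreator : public CPUBackend::Creator {
     *   public:
     *       virtual Execution* onCreate(const std::vector<Tensor*>& inputs,
     *                                   const std::vector<Tensor*>& outputs,
     *                                   const MNN::Op* op, Backend* backend) const override {
     *           return new CPUMyOpExecution(backend); // hypothetical Execution subclass
     *       }
     *   };
     */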
    int threadNumber() const {
        return mRuntime->mThreadNumber;
    }
    BufferAllocator* getBufferAllocator(bool defer_allocator = true) const {
        return mDynamicAllocator.get();
    }
    BackendConfig::MemoryMode memoryMode() const {
        return mMemory;
    }
    BackendConfig::PrecisionMode precisionMode() const {
        return mPrecisionMode;
    }
    CPUResizeCache* getCache() const {
        return mCache;
    }
    virtual const Runtime* getRuntime() override;
#ifdef MNN_USE_THREAD_POOL
    inline int taskIndex() const { return mRuntime->mTaskIndex; }
#endif
    static void initCreatorMap();
    static int getBytes(const Backend* backend, const Tensor* output);
    static DataType getDataType(const Tensor* tensor);

protected:
    MemObj* allocBuffer(int size, Tensor* dest, StorageType storageType);
    const CoreFunctions* mCoreFunctions;
    const CoreInt8Functions* mInt8CoreFunctions;

private:
    std::shared_ptr<EagerBufferAllocator> mStaticAllocator;
    std::shared_ptr<BufferAllocator> mDynamicAllocator;
    CPURuntime* mRuntime;
    BackendConfig::PrecisionMode mPrecisionMode;
    BackendConfig::MemoryMode mMemory;
    static std::map<OpType, CPUBackend::Creator*>* gCreator;
    CPUResizeCache* mCache;
};
/** Execution cast wrapper: inserts tensor casts dynamically. */
class CastWrapExecution : public Execution {
public:
    CastWrapExecution(Backend* backend, DataType runT) : Execution(backend), mRunType(runT) {}
    virtual ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override;

private:
    DataType mRunType;
};
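
/*
 * Usage sketch (an assumption from the class contract, not a confirmed call
 * site): wrap a tensor whose data type differs from the type an op must run
 * in, so onExecute() performs the conversion.
 *
 *   Execution* cast = new CastWrapExecution(backend, DataType_DT_FLOAT);
 *   // cast->onExecute({srcTensor}, {dstTensor}) converts src into dst per mRunType
 */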
#define REGISTER_CPU_OP_CREATOR(name, opType)   \
    void ___##name##__##opType##__() {          \
        static name _temp;                      \
        CPUBackend::addCreator(opType, &_temp); \
    }

#ifdef MNN_SUPPORT_DEPRECATED_OP
#define REGISTER_CPU_OP_CREATOR_OLD(name, opType) \
    void ___##name##__##opType##__() {            \
        static name _temp;                        \
        CPUBackend::addCreator(opType, &_temp);   \
    }
#else
#define REGISTER_CPU_OP_CREATOR_OLD(name, opType) \
    void ___##name##__##opType##__() {            \
    }
#endif
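
/*
 * Registration sketch: the macro only *defines* ___name__opType__(); a central
 * registration unit must still call it so addCreator() actually runs. The file
 * and function names below follow this repo's convention (an assumption), and
 * the creator/op names are hypothetical:
 *
 *   // in CPUMyOp.cpp
 *   REGISTER_CPU_OP_CREATOR(CPUMyOpCreator, OpType_MyOp);
 *
 *   // in CPUOPRegister.cpp
 *   extern void ___CPUMyOpCreator__OpType_MyOp__();
 *   void registerCPUOps() {
 *       ___CPUMyOpCreator__OpType_MyOp__();
 *   }
 */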
} // namespace MNN
#endif /* CPUBackend_hpp */