// MNN/source/backend/opencl/execution/buffer/ConvBufLowMemoryExecution.hpp
//
// ConvBufLowMemoryExecution.hpp
// MNN
//
// Created by MNN on 2023/10/12.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifdef MNN_LOW_MEMORY
#ifndef MNN_OPENCL_BUFFER_CLOSED
#ifndef ConvBufLowMemoryExecution_hpp
#define ConvBufLowMemoryExecution_hpp
#include "core/ConvolutionCommon.hpp"
#include "ConvBufExecution.hpp"
namespace MNN {
namespace OpenCL {
2024-04-19 11:58:21 +08:00
class ConvBufLowMemoryExecution : public ConvBufCommonExecution, public CommonExecution {
2023-12-27 17:26:44 +08:00
public:
ConvBufLowMemoryExecution(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs, const MNN::Op *op, Backend *backend);
2024-04-19 11:58:21 +08:00
ConvBufLowMemoryExecution(std::shared_ptr<ConvBufResource> resource, const MNN::Op* op, Backend* backend);
2023-12-27 17:26:44 +08:00
virtual ~ConvBufLowMemoryExecution();
2024-09-12 12:57:57 +08:00
virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
2023-12-27 17:26:44 +08:00
virtual bool onClone(Backend* bn, const Op* op, Execution** dst) override;
private:
2025-04-28 11:38:44 +08:00
void getInfoFromOpLowMemory(void *weight_ptr);
void set1x1WeightLowMemory();
void setGeneralWeightLowMemory();
2023-12-27 17:26:44 +08:00
void tuneGeneralCaseLowMemory(Tensor * input, Tensor * output);
2024-09-12 12:57:57 +08:00
void useFPWeightGemmLowMemory(Tensor * input, Tensor * output);
void tuneGemvLowMemory(Tensor * input, Tensor * output);
2024-12-31 15:34:08 +08:00
void tuneGemmLowMemory(Tensor * input, Tensor * output);
2025-04-28 11:38:44 +08:00
bool convertToQuantWeight1x1Buffer(cl::Buffer input);
2023-12-27 17:26:44 +08:00
std::vector<int> mPaddings{0, 0};
std::vector<uint32_t> mGlobalWorkSize{1, 1, 1};
std::vector<uint32_t> mLocalWorkSize{1, 1, 1, 1};
void *mFilterDataPtr = nullptr;
2024-09-12 12:57:57 +08:00
bool mUseFPWeight = false;
2024-07-04 11:53:45 +08:00
std::shared_ptr<Tensor> mConvGemmInpTensor;
std::shared_ptr<Tensor> mConvGemmOutTensor;
2024-09-12 12:57:57 +08:00
std::shared_ptr<Tensor> mConvGemmWeightTensor;
2024-07-04 11:53:45 +08:00
std::shared_ptr<KernelWrap> mBufferToConv1x1Kernel = nullptr;
2024-08-24 15:46:21 +08:00
uint32_t batchConvMode = 0; // batch > 1 convolution input arrage mode. 0 is need tune; 1 arrage to n/4chw4; 2 arrage to c/4hwn4
2024-09-12 12:57:57 +08:00
std::shared_ptr<StrassenMatrixComputor> mStrassenComputor;
2023-12-27 17:26:44 +08:00
};
} // namespace OpenCL
} // namespace MNN
#endif /* ConvBufLowMemoryExecution_hpp */
#endif /* MNN_OPENCL_BUFFER_CLOSED */
#endif /* MNN_LOW_MEMORY */