MNN/source/backend/opencl/execution/buffer/MatmulBufExecution.hpp

//
// MatmulBufExecution.hpp
// MNN
//
// Created by MNN on 2019/01/31.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_OPENCL_BUFFER_CLOSED
#ifndef MatMulBufExecution_hpp
#define MatMulBufExecution_hpp

#include "backend/opencl/execution/image/CommonExecution.hpp"

namespace MNN {
namespace OpenCL {

class MatMulBufExecution : public CommonExecution {
public:
    MatMulBufExecution(const std::vector<Tensor *> &inputs, const MNN::Op *op, Backend *backend, bool transposeA, bool transposeB);
    virtual ~MatMulBufExecution() = default;

    // Encodes the OpenCL matmul kernel and its work sizes for the given inputs and outputs.
    virtual ErrorCode onEncode(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;

private:
    bool mTransposeA;                             // whether input A is treated as transposed
    bool mTransposeB;                             // whether input B is treated as transposed
    std::string mKernelName;                      // name of the OpenCL kernel selected for this matmul
    uint32_t mMaxWorkGroupSize;                   // maximum work-group size reported for the kernel
    std::vector<int> mInput0Shape;                // cached shape of input A
    std::vector<int> mInput1Shape;                // cached shape of input B
    OpenCLBackend *mOpenCLBackend;                // backend providing the OpenCL runtime and kernels
    std::vector<uint32_t> mGlobalWorkSize{1, 1};  // global NDRange for the kernel launch
    std::vector<uint32_t> mLocalWorkSize{1, 1};   // local work-group size for the kernel launch
};

} // namespace OpenCL
} // namespace MNN
#endif /* MatMulBufExecution_hpp */
#endif /* MNN_OPENCL_BUFFER_CLOSED */