//
//  MatMulExecution.hpp
//  MNN
//
//  Created by MNN on 2020/07/30.
//  Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MatMulExecution_hpp
#define MatMulExecution_hpp

#include "backend/cuda/core/CUDABackend.hpp"
#include "MNNCUDADefine.hpp"
#include "CutlassGemmBatchedParam.hpp"
#include "MNNCUDAFunction.cuh"

namespace MNN {

namespace CUDA {

// CUDA execution node for the MatMul op.
//
// Holds one member instance per pre-instantiated CUTLASS batched-GEMM
// configuration; judging by the member names, the variants differ in
//   - element types (F16_F16 / F32_F32 / F16_F32 input/accumulate combos),
//   - alignment path (AlignCuda/Align1 vs AlignTensor/Align8),
//   - operand layout (Row_Column vs Row_Row), and
//   - target architecture (Sm75 tensor-op vs plain CUDA SIMT),
// with the actual kernel selection done in the .cu implementation
// (not visible here — confirm against MatMulExecution.cu).
class MatMulExecution : public Execution {
public:
    // transposeA / transposeB: transpose flags of the MatMul op for the
    // A and B operands, captured at construction time.
    MatMulExecution(bool transposeA, bool transposeB, Backend *backend);
    virtual ~MatMulExecution();

    // Shape/allocation phase: prepares buffers and kernel arguments for the
    // given input/output tensor shapes.
    virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
    // Compute phase: launches the selected batched GEMM on the tensors.
    virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
    // Fills the CUTLASS GEMM argument structs from the current tensors;
    // presumably called from onResize (see mResizeSetArgument) — verify in the .cu file.
    void setArguments(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs);

private:
    bool mTransposeA;
    bool mTransposeB;

    // Optional bias tensor storage (ownership via shared_ptr).
    std::shared_ptr<Tensor> mBiasTensor;

    // SM75 tensor-op batched GEMM instantiations (align-1 "Cuda" and align-8 "Tensor" paths).
    GemmBatchedTensor_F16_F16_Linear_AlignCuda_Row_Column_Sm75 mGemmBatchedF16LnAlign1RCSm75;
    GemmBatchedTensor_F32_F32_Linear_AlignCuda_Row_Column_Sm75 mGemmBatchedF32F32LnAlign1RCSm75;
    GemmBatchedTensor_F16_F32_Linear_AlignCuda_Row_Column_Sm75 mGemmBatchedF16F32LnAlign1RCSm75;
    GemmBatchedTensor_F16_F16_Linear_AlignTensor_Row_Column_Sm75 mGemmBatchedF16LnAlign8RCSm75;
    GemmBatchedTensor_F32_F32_Linear_AlignTensor_Row_Column_Sm75 mGemmBatchedF32F32LnAlign8RCSm75;
    GemmBatchedTensor_F16_F32_Linear_AlignTensor_Row_Column_Sm75 mGemmBatchedF16F32LnAlign8RCSm75;
    GemmBatchedTensor_F16_F16_Linear_AlignTensor_Row_Row_Sm75 mGemmBatchedF16LnAlign8RRSm75;
    GemmBatchedTensor_F32_F32_Linear_AlignTensor_Row_Row_Sm75 mGemmBatchedF32F32LnAlign8RRSm75;
    GemmBatchedTensor_F16_F32_Linear_AlignTensor_Row_Row_Sm75 mGemmBatchedF16F32LnAlign8RRSm75;

    // Plain CUDA-core batched GEMM instantiations (fallback for non-SM75-tensor-op paths).
    GemmBatchedCuda_F16_F16_Linear_AlignCuda_Row_Column mGemmBatchedCudaF16LnAlign1RC;
    GemmBatchedCuda_F32_F32_Linear_AlignCuda_Row_Column mGemmBatchedCudaF32F32LnAlign1RC;
    GemmBatchedCuda_F16_F32_Linear_AlignCuda_Row_Column mGemmBatchedCudaF16F32LnAlign1RC;
    GemmBatchedCuda_F16_F16_Linear_AlignCuda_Row_Row mGemmBatchedCudaF16LnAlign1RR;
    GemmBatchedCuda_F32_F32_Linear_AlignCuda_Row_Row mGemmBatchedCudaF32F32LnAlign1RR;
    GemmBatchedCuda_F16_F32_Linear_AlignCuda_Row_Row mGemmBatchedCudaF16F32LnAlign1RR;

    // Scratch space: tensor owns the allocation, mWorkspace is its raw device pointer.
    std::shared_ptr<Tensor> workspaceTensor;
    uint8_t* mWorkspace;

    // Temporary buffers for layout/type-converted copies of A and B
    // (used when mNeedATempBuffer / mNeedBTempBuffer / mNeedConvertMatAB apply —
    // semantics set in the .cu implementation).
    void* mTempMatA;
    void* mTempMatB;
    void* mBiasPtr = nullptr;
    bool mNeedATempBuffer = false;
    bool mNeedBTempBuffer = false;
    bool mUseRRLayout = false;           // Row_Row layout path selected.
    bool mResizeSetArgument = false;     // whether arguments were already set during onResize.
    bool mNeedConvertMatAB = false;      // whether A/B must be converted before the GEMM.

    CutlassGemmInfo mGemmInfo;           // problem sizes/strides for the GEMM.
    int mBatch = 1;                      // batch count of the batched GEMM.
    int mGpuComputeCap;                  // device compute capability, drives kernel selection.
};

} // namespace CUDA
} // namespace MNN

#endif