//
// CPUMatMul.hpp
// MNN
//
// Created by MNN on 2018/08/06.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CPUMATMUL_HPP
#define CPUMATMUL_HPP
#include <functional>
#include <memory>
#include <utility>
#include <vector>

#include "core/Execution.hpp"
#include "backend/cpu/compute/StrassenMatmulComputor.hpp"
namespace MNN {
class CPUMatMul : public Execution {
public:
CPUMatMul(Backend *backend, bool transposeA, bool transposeB, bool transposeC, bool multiThread);
2019-04-17 10:49:11 +08:00
virtual ~CPUMatMul() = default;
virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
2019-04-17 10:49:11 +08:00
virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
void execute(const float* APtr, const float* BPtr, float* CPtr, const float* BiasPtr);
2019-04-17 10:49:11 +08:00
private:
void _scheduleForVec(int e, int l, int h);
void _scheduleForVecE(int e, int l, int h);
2019-04-17 10:49:11 +08:00
bool mTransposeA;
bool mTransposeB;
bool mTransposeC;
2020-02-26 09:57:17 +08:00
bool mSupportMultiThread = false;
std::vector<std::pair<std::function<void(int, const float*, const float*, const float*)>, int>> mPreFunctions;
std::vector<std::pair<std::function<void(int, const float*, const float*, const float*, float*)>, int>> mPostFunctions;
2020-02-26 09:57:17 +08:00
std::shared_ptr<StrassenMatrixComputor> mComputer;
bool mStrassenUseBiasDirectly = false;
2019-04-17 10:49:11 +08:00
};
} // namespace MNN
#endif // CPUMATMUL_HPP