//
// SoftmaxBufExecution.hpp
// MNN
//
// Created by MNN on 2019/01/31.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_OPENCL_BUFFER_CLOSED
#ifndef SoftmaxBufExecution_hpp
#define SoftmaxBufExecution_hpp
#include <vector>
#include "core/Execution.hpp"
#include "backend/opencl/core/OpenCLBackend.hpp"
#include "backend/opencl/execution/image/CommonExtension.hpp"
namespace MNN {
namespace OpenCL {
/// OpenCL buffer-based Execution for the Softmax operator.
///
/// Declaration only — method bodies live in the matching .cpp, so the
/// comments below describe the visible contract rather than implementation.
class SoftmaxBufExecution : public Execution, public CommonExtension {
public:
    /// @param inputs  input tensors of the op, inspected at construction time
    /// @param axis    softmax axis requested by the op (stored in mAxis)
    /// @param backend owning backend; presumably an OpenCLBackend, since it
    ///                is kept as mOpenCLBackend — confirm the cast in the .cpp
    SoftmaxBufExecution(const std::vector<Tensor *> &inputs, int axis, Backend *backend);
    virtual ~SoftmaxBufExecution() = default;

    /// Execution interface: prepare kernel / work sizes for the given shapes.
    virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
    /// Execution interface: enqueue the softmax kernel for these tensors.
    virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;

    /// Builds mKernel for the given local size; returns true on success
    /// (presumed from the bool return — verify semantics in the .cpp).
    bool buildSoftmaxKernel(int localSize);

private:
    // Picks a local work-group size for `size`, bounded by maxGroupSize.
    int getLocalSize(int size, int maxGroupSize);

    cl::Kernel mKernel;            // compiled OpenCL softmax kernel
    uint32_t mMaxWorkGroupSize;    // device work-group limit for mKernel
    OpenCLBackend *mOpenCLBackend; // non-owning pointer back to the backend
    std::vector<uint32_t> mGlobalWorkSize{1, 1, 1};
    // NOTE(review): 4 entries here vs. 3 in mGlobalWorkSize — matches the
    // upstream source, but verify the extra element is intentional.
    std::vector<uint32_t> mLocalWorkSize{1, 1, 1, 1};
    int mAxis;                     // softmax axis passed to the constructor
};
} // namespace OpenCL
} // namespace MNN
#endif /* SoftmaxBufExecution_hpp */
#endif /* MNN_OPENCL_BUFFER_CLOSED */