//
// ArgMaxBufExecution.hpp
// MNN
//
// Created by MNN on 2023/08/11.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef MNN_OPENCL_BUFFER_CLOSED
#ifndef ArgMaxBufExecution_hpp
#define ArgMaxBufExecution_hpp
#include "backend/opencl/execution/image/CommonExecution.hpp"
namespace MNN {
namespace OpenCL {
class ArgMaxBufExecution : public CommonExecution {
2023-09-04 10:42:11 +08:00
public:
2024-04-19 11:58:21 +08:00
ArgMaxBufExecution(const std::string &compute, const MNN::Op *op, Backend *backend, const int axis);
2023-09-04 10:42:11 +08:00
virtual ~ArgMaxBufExecution() = default;
2024-04-19 11:58:21 +08:00
virtual ErrorCode onEncode(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
int getLocalSize(int size, int maxGroupSize);
2023-09-04 10:42:11 +08:00
private:
uint32_t mMaxWorkGroupSize;
std::vector<uint32_t> mGlobalWorkSize = {1, 1, 1};
std::vector<uint32_t> mLocalSize = {1, 1, 1};
std::set<std::string> mBuildOptions;
int mAxis;
2024-04-19 11:58:21 +08:00
OpenCLBackend *mOpenCLBackend;
2024-09-12 12:57:57 +08:00
std::shared_ptr<Tensor> mTempInputTensor;
std::shared_ptr<Tensor> mTempOutputTensor;
bool mNeedUnpackC4;
2023-09-04 10:42:11 +08:00
};
} // namespace OpenCL
} // namespace MNN
#endif /* ArgMaxBufExecution_hpp */
#endif/* MNN_OPENCL_BUFFER_CLOSED */