MNN/source/backend/cpu/CPUQuantizedSoftmax.hpp

//
// CPUQuantizedSoftmax.hpp
// MNN
//
// Created by MNN on 2018/09/29.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef CPUQuantizedSoftmax_hpp
#define CPUQuantizedSoftmax_hpp
#include "core/Execution.hpp"
namespace MNN {

template <typename T>
class CPUQuantizedSoftmax : public Execution {
public:
    CPUQuantizedSoftmax(Backend *backend, const Op *op);
    virtual ~CPUQuantizedSoftmax() = default;

    virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
    virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;

    // Softmax over quantized uint8 data: beta and the input scale are folded into
    // the fixed-point pair (inputBetaMultiplier, inputBetaLeftShift).
    void QuantizedSoftmax(const uint8_t *inputData, const std::vector<int> &inputDims, int32_t inputBetaMultiplier,
                          int32_t inputBetaLeftShift, uint8_t *outputData, const std::vector<int> &outputDims);

private:
    // Fixed-point representation of beta * input scale: multiplier plus left shift.
    int32_t mInputMultiplier;
    int mInputLeftShift;
    // Smallest (input - rowMax) difference that still contributes to the sum.
    int mDiffMin;
    float mBeta;
    float mInputScale;
    std::vector<int> mInputDims;
    std::vector<int> mOutputDims;
};
} // namespace MNN
#endif /* CPUQuantizedSoftmax_hpp */