//
//  CPUDepthwiseConvInt8.hpp
//  MNN
//
//  Created by MNN on 2019/5/17.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#ifndef CPUDepthwiseConvInt8_hpp
#define CPUDepthwiseConvInt8_hpp
#include "backend/cpu/CPUConvolution.hpp"
namespace MNN {
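
// Int8 depthwise convolution execution for the CPU backend.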
class CPUDepthwiseConvInt8 : public CPUConvolution {
public:
    CPUDepthwiseConvInt8(Backend *backend, const MNN::Convolution2D *convOp);
    virtual ~CPUDepthwiseConvInt8() = default;
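    // Called when tensor shapes change; prepares the per-thread kernel (mThreadFunction).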
    virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
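    // Runs the quantized depthwise convolution, dispatching mThreadFunction across mThreadNumber threads.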
    virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
private:
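    // Number of worker threads.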
    int mThreadNumber;
    // int mPadX;
    // int mPadY;
    // Whether the op applies a ReLU or ReLU6 activation.
    bool mRelu;
    // True means the intermediate accumulator is INT16; false means INT32.
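    // INT16 accumulation is faster but can overflow when the kernel or value range is large.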
    bool mFastMode;
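    // Quantized int8 weights.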
    std::shared_ptr<Tensor> mWeightInt8;
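    // Bias stored as int32, added to the accumulator before requantization.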
    std::shared_ptr<Tensor> mBiasInt32;
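    // Float scales used to requantize accumulators back to int8.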
    std::shared_ptr<Tensor> mScaleFloat;
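    // Per-thread kernel: tId selects the slice; reads int8 input, writes int8 output.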
    std::function<void(int tId, const int8_t *src, int8_t *dst)> mThreadFunction;
};
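
// Lifecycle sketch (hypothetical caller; instances are normally created by the
// CPU backend's op factory rather than constructed directly):
//   exec.onResize(inputs, outputs);  // re-plan whenever shapes change
//   exec.onExecute(inputs, outputs); // run once per inference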
} // namespace MNN
#endif /* CPUDepthwiseConvInt8_hpp */