//
// ConvolutionInt8Executor.hpp
// MNN
//
// Created by MNN on 2018/07/16.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifndef ConvolutionInt8Executor_hpp
#define ConvolutionInt8Executor_hpp
#include <stdio.h>
#include "core/AutoStorage.h"
#include "backend/cpu/compute/ConvolutionFloatFactory.h"
#include "backend/cpu/compute/ConvolutionIntFactory.hpp"
#include "backend/cpu/CPUConvolution.hpp"
namespace MNN {
// CPU convolution executor that runs with int8-quantized weights.
// Declaration only: the quantize/compute/dequantize pipeline lives in the
// matching .cpp; comments below marked NOTE(review) are inferred from member
// names and should be confirmed against the implementation.
class ConvolutionInt8Executor : public CPUConvolution {
public:
    /**
     * @param convOp   convolution description (kernel/stride/pad/...)
     * @param b        backend that owns this executor
     * @param common   decoded int8 quantization data for the weights
     * @param bias     float bias values; NOTE(review): presumably copied into mBias
     * @param biasSize number of entries in `bias`
     */
    ConvolutionInt8Executor(const Convolution2DCommon *convOp, Backend *b,
                            const ConvolutionCommon::Int8Common *common, const float *bias, size_t biasSize);
    virtual ~ConvolutionInt8Executor() = default;
    virtual ErrorCode onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;
    virtual ErrorCode onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) override;

private:
    std::shared_ptr<Tensor> mWeight;   // quantized weight storage
    AutoStorage<float> mAlpha;         // NOTE(review): likely per-channel dequant scales — confirm in .cpp
    AutoStorage<float> mBias;          // float bias, biasSize entries
    const IDSTQuan *mQuan;             // quantization parameters from the model (not owned)
    Tensor mSrcCopyBuffer;             // staging buffer for the source tensor
    Tensor mTempBuffer;                // intermediate workspace
    Tensor mTempDstBuffer;             // intermediate destination buffer
    CPUConvolution::Im2ColParameter mIm2ColParamter;   // im2col geometry, filled in onResize
    int mSrcCount;                     // NOTE(review): presumably input channel count
    float mAMin;                       // lower clamp for input quantization
    float mAMax;                       // upper clamp for input quantization
    float mQuanScale;                  // input quantization scale factor
};
} // namespace MNN
#endif /* ConvolutionInt8Executor_hpp */