//
//  CPUInt8ToFloat.cpp
//  MNN
//
//  Created by MNN on 2019/5/22.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "backend/cpu/CPUInt8ToFloat.hpp"
#include "backend/cpu/CPUBackend.hpp"
#include "core/Concurrency.h"
#include "core/Macro.h"
#include "compute/Int8FunctionsOpt.h"
#include "compute/CommonOptFunction.h"
#include "core/TensorUtils.hpp"

namespace MNN {

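// CPUInt8ToFloat dequantizes an int8 tensor back to float, applying the
// per-tensor or per-channel scales (and zero point) carried in the op's
// QuantizedFloatParam.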
CPUInt8ToFloat::CPUInt8ToFloat(Backend* backend, const MNN::Op* param) : Execution(backend) {
    auto scale         = param->main_as_QuantizedFloatParam();
    const int scaleLen = scale->tensorScale()->size();
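    // The scale table is padded up to a multiple of the backend's pack size so
    // the vectorized kernel can always load whole packs of scale values.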
    auto pack = static_cast<CPUBackend*>(backend)->functions()->pack;
    mScales.reset(Tensor::createDevice<float>({UP_DIV(scaleLen, pack) * pack}));
    mValid = backend->onAcquireBuffer(mScales.get(), Backend::STATIC);
    if (!mValid) {
        return;
    }
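    // A single scale is broadcast across one pack; per-channel scales are
    // copied in full, with the padded tail zero-filled.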
    if (1 == scaleLen) {
        mSingle = true;
        for (int i = 0; i < pack; ++i) {
            mScales->host<float>()[i] = scale->tensorScale()->data()[0];
        }
    } else {
        memset(mScales->host<float>(), 0, UP_DIV(scaleLen, pack) * pack * sizeof(float));
        memcpy(mScales->host<float>(), scale->tensorScale()->data(), scaleLen * sizeof(float));
    }
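    // zeroPoint is the int8 value that represents 0.0f; the kernel subtracts
    // it from each input value before scaling.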
    mZeroPoint = scale->zeroPoint();
}

CPUInt8ToFloat::~CPUInt8ToFloat() {
    backend()->onReleaseBuffer(mScales.get(), Backend::STATIC);
}

ErrorCode CPUInt8ToFloat::onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) {
    const auto input = inputs[0];
    auto output      = outputs[0];
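    // Pack size and int8 kernels come from the backend, which may provide
    // SIMD-specialized implementations for the current CPU.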
    auto pack  = static_cast<CPUBackend*>(backend())->functions()->pack;
    auto int8F = static_cast<CPUBackend*>(backend())->int8Functions();

    const auto inputDataPtr = input->host<int8_t>();
    auto outputDataPtr      = output->host<float>();
    const auto scaleDataPtr = mScales->host<float>();
    const int channels      = input->channel();
    int icDiv4              = UP_DIV(channels, pack);
    const int batch         = input->batch();
    const int batchStride   = input->stride(0); // currently unused
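    // Elements per channel pack: the product of the remaining (spatial)
    // dimensions after batch and channel.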
    int oc4Stride = 1;
    for (int i = 2; i < input->dimensions(); ++i) {
        oc4Stride *= input->length(i);
    }
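    // With one shared scale there is no per-channel boundary to respect, so
    // all channel packs of a batch element collapse into a single run.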
    if (mSingle) {
        oc4Stride = icDiv4 * oc4Stride;
        icDiv4    = 1;
    }
    int total = batch * icDiv4;

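    // One task per (batch, channel-pack) slice; each dequantizes
    // oc4Stride * pack values, roughly dst[i] = (src[i] - zeroPoint) * scale.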
    MNN_CONCURRENCY_BEGIN(tId, total) {
        int bIndex = tId / icDiv4; // batch index (unused: offsets are derived from tId directly)
        int z      = tId % icDiv4; // channel-pack index, selects the scale slice
        const auto srcChannelPtr   = inputDataPtr + tId * oc4Stride * pack;
        const auto scaleChannelPtr = scaleDataPtr + z * pack;
        auto dstChannelPtr         = outputDataPtr + tId * oc4Stride * pack;
        int8F->MNNInt8ScaleToFloat(dstChannelPtr, srcChannelPtr, scaleChannelPtr, oc4Stride, mZeroPoint);
    }
    MNN_CONCURRENCY_END();

    return NO_ERROR;
}

class CPUInt8ToFloatCreator : public CPUBackend::Creator {
public:
    virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                const MNN::Op* op, Backend* backend) const override {
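        // Without quantization parameters there is nothing to scale, so fall
        // back to a plain int8 -> float cast.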
        if (nullptr == op->main_as_QuantizedFloatParam()) {
            return new CastWrapExecution(backend, DataType_DT_FLOAT);
        }
        return new CPUInt8ToFloat(backend, op);
    }
};

REGISTER_CPU_OP_CREATOR(CPUInt8ToFloatCreator, OpType_Int8ToFloat);

} // namespace MNN