//
//  ShapeSqueeze.cpp
//  MNN
//
//  Created by MNN on 2019/01/10.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "shape/SizeComputer.hpp"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include <string.h> // ::memset

namespace MNN {
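// Shape inference for Unsqueeze: inserts a size-1 axis at each requested position,
// e.g. unsqueezing axes {0, 2} of a (3, 5) tensor yields (1, 3, 1, 5).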
class UnSqueezeSizeComputer : public SizeComputer {
    virtual bool onComputeSize(const MNN::Op* op, const std::vector<Tensor*>& inputs,
                               const std::vector<Tensor*>& outputs) const override {
        MNN_ASSERT(1 == outputs.size());

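        // The axes come either from the op's SqueezeParam or from an optional second input tensor.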
        const int* squeezeDim = nullptr;
        int squeezeDimSize    = 0;
        if (nullptr != op->main_as_SqueezeParam()->squeezeDims()) {
            squeezeDim     = op->main_as_SqueezeParam()->squeezeDims()->data();
            squeezeDimSize = op->main_as_SqueezeParam()->squeezeDims()->size();
        } else if (inputs.size() > 1) {
            squeezeDim     = inputs[1]->host<int>();
            squeezeDimSize = inputs[1]->elementSize();
        }
        auto& ob = outputs[0]->buffer();
        auto& ib = inputs[0]->buffer();
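        // Unsqueeze raises the rank: output rank = input rank + number of inserted axes.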
        ob.dimensions = ib.dimensions + squeezeDimSize;
        uint32_t mask[MNN_MAX_TENSOR_DIM];
        ::memset(mask, 0, sizeof(mask));
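        // Mark every inserted axis; negative values count from the end of the output shape.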
        for (int i = 0; i < squeezeDimSize; i++) {
            int axis = squeezeDim[i];
            if (axis < 0) {
                axis += ob.dimensions;
            }
            mask[axis] = 1;
        }
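        // Marked axes get extent 1; all other axes copy the input extents in order.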
        int oDim      = 0;
        for (int i = 0; i < ob.dimensions; i++) {
            ob.dim[i].extent = 1;
            if (mask[i] == 0) {
                ob.dim[i].extent = ib.dim[oDim].extent;
                oDim++;
            }
        }
        ob.type                                               = inputs[0]->buffer().type;
        TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat;

        return true;
    }
};
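
// Shape inference for Squeeze: drops the requested size-1 axes from the input shape,
// e.g. squeezing axes {0, 2} of a (1, 3, 1, 5) tensor yields (3, 5).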
class SqueezeSizeComputer : public SizeComputer {
    virtual bool onComputeSize(const MNN::Op* op, const std::vector<Tensor*>& inputs,
                               const std::vector<Tensor*>& outputs) const override {
        MNN_ASSERT(1 == outputs.size());

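        // As in Unsqueeze, the axes come from SqueezeParam or from the optional second input.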
        const int* squeezeDim = nullptr;
        int squeezeDimSize    = 0;
        if (nullptr != op->main_as_SqueezeParam()->squeezeDims()) {
            squeezeDim     = op->main_as_SqueezeParam()->squeezeDims()->data();
            squeezeDimSize = op->main_as_SqueezeParam()->squeezeDims()->size();
        } else if (inputs.size() > 1) {
            squeezeDim     = inputs[1]->host<int>();
            squeezeDimSize = inputs[1]->elementSize();
        }
        uint32_t mask[MNN_MAX_TENSOR_DIM];
        ::memset(mask, 0, sizeof(mask));
        auto& ob = outputs[0]->buffer();
        auto& ib = inputs[0]->buffer();
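        // Each requested axis must currently have extent 1; negative axes count from the end.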
        for (int i = 0; i < squeezeDimSize; i++) {
            int axis = squeezeDim[i];
            if (axis < 0) {
                axis += ib.dimensions;
            }
            if (1 != ib.dim[axis].extent) {
                MNN_ERROR("Cannot squeeze dim[%d]: expected extent 1, got %d. Input shape:", axis, ib.dim[axis].extent);
                inputs[0]->printShape();
                return false;
            }
            mask[axis] = 1;
        }
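        // When no axes are given, squeeze every dimension whose extent is 1.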
        if (squeezeDimSize == 0) {
            for (int i = 0; i < ib.dimensions; ++i) {
                if (ib.dim[i].extent == 1) {
                    mask[i] = 1;
                    ++squeezeDimSize;
                }
            }
        }
        // in = Tensor(shape=())
        // out = Squeeze(in) should also return a tensor with shape=(), but then
        // `squeezeDimSize` and `ib.dimensions` are both 0.
        MNN_ASSERT(squeezeDimSize <= ib.dimensions);
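
        // Copy the surviving extents into the output, skipping the masked (squeezed) axes.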
        ob.dimensions = ib.dimensions - squeezeDimSize;
        int oDim      = 0;
        for (int i = 0; i < ib.dimensions; i++) {
            if (mask[i] == 0) {
                ob.dim[oDim].extent = ib.dim[i].extent;
                oDim++;
            }
        }
        ob.type                                               = inputs[0]->buffer().type;
        TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat;
        return true;
    }
};
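
// Register both computers in MNN's shape-inference registry.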
REGISTER_SHAPE(SqueezeSizeComputer, OpType_Squeeze);
REGISTER_SHAPE(UnSqueezeSizeComputer, OpType_Unsqueeze);
} // namespace MNN