//
// Expr.cpp
// MNN
//
// Created by MNN on 2019/06/10.
// Copyright © 2018, Alibaba Group Holding Limited
//
#define FLATBUFFERS_PREFER_PRINTF
#include <stack>
#include "core/OpCommonUtils.hpp"
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/Executor.hpp>
#include <MNN/expr/ExprCreator.hpp>
#include "Utils.hpp"
#include "RuntimeAttr.hpp"
#include "core/FileLoader.hpp"
#include "core/TensorUtils.hpp"
#include "core/WrapExecution.hpp"
#include "utils/InitNet.hpp"
//#define MNN_OPEN_TIME_TRACE
#include "MNN/AutoTime.hpp"
#include "MNN/expr/ExecutorScope.hpp"
#include "half.hpp"
#include "geometry/GeometryComputer.hpp"
#include "geometry/GeometryComputerUtils.hpp"
//#define MNN_EXPRESS_ERROR_REPORT
static inline std::string numberToString(int index) {
char s[16]; // large enough for any 32-bit int, including sign and terminator
snprintf(s, sizeof(s), "%d", index);
return std::string(s);
}
static bool HasUnknownDim(const std::vector<int>& dims) {
for (const int& dim : dims) {
if (dim < 0) {
return true;
}
}
return false;
}
namespace MNN {
namespace Express {
void Variable::Info::syncSize() {
size = 1;
for (int i=0; i<dim.size(); ++i) {
if (dim[i] <= 0) {
// Not valid
size = 0;
return;
}
if (order == NC4HW4 && i == 1) {
size *= (UP_DIV(dim[1], 4) * 4);
} else {
size *= dim[i];
}
}
}
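// Worked example for the NC4HW4 rounding above (illustration only): for
// dim = {1, 3, 224, 224} the channel axis (i == 1) is padded up to a multiple
// of 4, so size = 1 * (UP_DIV(3, 4) * 4) * 224 * 224 = 1 * 4 * 224 * 224.
//
// VARP::fix evaluates the variable immediately and replaces its expression
// with a standalone one of the given input type that owns the computed tensor.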
bool VARP::fix(VARP::InputType type) const {
if (nullptr == mContent->expr().first->get()) {
mContent->expr().first->mType = type;
return true;
}
auto info = mContent->getInfo();
if (nullptr == info) {
return false;
}
auto exprInfo = mContent->expr();
auto inside = exprInfo.first->inside();
auto mFrom = exprInfo.first;
auto cache = mFrom->inside()->mCache;
if (nullptr == cache) {
ExecutorScope::Current()->makeCache({mFrom}, false);
cache = mFrom->inside()->mCache;
}
if (nullptr == cache) {
return false;
}
if (NO_ERROR != cache->compute()) {
return false;
}
auto inputTensor = inside->mCache->getSession()->getTensor(inside->mCacheOffset + exprInfo.second);
auto tensor = Tensor::clone(inputTensor);
VARP newVARP = Express::Variable::create(Express::Expr::create(tensor, true));
newVARP->expr().first->mType = type;
auto& pipelineInfo = inside->mCache->getSession()->getPipelineInfo(0);
if (TensorUtils::getDescribeOrigin(tensor)->getBackend() == pipelineInfo.first.cache.first.get()) {
newVARP->expr().first->inside()->mHoldBackend = pipelineInfo.first.cache.first;
} else if (TensorUtils::getDescribeOrigin(tensor)->getBackend() == pipelineInfo.first.cache.second.get()) {
newVARP->expr().first->inside()->mHoldBackend = pipelineInfo.first.cache.second;
}
Variable::replace(VARP(mContent), newVARP);
inputTensor->wait(MNN::Tensor::MAP_TENSOR_READ, true);
return true;
}
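// Usage sketch (hypothetical variables, assuming the ExprCreator API):
//   VARP y = _Add(x, x);     // built lazily
//   y.fix(VARP::CONSTANT);   // compute now; y then wraps the constant result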
Expr::Expr(int outputSize) {
mInside.reset(new Inside(outputSize));
mOutputNames.resize(outputSize);
}
Expr::Expr(Tensor* tensor, bool own) {
mInside.reset(new Inside(tensor, own));
mOutputNames.resize(1);
}
Expr::~Expr() {
mInside.reset();
}
Variable::Info* Expr::outputInfo(int index) const {
return mInside->mOutputInfos.data() + index;
}
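// Record expr as a consumer of each of its inputs, reusing an expired weak
// slot in each producer's mTo list before appending a new entry.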
void Expr::_addLinkForInputs(EXPRP expr) {
auto inputs = expr->inputs();
for (int i=0; i<inputs.size(); ++i) {
if (inputs[i].get() == nullptr) {
continue;
}
bool findEmpty = false;
auto inputExpr = inputs[i]->mFrom;
for (int j=0; j<inputExpr->mTo.size(); ++j) {
auto ref = inputExpr->mTo[j].lock();
if (nullptr == ref) {
inputExpr->mTo[j] = WeakEXPRP(expr);
findEmpty = true;
break;
}
}
if (!findEmpty) {
inputExpr->mTo.emplace_back(WeakEXPRP(expr));
}
}
}
EXPRP Expr::create(Tensor* tensor, bool own) {
EXPRP expr(new Expr(tensor, own));
expr->mOp = nullptr;
expr->mType = VARP::CONSTANT;
auto& dstInfo = expr->mInside->mOutputInfos[0];
expr->mInside->mInfoDirty = false;
expr->mInside->mContentDirty = false;
return expr;
}
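// Usage sketch (hypothetical names): wrap an existing tensor as a constant
// expression; with own == true the Expr takes ownership of the tensor.
//   VARP v = Variable::create(Expr::create(Tensor::clone(srcTensor), true));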
EXPRP Expr::create(Variable::Info&& info, const void* ptr, VARP::InputType type, Expr::MemoryType memtype) {
EXPRP expr(new Expr(1));
expr->mOp = nullptr;
auto originPtr = ptr;
expr->mInside->mOutputInfos[0] = std::move(info);
auto& dstInfo = expr->mInside->mOutputInfos[0];
expr->mInside->mInfoDirty = false;
dstInfo.syncSize();
Utils::copyInfoToTensor(expr->mInside->mOutputTensors[0], expr->mInside->mOutputInfos.data());
expr->mType = type;
if (type == VARP::CONSTANT) {
TensorUtils::getDescribe(expr->mInside->mOutputTensors[0])->usage = Tensor::InsideDescribe::CONSTANT;
TensorUtils::getDescribe(expr->mInside->mOutputTensors[0])->isMutable = false;
} else if (type == VARP::INPUT) {
TensorUtils::getDescribe(expr->mInside->mOutputTensors[0])->usage = Tensor::InsideDescribe::INPUT;
} else {
// VARP::TRAINABLE
TensorUtils::getDescribe(expr->mInside->mOutputTensors[0])->usage = Tensor::InsideDescribe::TRAINABLE;
}
if (dstInfo.size > 0 && memtype == COPY) {
auto res = Utils::allocMemoryForHostTensor(expr->mInside->mOutputTensors[0]);
if (!res) {
MNN_ASSERT(false);
return nullptr;
}
} else {
expr->mInside->mOutputTensors[0]->buffer().host = nullptr;
}
if (nullptr == originPtr) {
if (type == VARP::INPUT && dstInfo.size > 0) {
expr->mInside->mContentDirty = true;
}
return expr;
}
expr->mInside->mContentDirty = false;
if (memtype == COPY) {
size_t total_size = dstInfo.size;
total_size *= dstInfo.type.bytes();
::memcpy(expr->mInside->mOutputTensors[0]->buffer().host, originPtr, total_size);
} else {
expr->mInside->mOutputTensors[0]->buffer().host = (uint8_t*)originPtr;
if (memtype == REF) {
TensorUtils::getDescribe(expr->mInside->mOutputTensors[0])->memoryType = Tensor::InsideDescribe::MEMORY_OUTSIDE;
}
}
return expr;
}
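// Usage sketch (hypothetical host data, assuming a float NCHW blob): COPY
// duplicates the data into tensor-owned memory, while REF borrows the caller's
// pointer (memoryType = MEMORY_OUTSIDE), which must then outlive the expr.
//   Variable::Info info;
//   info.order = NCHW;
//   info.type  = halide_type_of<float>();
//   info.dim   = {1, 4};
//   auto constExpr = Expr::create(std::move(info), hostData, VARP::CONSTANT,
//                                 Expr::MemoryType::COPY);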
EXPRP Expr::create(std::shared_ptr<BufferStorage> extra, std::vector<VARP>&& inputs, int outputSize) {
EXPRP expr(new Expr(outputSize));
expr->mStorage = extra;
expr->mOp = flatbuffers::GetRoot<Op>(extra->buffer());
switch (expr->mOp->type()) {
case OpType_Const:
expr->mType = VARP::CONSTANT;
break;
case OpType_TrainableParam:
expr->mType = VARP::TRAINABLE;
break;
default:
expr->mType = VARP::INPUT;
break;
}
expr->mInputs = std::move(inputs);
auto exe = ExecutorScope::Current();
expr->mInside->mReq = exe->getRequirement(expr.get());
if ((!(exe->getLazyMode() & Executor::LAZY_COMPUTE_ONCE)) && exe->lazyEval) {
_addLinkForInputs(expr);
}
return expr;
}
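// The OpT overload below builds an expression directly from a flatbuffer op
// description: Input ops become placeholder variables, while Const and
// TrainableParam blobs are materialized as constant expressions over the blob data.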
EXPRP Expr::create(const OpT* op, std::vector<VARP> inputs, int outputSize) {
if (OpType_Input == op->type) {
Variable::Info info;
info.dim = op->main.AsInput()->dims;
if (info.dim.size() >= 1 && -1 == info.dim[0]) {
info.dim[0] = 1;
}
info.order = Utils::revertFormat(op->main.AsInput()->dformat);
info.type = Utils::revertDataType(op->main.AsInput()->dtype);
return create(std::move(info), nullptr, VARP::INPUT);
}
if (OpType_Const == op->type || OpType_TrainableParam == op->type) {
if (!op->externalPath.empty()) {
flatbuffers::FlatBufferBuilder builder;
auto offset = Op::Pack(builder, op);
builder.Finish(offset);
std::shared_ptr<BufferStorage> extra(new BufferStorage);
extra->storage = builder.ReleaseRaw(extra->allocated_size, extra->offset);
auto resExpr = Expr::create(extra, std::move(inputs), outputSize);
resExpr->setName(op->name);
return resExpr;
}
Variable::Info info;
info.dim = op->main.AsBlob()->dims;
info.order = Utils::revertFormat(op->main.AsBlob()->dataFormat);
void* ptr = nullptr;
info.type = Utils::revertDataType(op->main.AsBlob()->dataType);
info.syncSize();
switch (op->main.AsBlob()->dataType) {
case DataType_DT_INT8:
ptr = (void*)op->main.AsBlob()->int8s.data();
break;
case DataType_DT_INT32:
ptr = (void*)op->main.AsBlob()->int32s.data();
break;
case DataType_DT_UINT8:
ptr = (void*)op->main.AsBlob()->uint8s.data();
break;
case DataType_DT_FLOAT:
ptr = (void*)op->main.AsBlob()->float32s.data();
break;
case DataType_DT_BFLOAT16:
ptr = (void*)op->main.AsBlob()->uint8s.data();
break;
default:
break;
}
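// DT_HALF blobs are stored as raw uint8 bytes; decode them into a freshly
// allocated float32 buffer so the expression owns plain float data.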
[MNN:Sync] Sync internal github Commits: 8148ae75c 弗人 bugfix 14cb8ec7f 弗人 [Converter:Bugfix] bugfix for onnx depthwise convtranspose 476fbcd90 雁行 [MNN:Feature] Open AVX cast and bugfix for contentCFG. 5e26b9fd3 雁行 [Test:Feature] Add android test. 37e147b25 雁行 [MNN:Bugfix] Bugfix for floordiv. 144c185f5 tianbu.xsw hangxing fix hiai b4fd429d6 tianbu.xsw updateCacheFile bugfix -- update cache size d4ba572a8 雁行 [MNN:Bugfix] Support int8 in AVX2 and some Bugfix. 43061f07e xiaying [MNN:Bugfix] Fix bug for module mode run part of model 398cc5ab6 tianhang.yth refactor demo 736380600 xiaying [Express:Bugfix] Fix memory leak for copy branch b8dab0a27 tianhang.yth MNNFloat2Int8 sizeQuad=0 crash fix 94b95bfed ghz [BugFix]1.Better method for fast pack valid check 6a921f85e xiaying [Converter:Bugfix] Fix bug for Fuseconsttosubgraph 5f77ae889 tianhang.yth numThread bugfix a807ef879 tianhang.yth add createSession(configs, runtimeinfo) API, add pymnn demo, pymnn logcat bugfix ad05409d3 xiaying [MNN:Bugfix] Fix bug for StaticModule's sizecompute overflow, add error print for module mode 9d81b8299 xiaying [MNN:Bugfix] Fix bug for Unique op for output size = 1 03b15e9af xiaying [Test:Feature] Add MatMulBConst Test, Fix bug for single Convert c944a76ee tianhang.yth add auto backend and getSessionInfo @tianbu 91fa7267b ghz [BugFix]1.fix the error in eP check bf0041f77 ghz [BugFix]1.Fix the logic error in eP check. 2.Fix the sp align error 693871672 雁行 [CPU:Bugfix] rm adrp instruction for clang compiler bug. 1b8f6b3d8 ghz 1.Fix the wronly use of r13 in arm32 version. 2.Fix the missing callee register save and restore process. feb7ecc4c 弗人 modify log of python offline quant 040c04811 ghz [BufFix]1.replace platform-related regs. 2.fix the same problem in arm32 version 609f37db8 弗人 add log for python quant, python convert 5511dd30a ghz [BugFix]1.Add testcases in SparseConv to check all functional code branch. 2. Fix the bug in "MNNPackC4ForMatMul_A.S" in arm64, which is caused by the missing check of eReal parameter. a93ff9280 tianhang.yth add tf.Unique op support 9729ff773 allen.lk [Bugfix] Fix one arm32 instruction syntax that clang works but gcc DOES NOT work. use index instruction instead. 297c1ad14 雁行 [Expr:Bugfix] bugfix for tensor content used by shape compute. ef8c369e3 弗人 catch exception 07c2dd670 弗人 add dependence to setup, base64 encode url, add time log 177e590c1 弗人 [Python:Feature] add aliyun log for python quant tool 40a7928cf allen.lk [Debug:Sparse] 1.Add group parameter in torchscript converter. 2. Stop split running to avoid memory corruption when check failed in TransformGroupConvolution 3. fix Op split issue in TransformGroupConvolution 3bdea84a1 allen.lk [Debug:Sparse] Fix and warning one kind of segmentfault cause by memory corruption when resize ConvolutionWinograd. Avoid to use some registers as arm restriction. c3c6fbdbd allen.lk [Debug:Sparse] Fix and warning one kind of segmentfault cause by memory corruption when resize ConvolutionWinograd. Avoid to use some registers as arm restriction. bc590eee4 雁行 [Converter:Bugfix] bugfix for onnx instancenormalization convert. d8918593f tianhang.yth add auto backend and getSessionInfo @tianbu 83a198ed7 杭行 update d0dd3e09b 杭行 update 99540202e xiaying [Converter:Optimize] Opt the tensor convert insert 333d8db82 allen.lk [Debug:Sparse] Fix All platform-register r9 / x18 issue on arm32 and arm64. 
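// A Const op whose blob is stored as fp16 is widened to fp32 below; the newly
// allocated buffer is handed to the Expr with MemoryType::MOVE so the Expr
// takes ownership of the allocation.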
Expr::MemoryType memtype = Expr::MemoryType::COPY;
if (op->main.AsBlob()->dataType == DataType_DT_HALF) {
auto src = (half_float::half*)op->main.AsBlob()->uint8s.data();
ptr = MNNMemoryAllocAlign(info.size * sizeof(float), MNN_MEMORY_ALIGN_DEFAULT);
if (nullptr == src || nullptr == ptr) {
EXPRP empty;
return empty;
}
auto outputPtr = (float*)ptr;
for (int i=0; i<info.size; ++i) {
outputPtr[i] = src[i];
}
memtype = Expr::MemoryType::MOVE;
}
//MNN_ASSERT(nullptr != ptr);
auto expr = create(std::move(info), ptr, VARP::CONSTANT, memtype);
if (OpType_TrainableParam == op->type && nullptr != ptr) {
expr->mType = VARP::TRAINABLE;
}
return expr;
}
flatbuffers::FlatBufferBuilder builder;
auto offset = Op::Pack(builder, op);
builder.Finish(offset);
std::shared_ptr<BufferStorage> extra(new BufferStorage);
extra->storage = builder.ReleaseRaw(extra->allocated_size, extra->offset);
auto resExpr = Expr::create(extra, std::move(inputs), outputSize);
resExpr->setName(op->name);
return resExpr;
}
void Expr::setName(const std::string& name) {
mName = name;
}
bool Expr::requireInfo() {
if (!mInside->mInfoDirty) {
return true;
}
if (!mValid) {
return false;
}
if (nullptr == mOp) {
return !HasUnknownDim(mInside->mOutputInfos[0].dim);
}
if (!mCanDecompose) {
return true;
}
bool ready = true;
for (int i = 0; i < mInputs.size(); ++i) {
if (nullptr == mInputs[i] || nullptr == mInputs[i]->mFrom) {
// The Variable was set to nullptr via the API
return false;
}
auto inputInfo = mInputs[i]->getInfo();
if (nullptr == inputInfo) {
#ifdef MNN_EXPRESS_ERROR_REPORT
MNN_ERROR("%s, %d input not ready\n", mName.c_str(), i);
#endif
mValid = false;
return false;
}
}
for (int i = 0; i < mInputs.size(); ++i) {
auto& v = mInputs[i];
if (v->getInfo()->size == 0) {
// zero shape
continue;
}
if (mInside->mReq.shapeNeedContent[i]) {
// When the op's shape computation depends on input content, that content must not be nullptr
auto ptr = v->readInternal(true);
if (nullptr == ptr) {
ready = false;
break;
}
}
}
if (!ready) {
return false;
}
//MNN_PRINT("Info %s, %p Start\n", mName.c_str(), this);
auto res = ExecutorScope::Current()->computeInfo(this);
//MNN_PRINT("Info Compute %s\n", mName.c_str());
if (NO_ERROR == res) {
mInside->mInfoDirty = false;
} else {
mValid = false;
}
return NO_ERROR == res;
}
size_t Variable::linkNumber() const {
return mFrom->outputs().size();
}
const std::vector<WeakEXPRP>& Variable::toExprs() const {
return mFrom->outputs();
}
VARP Variable::create(EXPRP expr, int index) {
VARP res(new Variable(expr, index));
#ifdef MNN_EXPR_SHAPE_EAGER
auto info = expr->requireInfo();
if (!info) {
#ifdef MNN_EXPRESS_ERROR_REPORT
MNN_ERROR("Can't compute shape\n");
#endif
}
#endif
if (nullptr == expr->get()) {
return res;
}
auto executor = ExecutorScope::Current();
if (!executor->lazyEval) {
res.fix(VARP::CONSTANT);
return res;
}
#ifndef MNN_REDUCE_SIZE
// CONTENT mode: use the geometry computer to decompose the Expr
do {
if (!(executor->getLazyMode() & Executor::LAZY_CONTENT)) {
break;
}
if (expr->get() == nullptr) {
break;
}
if (!expr->mCanDecompose) {
break;
}
bool res = expr->requireInfo();
if (!res) {
break;
}
std::map<Tensor*, VARP> varMap;
std::vector<Tensor*> inputTensors(expr->mInputs.size());
std::vector<Tensor*> outputTensors(expr->outputSize());
for (int i=0; i<inputTensors.size(); ++i) {
inputTensors[i] = Utils::getTensor(expr->mInputs[i]);
varMap.insert(std::make_pair(inputTensors[i], expr->mInputs[i]));
}
for (int i=0; i<outputTensors.size(); ++i) {
outputTensors[i] = expr->mInside->mOutputTensors[i];
}
auto bn = executor->getAttr()->constantBackend;
// TODO: Support set mask
GeometryComputer::Context context(Interpreter::GeometryComputeMask::GEOMETRCOMPUTEMASK_ALL, bn);
auto geo = GeometryComputer::search(expr->get()->type(), Runtime::Compiler_Loop);
CommandBuffer cmd;
res = geo->onCompute(expr->get(), inputTensors, outputTensors, context, cmd);
if (!res) {
break;
}
for (int i=0; i<outputTensors.size(); ++i) {
// Avoid releasing via the host tensor; the memory is owned by the executor's CPU runtime
if (TensorUtils::getDescribe(outputTensors[i])->usage == Tensor::InsideDescribe::CONSTANT) {
TensorUtils::getDescribe(outputTensors[i])->memoryType = Tensor::InsideDescribe::MEMORY_BACKEND;
}
}
if (TensorUtils::getDescribe(outputTensors[index])->usage == Tensor::InsideDescribe::CONSTANT) {
auto constExpr = Expr::create(Tensor::clone(outputTensors[index]), true);
return Variable::create(constExpr);
}
// TODO: For multi-output expr, reduce dup compute
CommandBuffer cmdDst;
GeometryComputerUtils::makeRaster(cmd, cmdDst, context);
for (auto t : outputTensors) {
context.getRasterCacheCreateRecursive(t, cmdDst);
}
// Make New Exprs
for (int cmdIndex=0; cmdIndex < cmdDst.command.size(); ++cmdIndex) {
auto& cmd = cmdDst.command[cmdIndex];
std::vector<VARP> cmdInputs(cmd->inputs.size());
for (int i=0; i<cmd->inputs.size(); ++i) {
auto iter = varMap.find(cmd->inputs[i]);
if (iter == varMap.end()) {
// Extract Const Value
auto constExpr = Expr::create(Tensor::clone(cmd->inputs[i]), true);
VARP constVar(new Variable(constExpr, 0));
varMap.insert(std::make_pair(cmd->inputs[i], constVar));
cmdInputs[i] = constVar;
} else {
cmdInputs[i] = iter->second;
}
}
EXPRP currentExpr;
if (cmd->op->type() == OpType_Raster) {
// Rebuild raster buffer
auto cmdTensor = cmd->outputs[0];
auto cmdDes = TensorUtils::getDescribe(cmdTensor);
MNN_ASSERT(cmd->inputs.size() == cmdDes->regions.size());
std::vector<int> regions(cmdDes->regions.size() * 11);
for (int j=0; j<cmdDes->regions.size(); ++j) {
auto& srcReg = cmdDes->regions[j];
auto dstInt = regions.data() + 11 * j;
dstInt[0] = srcReg.src.offset;
::memcpy(dstInt + 1, srcReg.src.stride, 3 * sizeof(int));
dstInt[4] = srcReg.dst.offset;
::memcpy(dstInt + 5, srcReg.dst.stride, 3 * sizeof(int));
::memcpy(dstInt + 8, srcReg.size, 3 * sizeof(int));
}
auto cmdExpr = Utils::makeRaster(cmdInputs, regions, cmdTensor->shape(), cmdTensor->getType(), TensorUtils::getDescribe(cmdTensor)->dimensionFormat);
cmdExpr->mCanDecompose = false;
VARP cmdVar(new Variable(cmdExpr, 0));
varMap.insert(std::make_pair(cmdTensor, cmdVar));
currentExpr = cmdVar->mFrom;
} else {
EXPRP cmdExpr;
if (cmd->op == expr->get()) {
cmdExpr = Expr::create(expr->mStorage, std::move(cmdInputs), cmd->outputs.size());
} else {
cmdExpr = Expr::create(cmd->buffer, std::move(cmdInputs), cmd->outputs.size());
}
currentExpr = cmdExpr;
cmdExpr->mCanDecompose = false;
for (int j=0; j<cmd->outputs.size(); ++j) {
VARP cmdVar(new Variable(cmdExpr, j));
varMap.insert(std::make_pair(cmd->outputs[j], cmdVar));
}
}
for (int j=0; j<cmd->outputs.size(); ++j) {
Utils::copyTensorToInfo(currentExpr->inside()->mOutputInfos.data() + j, cmd->outputs[j]);
TensorUtils::copyShape(cmd->outputs[j], currentExpr->inside()->mOutputTensors[j], true, true);
}
}
return varMap.find(expr->inside()->mOutputTensors[index])->second;
} while (false);
#endif
return res;
}
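// Usage sketch (illustrative, not part of this file): under lazy evaluation a
// VARP built from the creator helpers in ExprCreator.hpp only records the Expr
// graph; the first read triggers computation.
//
//   auto a = _Const(1.0f, {4}, NCHW);
//   auto b = _Const(2.0f, {4}, NCHW);
//   auto c = _Add(a, b);                   // builds an Expr, no compute yet
//   const float* v = c->readMap<float>();  // computes on first read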
void Expr::replace(EXPRP old, EXPRP from) {
if (old.get() == from.get()) {
return;
}
for (auto input : old->inputs()) {
if (input.get() == nullptr) {
continue;
}
for (int j=0; j<input->mFrom->mTo.size(); ++j) {
auto ref = input->mFrom->mTo[j].lock();
if (ref.get() == old.get()) {
input->mFrom->mTo[j].reset();
}
}
}
for (auto input : from->inputs()) {
if (input.get() == nullptr) {
continue;
}
bool hasSet = false;
for (int j=0; j<input->mFrom->mTo.size(); ++j) {
auto ref = input->mFrom->mTo[j].lock();
if (ref.get() == old.get()) {
hasSet = true;
break;
}
}
if (!hasSet) {
for (int j=0; j<input->mFrom->mTo.size(); ++j) {
auto ref = input->mFrom->mTo[j].lock();
if (nullptr == ref) {
input->mFrom->mTo[j] = WeakEXPRP(old);
hasSet = true;
break;
}
}
}
if (!hasSet) {
input->mFrom->mTo.emplace_back(WeakEXPRP(old));
}
}
old->mCanDecompose = from->mCanDecompose;
old->mOp = from->mOp;
old->mName = from->mName;
old->mOutputNames = from->mOutputNames;
old->mStorage = from->mStorage;
old->mType = from->mType;
old->mValid = from->mValid;
old->mInside = from->mInside;
old->mInputs = from->mInputs;
std::vector<Expr*> visited;
old->visitOutputs([&](EXPRP expr, int index) {
if (expr->visited()) {
return false;
}
visited.emplace_back(expr.get());
expr->setVisited(true);
expr->mInside->mCache.reset();
expr->mInside->mCacheOffset = 0;
expr->mValid = true;
expr->mInside->mInfoDirty = true;
return true;
});
for (auto e : visited) {
e->setVisited(false);
}
}
void Variable::setName(const std::string& name) {
mFrom->mOutputNames[mFromIndex] = name;
if (mFrom->name().empty()) {
mFrom->setName(name);
}
}
bool Variable::setDevicePtr(const void* devicePtr, int memoryType) {
if (nullptr != mFrom->get()) {
MNN_ERROR("Can't setDevicePtr to no-input op\n");
return false;
}
informDirty();
MNN_ASSERT(TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->quantAttr == nullptr || TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->type == DataType_DT_FLOAT);
mFrom->mInside->mContentDirty = false;
// Clear the host address; don't malloc hostPtr afterwards
Utils::releaseMemoryForHostTensor(mFrom->inside()->mOutputTensors[0]);
return mFrom->inside()->mOutputTensors[0]->setDevicePtr(devicePtr, memoryType);
}
bool Variable::copyToDevicePtr(void* devicePtr, int memoryType) {
if (nullptr != mFrom->get()) {
MNN_ERROR("Can't copyToDevicePtr to no-input op\n");
return false;
}
auto inside = mFrom->inside();
auto originTensor = inside->mOutputTensors[mFromIndex];
auto bn = TensorUtils::getDescribeOrigin(originTensor)->getBackend();
if(bn == nullptr) {
MNN_ERROR("Error: Varp copyToDevicePtr can't find backend\n");
return false;
}
MNN::Tensor tempTensor(originTensor->dimensions(), originTensor->getDimensionType());
tempTensor.setDevicePtr(devicePtr, memoryType);
TensorUtils::getDescribeOrigin(originTensor)->getBackend()->onCopyBuffer(originTensor, &tempTensor);
// Sync the result
tempTensor.wait(Tensor::MAP_TENSOR_READ, true);
return true;
}
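// Usage sketch (illustrative): an input VARP may be bound to device memory
// instead of host memory. The memoryType value is backend-defined and is
// forwarded verbatim to Tensor::setDevicePtr; devicePtr and memoryType below
// are hypothetical.
//
//   auto in = _Input({1, 3, 224, 224}, NC4HW4);
//   in->setDevicePtr(devicePtr, memoryType);     // bind backend memory to input
//   out->copyToDevicePtr(devicePtr, memoryType); // copy a result back out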
const std::string& Variable::name() const {
return mFrom->outputName(mFromIndex);
}
const Tensor* Variable::getTensor() const {
auto inside = mFrom->inside();
auto inputTensor = inside->mOutputTensors[mFromIndex];
if (nullptr != inside->mCache) {
inputTensor = inside->mCache->getSession()->getTensor(inside->mCacheOffset + mFromIndex);
}
return inputTensor;
}
bool Variable::input(VARP src) {
if (nullptr != mFrom->get()) {
MNN_ERROR("Can't input to no-input op\n");
return false;
}
if (nullptr == src) {
/*Close the Input*/
mFrom->visitOutputs([](EXPRP expr, int index) {
auto recurse = expr->mValid; expr->mValid = false;
return recurse;
});
mFrom->mValid = false;
return false;
}
auto info = src->getInfo();
std::shared_ptr<Variable::Info> tempInfo;
if (nullptr == info) {
tempInfo.reset(new Variable::Info);
tempInfo->size = 0;
tempInfo->type = halide_type_of<float>();
info = tempInfo.get();
}
auto dstInfo = getInfo();
bool needChange = nullptr == dstInfo || info->order != dstInfo->order || info->dim.size() != dstInfo->dim.size() || info->type != dstInfo->type;
if (!needChange) {
for (int i=0; i<info->dim.size(); ++i) {
if (dstInfo->dim[i] != info->dim[i]) {
needChange = true;
break;
}
}
}
if (!mFrom->mInside->mCache) {
ExecutorScope::Current()->makeCache({mFrom}, false);
}
if (needChange) {
mFrom->mInside->mOutputInfos[0] = *info;
Utils::releaseMemoryForHostTensor(mFrom->inside()->mOutputTensors[0]);
Utils::copyInfoToTensor(mFrom->inside()->mOutputTensors[0], mFrom->inside()->mOutputInfos.data());
Utils::allocMemoryForHostTensor(mFrom->inside()->mOutputTensors[0]);
}
if (info->size) {
auto dstPtr = writeInternal(false);
auto srcPtr = src->readMap<void>();
if (nullptr == dstPtr || nullptr == srcPtr) {
//MNN_ERROR("Alloc memory error or compute src error in Variable::Input\n");
return false;
}
::memcpy(dstPtr, srcPtr, info->size * info->type.bytes());
}
if (needChange) {
mFrom->visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); });
} else {
informDirty();
}
mFrom->mInside->mContentDirty = false;
return true;
}
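// Usage sketch (illustrative): Variable::input copies another variable's
// content into an input variable, reallocating when shape or type changes.
//
//   auto in = _Input({2, 2}, NCHW);
//   auto data = _Const(0.5f, {2, 2}, NCHW);
//   in->input(data);   // in now holds data's content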
void Variable::replace(VARP dst, VARP src) {
if (nullptr == src) {
dst->setExpr(nullptr, 0);
return;
}
if (nullptr == dst) {
dst.mContent = src.mContent;
return;
}
if (src->mFrom.get() == dst->mFrom.get()) {
dst->mFromIndex = src->mFromIndex;
return;
}
if (src->mFrom->outputSize() != dst->mFrom->outputSize()) {
// Can't replace the Expr; just replace the VARP
std::vector<Expr*> visited;
dst->mFrom->visitOutputs([src, dst, &visited](EXPRP expr, int index) {
if (expr->visited()) {
return false;
}
expr->setVisited(true);
visited.emplace_back(expr.get());
expr->mInside->mCache.reset();
expr->mInside->mCacheOffset = 0;
expr->mValid = true;
expr->mInside->mInfoDirty = true;
expr->mInside->mContentDirty = true;
return true;
});
for (auto v : visited) {
v->setVisited(false);
}
dst->mFrom->visitOutputs([src, dst](EXPRP expr, int index) {
for (int i =0; i< expr->inputs().size(); ++i) {
auto input = expr->inputs()[i];
if (input == dst) {
expr->mInputs[i] = src;
}
}
src->mFrom->mTo.emplace_back(expr);
return false;
});
dst->mFrom = src->mFrom;
dst->mFromIndex = src->mFromIndex;
return;
}
Expr::replace(dst->mFrom, src->mFrom);
dst->mFromIndex = src->mFromIndex;
}
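// Usage sketch (illustrative): Variable::replace redirects every consumer of
// dst to src, which is how graph rewrites are expressed at this level.
//
//   Variable::replace(dst, src);   // exprs that read dst now read src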
const Variable::Info* Variable::getInfo() {
if (nullptr == mFrom) {
return nullptr;
}
auto res = mFrom->requireInfo();
if (!res) {
return nullptr;
}
return mFrom->mInside->mOutputInfos.data() + mFromIndex;
}
bool Variable::resize(INTS dims) {
if (nullptr != mFrom->get() && VARP::INPUT != mFrom->mType) {
MNN_ERROR("Can't resize variable not from input\n");
return false;
}
auto& info = mFrom->mInside->mOutputInfos[0];
if (dims.size() == info.dim.size()) {
bool theSame = true;
for (int i=0; i<dims.size(); ++i) {
if (info.dim[i] != dims[i]) {
theSame = false;
break;
}
}
if (theSame) {
return true;
}
}
info.dim = dims;
info.syncSize();
Utils::copyInfoToTensor(mFrom->inside()->mOutputTensors[0], mFrom->inside()->mOutputInfos.data());
Utils::releaseMemoryForHostTensor(mFrom->inside()->mOutputTensors[0]);
if (0 < info.size) {
bool res = Utils::allocMemoryForHostTensor(mFrom->inside()->mOutputTensors[0]);
if (!res) {
return false;
}
}
mFrom->mValid = true;
mFrom->inside()->mInfoDirty = false;
mFrom->inside()->mContentDirty = true;
mFrom->visitOutputs([](EXPRP expr, int index) { return expr->setInfoDirty(); });
return true;
}
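// Usage sketch (illustrative): resizing an input variable updates its shape
// info, reallocates the host buffer and marks downstream shape info dirty.
//
//   auto in = _Input({1, 4}, NCHW);
//   in->resize({2, 4});
//   auto ptr = in->writeMap<float>();   // write new content after the resize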
void Expr::visit(EXPRP expr, const std::function<bool(EXPRP)>& before, const std::function<bool(EXPRP)>& after) {
bool next = before(expr);
if (!next) {
return;
}
for (int i = 0; i < expr->inputs().size(); ++i) {
if (expr->inputs()[i].get() == nullptr) {
continue;
}
visit(expr->inputs()[i]->mFrom, before, after);
}
after(expr);
}
void* Variable::readInternal(bool forShape) {
if (nullptr == mFrom->get()) {
if (VARP::INPUT == mFrom->mType) {
if (mFrom->mInside->mContentDirty) {
return nullptr;
}
2019-12-27 22:16:57 +08:00
}
//MNN_ASSERT(nullptr != mFrom->inside()->mOutputTensors[0]->buffer().host);
auto inside = mFrom->inside();
auto originTensor = inside->mOutputTensors[mFromIndex];
auto des = TensorUtils::getDescribe(originTensor);
if (WrapExecution::needWrap(originTensor, nullptr) || (des->quantAttr != nullptr && des->type == DataType_DT_INT8)) {
// For StaticModule with an other-device runtime, we may create a Variable backed by other-device memory
// This case won't occur for variable = INPUT
// Need Copy
if (nullptr != inside->mHostTensor) {
// The VARP will not be created as an input, so we only need to copy once
return inside->mHostTensor->host<void>();
}
inside->mHostTensor = new Tensor;
TensorUtils::copyShape(originTensor, inside->mHostTensor, true);
inside->mHostTensor->buffer().type = originTensor->getType();
inside->mHostTensor->buffer().host = (uint8_t*)MNNMemoryAllocAlign(inside->mHostTensor->size(), MNN_MEMORY_ALIGN_DEFAULT);
TensorUtils::getDescribe(inside->mHostTensor)->memoryType = Tensor::InsideDescribe::MEMORY_HOST;
originTensor->copyToHostTensor(inside->mHostTensor);
return inside->mHostTensor->host<void>();
}
return originTensor->buffer().host;
}
auto res = mFrom->requireInfo();
if (false == res) {
return nullptr;
}
auto cache = mFrom->inside()->mCache;
if (nullptr == cache) {
ExecutorScope::Current()->makeCache({mFrom}, forShape);
cache = mFrom->inside()->mCache;
}
if (nullptr == cache) {
return nullptr;
}
if (NO_ERROR != cache->compute()) {
return nullptr;
}
return cache->mapOutput(mFrom->mInside->mCacheOffset + mFromIndex, mFrom->mInside->mOutputTensors[mFromIndex]);
}
void Variable::informDirty() {
std::vector<Expr*> visited;
mFrom->visitOutputs([&visited](EXPRP expr, int index) {
if (expr->visited()) {
return false;
}
visited.emplace_back(expr.get());
expr->setVisited(true);
if (expr->inside()->mReq.shapeNeedContent.empty()) {
// Not init
return false;
}
if (expr->inside()->mReq.shapeNeedContent[index]) {
expr->setInfoDirty();
expr->visitOutputs([](EXPRP e, int index) { return e->setInfoDirty(); });
return false;
}
if (expr->inside()->mReq.contentNeedContent[index]) {
if (expr->inside()->mCache != nullptr) {
expr->inside()->mCache->setContentDirty();
}
return true;
}
return false;
});
for (auto e : visited) {
e->setVisited(false);
}
}
void Variable::prepareCompute(const std::vector<VARP>& vars, bool forceCpu) {
std::vector<EXPRP> exprs;
for (auto v : vars) {
if (nullptr != v && nullptr != v->mFrom->get()) {
if (!v->expr().first->visited() && nullptr == v->expr().first->inside()->mCache) {
v->expr().first->requireInfo();
v->expr().first->setVisited(true);
exprs.emplace_back(v->expr().first);
}
}
}
for (auto v : vars) {
if (nullptr != v && nullptr != v->mFrom->get()) {
v->expr().first->setVisited(false);
}
}
if (exprs.empty()) {
return;
}
ExecutorScope::Current()->makeCache(std::move(exprs), forceCpu);
}
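// Usage sketch (illustrative): prepareCompute groups several outputs into one
// cache so the later readMap calls do not schedule redundant computation.
//
//   Variable::prepareCompute({outA, outB});
//   auto pa = outA->readMap<float>();
//   auto pb = outB->readMap<float>();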
void Variable::compute(const std::vector<VARP>& vars, bool forceCPU) {
prepareCompute(vars, forceCPU);
for (auto& v : vars) {
if (nullptr != v && nullptr != v->mFrom->get()) {
auto inside = v->mFrom->inside();
if (nullptr != inside && nullptr != inside->mCache) {
inside->mCache->compute();
}
}
}
}
void* Variable::writeInternal(bool inform) {
if (nullptr != mFrom->get()) {
return nullptr;
}
if (inform) {
informDirty();
}
MNN_ASSERT(TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->quantAttr == nullptr || TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->type == DataType_DT_FLOAT);
mFrom->mInside->mContentDirty = false;
return mFrom->inside()->mOutputTensors[0]->host<void>();
}
void Variable::writeScaleInternal(float scaleValue, float zeroPoint, bool inform) {
MNN_ASSERT(TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->quantAttr == nullptr || TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->type == DataType_DT_FLOAT);
if (inform) {
informDirty();
}
mFrom->mInside->mContentDirty = true;
TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->quantAttr.reset(new QuantAttr);
auto quant = TensorUtils::getDescribe(mFrom->inside()->mOutputTensors[0])->quantAttr.get();
quant->scale = scaleValue;
quant->zero = zeroPoint;
}
void Variable::unMap() {
//mFrom->inside()->onUnMapContent(mFromIndex);
}
void Expr::visitOutputs(const std::function<bool(EXPRP, int)>& visit) {
for (auto iter = mTo.begin(); iter != mTo.end();) {
auto expr = iter->lock();
if (nullptr == expr) {
iter = mTo.erase(iter);
continue;
}
bool recurse = false;
auto inputs = expr->inputs();
for (int i=0; i<inputs.size(); ++i) {
if (inputs[i].get() == nullptr) {
continue;
}
if (inputs[i]->mFrom.get() == this) {
recurse = recurse || visit(expr, i);
}
}
if (recurse) {
expr->visitOutputs(visit);
}
iter++;
}
}
bool Expr::setInfoDirty() {
if (mInside->mInfoDirty && mValid) {
//MNN_PRINT("End Info Dirty for %s\n", mName.c_str());
return false;
}
//MNN_PRINT("Set Info Dirty for %s\n", mName.c_str());
mInside->mInfoDirty = true;
mInside->mContentDirty = true;
mValid = true;
if (mInside->mCache != nullptr) {
mInside->mCache->setShapeDirty();
}
for (auto o : mInside->mOutputTensors) {
Utils::releaseMemoryForHostTensor(o);
}
return true;
}
std::vector<VARP> Variable::load(const char* fileName) {
AutoStorage<uint8_t> buffer;
{
FileLoader loader(fileName, true);
if (!loader.valid()) {
MNN_ERROR("Error for open %s\n", fileName);
return {};
}
loader.read();
if (!loader.valid()) {
return {};
}
loader.merge(buffer);
if (buffer.get() == nullptr) {
return {};
}
}
return load(buffer.get(), buffer.size());
}
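// Usage sketch (illustrative; the file name is a placeholder):
//
//   auto vars = Variable::load("model.mnn");   // one VARP per tensor in the net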
std::vector<VARP> Variable::load(const uint8_t* buffer, size_t length) {
AUTOTIME;
if (false == OpCommonUtils::checkNet(buffer, length)) {
return {};
}
std::unique_ptr<NetT> source(UnPackNet(buffer));
if (nullptr == source) {
return {};
}
// FUNC_PRINT(source->oplists.size());
auto opSize = source->oplists.size();
auto tensorCount = source->tensorName.size();
if (tensorCount == 0) {
tensorCount = source->tensorNumber;
}
std::vector<VARP> variable;
variable.reserve(tensorCount);
std::map<int, VARP> variableMap;
bool isStatic = source->usage == Usage_INFERENCE_STATIC;
std::vector<std::shared_ptr<Tensor>> allTensors;
if (isStatic) {
allTensors.resize(source->tensorName.size());
initTensors(allTensors, flatbuffers::GetRoot<MNN::Net>(buffer));
}
// Generate All Exprs by order of net
for (int i = 0; i < opSize; ++i) {
std::vector<VARP> inputs;
auto op = source->oplists[i].get();
for (int index = 0; index < op->inputIndexes.size(); ++index) {
auto inputIndex = op->inputIndexes[index];
if (variableMap.find(inputIndex) == variableMap.end()) {
MNN_ERROR("Can't find variable for %s, the graph is error\n", op->name.c_str());
break;
}
inputs.emplace_back(variableMap[inputIndex]);
}
EXPRP expr = Expr::create(source->oplists[i].get(), inputs, (int)op->outputIndexes.size());
expr->setName(source->oplists[i]->name);
if (isStatic && nullptr != expr->get()) {
// Set tensor shape from net
expr->mCanDecompose = false;
for (int index = 0; index < op->outputIndexes.size(); ++index) {
auto outputIndex = op->outputIndexes[index];
delete expr->inside()->mOutputTensors[index];
expr->inside()->mOutputTensors[index] = Tensor::clone(allTensors[outputIndex].get());
Utils::copyTensorToInfo(expr->inside()->mOutputInfos.data() + index, expr->inside()->mOutputTensors[index]);
}
}
for (int index = 0; index < op->outputIndexes.size(); ++index) {
auto outputIndex = op->outputIndexes[index];
if (variableMap.find(outputIndex) == variableMap.end()) {
// Just create the VARP; don't compute it yet
VARP newVariable(new Variable(expr, index));
if (source->tensorName.size() > outputIndex) {
newVariable->setName(source->tensorName[outputIndex]);
}
variableMap[outputIndex] = newVariable;
variable.emplace_back(newVariable);
}
}
}
return variable;
}
std::map<std::string, VARP> Variable::loadMap(const uint8_t* buffer, size_t length) {
AUTOTIME;
auto variables = load(buffer, length);
std::map<std::string, VARP> varMap;
for (auto v : variables) {
varMap[v->name()] = v;
}
return varMap;
}
std::map<std::string, VARP> Variable::loadMap(const char* fileName) {
AUTOTIME;
auto variables = load(fileName);
std::map<std::string, VARP> varMap;
for (auto v : variables) {
varMap[v->name()] = v;
}
return varMap;
}
std::vector<VARP> Variable::mapToSequence(const std::map<std::string, VARP>& source) {
std::vector<VARP> outputs;
outputs.reserve(source.size());
for (auto& iter : source) {
outputs.emplace_back(iter.second);
}
return outputs;
}
#define SET_TYPE(TYPE, type) \
if (tensor->getType() == halide_type_of<type##_t>()) {\
blob->dataType = DataType_DT_##TYPE;
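// Note: SET_TYPE deliberately leaves the "if (...) {" unbalanced; each call
// site supplies the closing brace, e.g. "SET_TYPE(INT8, int8)}".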
void Variable::save(const std::vector<VARP>& vars, NetT* dest) {
auto executeOrder = getExecuteOrder(vars);
// Search subgraphs
std::map<std::string, std::shared_ptr<Executor::SubGraph>> subgraphs;
auto exe = ExecutorScope::Current();
#ifndef MNN_REDUCE_SIZE
for (int index = 0; index < executeOrder.size(); ++index) {
auto expr = executeOrder[index];
auto op = expr->get();
if (nullptr == op || op->type() != OpType_While) {
continue;
}
if (op->main_type() != OpParameter_WhileParam) {
continue;
}
auto whileParam = op->main_as_WhileParam();
auto name = whileParam->body_graph()->str();
auto subgraph = exe->findSubGraph(name);
if (nullptr == subgraph) {
#ifdef MNN_EXPRESS_ERROR_REPORT
MNN_ERROR("Variable::save: Invalid subgraph name: %s\n", name.c_str());
#endif
continue;
}
MNN_ASSERT(subgraph->depends.size() == 0);
subgraphs.insert(std::make_pair(name, subgraph));
}
// Save Subgraphs
dest->subgraphs.clear();
for (auto& graphIter : subgraphs) {
// Copy Subgraph info
flatbuffers::FlatBufferBuilder builder;
builder.Finish(MNN::SubGraphProto::Pack(builder, graphIter.second->info.get()));
std::unique_ptr<MNN::SubGraphProtoT> subgraph(flatbuffers::GetRoot<MNN::SubGraphProto>(builder.GetBufferPointer())->UnPack());
dest->subgraphs.emplace_back(std::move(subgraph));
}
#endif
// Get Expr - TensorOffset Map
std::map<EXPRP, int> varIndexInfo;
{
int tensorOffset = 0;
for (int i=0; i<executeOrder.size(); ++i) {
auto expr = executeOrder[i];
auto outputSize = executeOrder[i]->outputSize();
varIndexInfo[expr] = tensorOffset;
tensorOffset += outputSize;
}
dest->tensorName.resize(tensorOffset);
}
// Create All Op
for (int index = 0; index < executeOrder.size(); ++index) {
auto expr = executeOrder[index];
auto mOp = expr->get();
std::unique_ptr<OpT> op;
if (nullptr != mOp) {
op.reset(mOp->UnPack());
} else {
MNN_ASSERT(1 == expr->outputSize());
auto& info = expr->mInside->mOutputInfos[0];
const void* ptr = expr->mInside->mOutputTensors[0]->host<void>();
VARP temp;
if (nullptr == ptr || expr->mInside->mOutputTensors[0]->deviceId() > 0) {
temp = Variable::create(expr);
ptr = temp->readMap<void>();
}
op.reset(new OpT);
if (expr->mType != VARP::INPUT) {
auto blob = new BlobT;
blob->dataFormat = (MNN_DATA_FORMAT)Utils::convertFormat(info.order);
blob->dims = info.dim;
if (info.type.code == halide_type_float) {
blob->dataType = DataType_DT_FLOAT;
blob->float32s.resize(info.size);
::memcpy(blob->float32s.data(), ptr, info.size * sizeof(float));
} else if (info.type.code == halide_type_int && info.type.bits == 32) {
blob->dataType = DataType_DT_INT32;
blob->int32s.resize(info.size);
::memcpy(blob->int32s.data(), ptr, info.size * sizeof(int));
} else if (info.type.code == halide_type_int && info.type.bits == 8) {
blob->dataType = DataType_DT_INT8;
blob->int8s.resize(info.size);
::memcpy(blob->int8s.data(), ptr, info.size * sizeof(int8_t));
} else if (info.type.code == halide_type_uint && info.type.bits == 8) {
2019-12-27 22:16:57 +08:00
blob->dataType = DataType_DT_UINT8;
blob->uint8s.resize(info.size);
2020-11-05 16:41:56 +08:00
::memcpy(blob->uint8s.data(), ptr, info.size * sizeof(uint8_t));
} else if (info.type.code == halide_type_bfloat && info.type.bits == 16) {
blob->dataType = DataType_DT_BFLOAT16;
blob->uint8s.resize(info.size * 2);
::memcpy(blob->uint8s.data(), ptr, info.size * sizeof(int16_t));
}
op->type = OpType_Const;
if (expr->mType == VARP::TRAINABLE) {
op->type = OpType_TrainableParam;
}
op->main.type = OpParameter_Blob;
op->main.value = blob;
} else {
op->type = OpType_Input;
op->main.type = OpParameter_Input;
op->main.value = new InputT;
op->main.AsInput()->dtype = (MNN::DataType)Utils::convertDataType(info.type);
MNN_ASSERT(op->main.AsInput()->dtype != DataType_DT_INVALID);
op->main.AsInput()->dims = info.dim;
op->main.AsInput()->dformat = (MNN_DATA_FORMAT)Utils::convertFormat(info.order);
}
}
if (!expr->name().empty()) {
op->name = expr->name();
}
op->inputIndexes.resize(expr->inputs().size());
for (int i = 0; i < op->inputIndexes.size(); ++i) {
if (expr->inputs()[i] == nullptr) {
op->inputIndexes[i] = -1;
continue;
}
auto inputExpr = expr->inputs()[i]->expr();
op->inputIndexes[i] = varIndexInfo[inputExpr.first] + inputExpr.second;
}
if (op->name.empty()) {
op->name = EnumNameOpType(op->type) + numberToString(index+1);
}
op->outputIndexes.resize(expr->outputSize());
auto tensorIndexOffset = varIndexInfo[expr];
for (int v=0; v<expr->outputSize(); ++v) {
op->outputIndexes[v] = tensorIndexOffset + v;
dest->tensorName[tensorIndexOffset+v] = expr->outputName(v);
}
dest->oplists.emplace_back(std::move(op));
}
bool staticModel = ExecutorScope::Current()->getLazyMode() == Executor::LAZY_CONTENT;
// Fill Empty Tensor Name With Default Op Name
for (int index = 0; index < executeOrder.size(); ++index) {
auto expr = executeOrder[index];
auto op = dest->oplists[index].get();
auto tensorIndexOffset = varIndexInfo[expr];
for (int v=0; v<expr->outputSize(); ++v) {
auto subindex = tensorIndexOffset + v;
if (dest->tensorName[subindex].empty()) {
if (v == 0) {
dest->tensorName[subindex] = op->name;
} else {
dest->tensorName[subindex] = op->name + numberToString(v);
}
}
auto tensor = expr->inside()->mOutputTensors[v];
if (staticModel || TensorUtils::getDescribe(tensor)->quantAttr) {
auto des = TensorUtils::getDescribe(tensor);
auto describe = std::unique_ptr<MNN::TensorDescribeT>(new MNN::TensorDescribeT);
describe->index = varIndexInfo[expr] + v;
describe->name = dest->tensorName[subindex];
auto tensorDes = TensorUtils::getDescribe(tensor);
if (nullptr != tensorDes->quantAttr) {
describe->quantInfo.reset(new TensorQuantInfoT);
describe->quantInfo->max = tensorDes->quantAttr->max;
describe->quantInfo->min = tensorDes->quantAttr->min;
describe->quantInfo->zero = tensorDes->quantAttr->zero;
describe->quantInfo->scale = tensorDes->quantAttr->scale;
}
#ifndef MNN_REDUCE_SIZE
if (staticModel) {
describe->blob = std::unique_ptr<MNN::BlobT>(new MNN::BlobT);
auto& blob = describe->blob;
blob->dataFormat = des->dimensionFormat;
if (tensor->getType() == halide_type_of<float>()) {
blob->dataType = DataType_DT_FLOAT;
} else {
SET_TYPE(INT8, int8)}
SET_TYPE(UINT8, uint8)}
SET_TYPE(INT32, int32)}
SET_TYPE(INT64, int64)}
}
for (int d = 0; d < tensor->dimensions();d++) {
describe->blob->dims.push_back(tensor->buffer().dim[d].extent);
}
for (auto& reg : des->regions) {
auto regionT = std::unique_ptr<MNN::RegionT>(new MNN::RegionT);
regionT->src = std::unique_ptr<MNN::ViewT>(new MNN::ViewT);
regionT->dst = std::unique_ptr<MNN::ViewT>(new MNN::ViewT);
regionT->src->offset = reg.src.offset;
regionT->dst->offset = reg.dst.offset;
for (int s = 0; s < 3; s++) {
regionT->src->stride.push_back(reg.src.stride[s]);
regionT->dst->stride.push_back(reg.dst.stride[s]);
regionT->size.push_back(reg.size[s]);
}
describe->regions.emplace_back(std::move(regionT));
}
}
#endif
dest->extraTensorDescribe.emplace_back(std::move(describe));
}
}
}
#ifndef MNN_REDUCE_SIZE
if (staticModel) {
dest->usage = Usage_INFERENCE_STATIC;
}
#endif
// add version number
dest->extraInfo.reset(new ExtraInfoT);
dest->extraInfo->version = MNN_VERSION;
}
std::vector<int8_t> Variable::save(const std::vector<VARP>& vars) {
std::unique_ptr<NetT> net(new NetT);
save(vars, net.get());
flatbuffers::FlatBufferBuilder builder(1024);
auto offset = Net::Pack(builder, net.get());
builder.Finish(offset);
std::vector<int8_t> result(builder.GetSize());
::memcpy(result.data(), builder.GetBufferPointer(), builder.GetSize());
return result;
}
void Variable::save(const std::vector<VARP>& vars, const char* fileName) {
std::unique_ptr<NetT> net(new NetT);
save(vars, net.get());
// FUNC_PRINT(net->oplists.size());
flatbuffers::FlatBufferBuilder builder(1024);
auto offset = Net::Pack(builder, net.get());
builder.Finish(offset);
FileLoader::write(fileName, std::make_pair(builder.GetBufferPointer(), builder.GetSize()));
}
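// Usage sketch (illustrative; the file name is a placeholder): save serializes
// the graph reachable from the given outputs, materializing pending constants.
//
//   Variable::save({output}, "exported.mnn");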
std::pair<std::map<std::string, VARP>, std::map<std::string, VARP>> Variable::getInputAndOutput(const std::map<std::string, VARP>& allVariable) {
std::pair<std::map<std::string, VARP>, std::map<std::string, VARP>> res;
for (auto& iter : allVariable) {
auto var = iter.second;
if (var->expr().first->get() == nullptr && var->expr().first->mType == VARP::INPUT) {
res.first[var->name()] = var;
}
if (var->linkNumber() == 0) {
res.second[var->name()] = var;
}
}
return res;
}
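// Usage sketch (illustrative; the file name is a placeholder): loadMap plus
// getInputAndOutput is the common way to pick the feed and fetch points of a
// loaded graph.
//
//   auto all = Variable::loadMap("model.mnn");
//   auto io  = Variable::getInputAndOutput(all);
//   auto input  = io.first.begin()->second;    // an input VARP
//   auto output = io.second.begin()->second;   // an output VARP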
std::vector<EXPRP> Variable::getExecuteOrder(const std::vector<VARP>& outputs) {
std::vector<EXPRP> sequence;
std::stack<EXPRP> workStack;
for (auto output : outputs) {
if (nullptr == output) {
continue;
}
workStack.push(output->expr().first);
}
while (!workStack.empty()) {
auto expr = workStack.top();
bool valid = true;
if (expr->visited()) {
workStack.pop();
continue;
}
for (auto input : expr->inputs()) {
if (input == nullptr) {
continue;
}
if (input->expr().first->visited()) {
continue;
}
valid = false;
workStack.push(input->expr().first);
break;
}
if (valid) {
sequence.emplace_back(expr);
expr->setVisited(true);
workStack.pop();
}
}
for (auto expr : sequence) {
expr->setVisited(false);
}
return sequence;
}
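// Note: getExecuteOrder is an iterative post-order DFS over expr inputs, so the
// returned sequence is a valid topological order; visited flags are reset
// before returning so the traversal can be repeated.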
VARP VARP::operator+(VARP var) const {
return _Add(VARP(mContent), var);
}
VARP VARP::operator-(VARP var) const {
return _Subtract(VARP(mContent), var);
}
VARP VARP::operator*(VARP var) const {
return _Multiply(VARP(mContent), var);
}
VARP VARP::operator/(VARP var) const {
return _Divide(VARP(mContent), var);
}
VARP VARP::mean(INTS dims) const {
return _ReduceMean(VARP(mContent), dims);
}
VARP VARP::sum(INTS dims) const {
return _ReduceSum(VARP(mContent), dims);
}
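// Usage sketch (illustrative): the operator overloads above are thin wrappers
// over the creator functions, so plain arithmetic on VARPs builds the graph.
//
//   auto y = (a + b) * c;   // equivalent to _Multiply(_Add(a, b), c)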
} // namespace Express
} // namespace MNN