//
//  Tensor.cpp
//  MNN
//
//  Created by MNN on 2018/07/06.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include <complex.h>
#include <string.h>
#include <MNN/Tensor.hpp>
#include "MNN_generated.h"
#include "core/Backend.hpp"
#include "core/MNNMemoryUtils.h"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"

using namespace std;

namespace MNN {
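
// Constructs an empty tensor: `dimSize` dimensions, float elements, no
// extents set and no memory allocated. DimensionType selects the layout:
// CAFFE -> NCHW, TENSORFLOW -> NHWC, CAFFE_C4 -> NC4HW4.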
Tensor::Tensor(int dimSize, DimensionType type) {
    MNN_ASSERT(dimSize <= MNN_MAX_TENSOR_DIM);
    mDescribe          = new InsideDescribe;
    mBuffer.dimensions = dimSize;
    mBuffer.type       = halide_type_of<float>();
    mBuffer.device     = 0;
    mBuffer.host       = nullptr;
    mBuffer.dim        = &mDescribe->dims[0];
    switch (type) {
        case CAFFE:
            mDescribe->dimensionFormat = MNN_DATA_FORMAT_NCHW;
            break;
        case TENSORFLOW:
            mDescribe->dimensionFormat = MNN_DATA_FORMAT_NHWC;
            break;
        case CAFFE_C4:
            mDescribe->dimensionFormat = MNN_DATA_FORMAT_NC4HW4;
            break;
        default:
            break;
    }
}
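
// Shape-copying constructor: clones the dimension extents of `tensor`,
// remaps axes when the requested layout differs from the source
// (NCHW <-> NHWC, for tensors of 4+ dimensions), and optionally allocates
// aligned host memory for the result.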
Tensor::Tensor(const Tensor* tensor, DimensionType type, bool allocMemory) {
    MNN_ASSERT(tensor != nullptr);

    auto buffer        = tensor->buffer();
    mDescribe          = new InsideDescribe;
    mBuffer.dimensions = buffer.dimensions;
    mBuffer.type       = buffer.type;
    mBuffer.device     = 0;
    mBuffer.host       = nullptr;
    mBuffer.dim        = &mDescribe->dims[0];
    for (int i = 0; i < buffer.dimensions; ++i) {
        mBuffer.dim[i].extent = buffer.dim[i].extent;
    }
    switch (type) {
        case CAFFE:
            mDescribe->dimensionFormat = MNN_DATA_FORMAT_NCHW;
            break;
        case TENSORFLOW:
            mDescribe->dimensionFormat = MNN_DATA_FORMAT_NHWC;
            break;
        case CAFFE_C4:
            mDescribe->dimensionFormat = MNN_DATA_FORMAT_NC4HW4;
            // NC4HW4 keeps the NCHW logical order, so remap axes as CAFFE
            type = CAFFE;
            break;
        default:
            break;
    }

    // format mapping
    auto originType = tensor->getDimensionType();
    if (originType != type && buffer.dimensions >= 4) {
        std::vector<int> axisMap;
        // NCHW -> NHWC
        if (originType == CAFFE) {
            axisMap.push_back(0);
            for (int i = 2; i < buffer.dimensions; ++i) {
                axisMap.push_back(i);
            }
            axisMap.push_back(1);
        }
        // NHWC -> NCHW
        else {
            axisMap.push_back(0);
            axisMap.push_back(buffer.dimensions - 1);
            for (int i = 1; i < buffer.dimensions - 1; ++i) {
                axisMap.push_back(i);
            }
        }
        for (int i = 0; i < buffer.dimensions; ++i) {
            mBuffer.dim[i].extent = buffer.dim[axisMap[i]].extent;
        }
    }
    TensorUtils::setLinearLayout(this);

    if (allocMemory) {
        auto memorySize = size();
        if (memorySize > 0) {
            mDescribe->memoryType = Tensor::InsideDescribe::MEMORY_HOST;
            mBuffer.host          = (uint8_t*)MNNMemoryAllocAlign(memorySize, MNN_MEMORY_ALIGN_DEFAULT);
            MNN_ASSERT(mBuffer.host != nullptr);
        }
    }
}
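
// Frees handle contents (e.g., strings) through the registered free
// function, then releases host memory if this tensor owns it.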
Tensor::~Tensor() {
    if (nullptr != mDescribe->handleFreeFunction) {
        MNN_ASSERT(mBuffer.type.code == halide_type_handle);
        auto handles = (void**)mBuffer.host;
        for (int i = 0; i < elementSize(); ++i) {
            if (nullptr != handles[i]) {
                mDescribe->handleFreeFunction(handles[i]);
            }
        }
    }
    if (mDescribe->memoryType == InsideDescribe::MEMORY_HOST) {
        if (nullptr != mBuffer.host) {
            MNNMemoryFreeAlign(mBuffer.host);
        }
    }
    delete mDescribe;
}
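
// Describes shape and type only: no host memory is allocated, so storage is
// expected to come from a device backend.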
Tensor* Tensor::createDevice(const std::vector<int>& dims, halide_type_t type, DimensionType dimType) {
    auto shapeTensor = new Tensor((int)dims.size(), dimType);
    for (int i = 0; i < dims.size(); ++i) {
        shapeTensor->setLength(i, dims[i]);
    }
    shapeTensor->buffer().type = type;
    TensorUtils::setLinearLayout(shapeTensor);
    return shapeTensor;
}
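
// Creates a tensor of the given shape and element type. With a null
// `userData` the tensor allocates and owns aligned host memory; otherwise it
// wraps the caller's buffer without taking ownership. A minimal usage sketch
// (shape and fill code are illustrative only):
//
//   auto t = Tensor::create({1, 3, 224, 224}, halide_type_of<float>(),
//                           nullptr, Tensor::CAFFE);
//   t->host<float>()[0] = 1.0f; // host<T>() accessor from Tensor.hpp
//   delete t;                   // owned host memory is freed in ~Tensor()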
Tensor* Tensor::create(const std::vector<int>& dims, halide_type_t type, void* userData, DimensionType dimType) {
    Tensor shapeTensor((int)dims.size(), dimType);
    for (int i = 0; i < dims.size(); ++i) {
        shapeTensor.setLength(i, dims[i]);
    }
    shapeTensor.buffer().type = type;

    bool ownData = userData == nullptr;
    auto result  = new Tensor(&shapeTensor, dimType, ownData);
    if (nullptr != userData) {
        result->buffer().host = (uint8_t*)userData;
    }
    return result;
}
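
// Copies data from a host tensor into this (device) tensor via the owning
// backend; fails when no backend is attached.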
bool Tensor::copyFromHostTensor(const Tensor* hostTensor) {
    auto bn = mDescribe->backend;
    if (nullptr == bn) {
        return false;
    }
    bn->onCopyBuffer(hostTensor, this);
    return true;
}
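
// Copies data from this (device) tensor into a host tensor via the owning
// backend; fails when no backend is attached.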
bool Tensor::copyToHostTensor(Tensor* hostTensor) const {
    auto bn = mDescribe->backend;
    if (nullptr == bn) {
        return false;
    }
    bn->onCopyBuffer(this, hostTensor);
    return true;
}
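
// Maps the internal MNN_DATA_FORMAT of `origin` back to a public
// DimensionType, defaulting to CAFFE (NCHW).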
static Tensor::DimensionType getDimType(const Tensor* origin) {
    auto dimformat = TensorUtils::getDescribe(origin)->dimensionFormat;
    switch (dimformat) {
        case MNN_DATA_FORMAT_NHWC:
            return Tensor::TENSORFLOW;
        case MNN_DATA_FORMAT_NCHW:
            return Tensor::CAFFE;
        case MNN_DATA_FORMAT_NC4HW4:
            return Tensor::CAFFE_C4;
        default:
            break;
    }
    return Tensor::CAFFE;
}
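
// Builds a host tensor matching the shape, type and layout of `device`,
// optionally copying the device contents into it.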
Tensor* Tensor::createHostTensorFromDevice(const Tensor* device, bool copyContent) {
    auto tensor = Tensor::create(device->shape(), device->getType(), nullptr, getDimType(device));
    if (copyContent) {
        device->copyToHostTensor(tensor);
    }
    return tensor;
}
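
// Only NHWC is reported as TENSORFLOW; NCHW and NC4HW4 are both reported as
// CAFFE, since NC4HW4 follows the same logical NCHW order.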
Tensor::DimensionType Tensor::getDimensionType() const {
    if (mDescribe->dimensionFormat == MNN_DATA_FORMAT_NHWC) {
        return Tensor::TENSORFLOW;
    }
    return Tensor::CAFFE;
}

Tensor::HandleDataType Tensor::getHandleDataType() const {
    if (halide_type_handle != mBuffer.type.code) {
        return HANDLE_NONE;
    }
    return mDescribe->handleType;
}
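
// Maps a serialized DataType (from MNN_generated.h) to a halide type. Note
// the narrowing: DT_INT64 and DT_BOOL are stored as int32, DT_DOUBLE as
// float. DT_STRING becomes a handle type whose elements are released with
// ::free in the destructor.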
void Tensor::setType(int type) {
    switch (type) {
        case DataType_DT_DOUBLE:
        case DataType_DT_FLOAT:
            mBuffer.type = halide_type_of<float>();
            break;
        case DataType_DT_BFLOAT16:
            mBuffer.type = halide_type_t(halide_type_float, 16);
            break;
        case DataType_DT_QINT32:
        case DataType_DT_INT32:
        case DataType_DT_BOOL:
        case DataType_DT_INT64:
            mBuffer.type = halide_type_of<int32_t>();
            break;
        case DataType_DT_QINT8:
        case DataType_DT_INT8:
            mBuffer.type = halide_type_of<int8_t>();
            break;
        case DataType_DT_QUINT8:
        case DataType_DT_UINT8:
            mBuffer.type = halide_type_of<uint8_t>();
            break;
        case DataType_DT_QUINT16:
        case DataType_DT_UINT16:
            mBuffer.type = halide_type_of<uint16_t>();
            break;
        case DataType_DT_QINT16:
        case DataType_DT_INT16:
            mBuffer.type = halide_type_of<int16_t>();
            break;
        case DataType_DT_STRING:
            mBuffer.type                  = halide_type_t(halide_type_handle, sizeof(void*) * 8);
            mDescribe->handleType         = HANDLE_STRING;
            mDescribe->handleFreeFunction = (void (*)(void*))::free;
            break;
        default:
            MNN_PRINT("Unsupported data type!");
            MNN_ASSERT(false);
            break;
    }
}

std::vector<int> Tensor::shape() const {
    std::vector<int> result;
    for (int i = 0; i < mBuffer.dimensions; ++i) {
        result.push_back(mBuffer.dim[i].extent);
    }
    return result;
}

void Tensor::print() const {
    // Do nothing
}

void Tensor::printShape() const {
    // Do nothing
}
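
// Size of the backing buffer in bytes. For NC4HW4, the channel dimension
// (i == 1) is rounded up to a multiple of 4; e.g. a 1x3x32x32 float tensor
// occupies 1 * 4 * 32 * 32 * sizeof(float) = 16384 bytes.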
int Tensor::size() const {
    auto dataSize = mBuffer.type.bytes();
    MNN_ASSERT(dataSize >= 1);
    for (int i = 0; i < this->buffer().dimensions; i++) {
        int currentDimSize = mBuffer.dim[i].extent;
        if (mDescribe->dimensionFormat == MNN_DATA_FORMAT_NC4HW4 && 1 == i) {
            currentDimSize = ALIGN_UP4(currentDimSize);
        }
        dataSize *= currentDimSize;
    }
    return dataSize;
}
} // namespace MNN