//
// CPUReduction.cpp
// MNN
//
// Created by MNN on 2018/07/25.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "backend/cpu/CPUReduction.hpp"
#include "backend/cpu/compute/CommonOptFunction.h"
#include "backend/cpu/compute/ConvOpt.h"
#include "core/Concurrency.h"
#include "core/Macro.h"
#include <cmath>
#include <algorithm>
#include "core/OpCommonUtils.hpp"
#define UNIT 4
#define UNIT_DUP(value) \
{ (value), (value), (value), (value) }
namespace MNN {
// outside, axis, inside
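// Worked example (illustrative shape, not from the source): for an input of
// shape [2, 3, 4] reduced over axis 1, outside = 2, axis = 3, inside = 4.
// Element (o, a, i) of the flattened input sits at
//   src[(o * axis + a) * inside + i],
// so each output element dst[o * inside + i] reduces the stride-`inside`
// column src[(o * axis + a) * inside + i] for a = 0 .. axis - 1.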
class Reduction : public Execution {
public:
Reduction(Backend* backend, const Op* op) : Execution(backend) {
// Only the first reduction axis is used; a multi-axis reduction is assumed
// to have been decomposed into a chain of single-axis ops before reaching
// this execution.
mAxis = op->main_as_ReductionParam()->dim()->data()[0];
}
virtual ~Reduction() = default;
virtual ErrorCode onExecute(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs) override {
auto input = inputs[0];
auto output = outputs[0];
auto typeCode = input->getType().code;
auto src = inputs[0];
int outside = 1;
for (int i = 0; i < mAxis; ++i) {
outside *= input->length(i);
}
int inside = 1;
for (int i = mAxis + 1; i < input->dimensions(); ++i) {
inside *= input->length(i);
}
auto axis = input->length(mAxis);
auto dst = output;
//MNN_ASSERT(output->elementSize() == inside * outside);
if (halide_type_float == typeCode) {
this->onReduce(src->host<float>(), dst->host<float>(), inside, outside, axis);
} else if (halide_type_int == typeCode) {
this->onReduce(src->host<int32_t>(), dst->host<int32_t>(), inside, outside, axis);
}
return NO_ERROR;
}
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const = 0;
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const = 0;
private:
int mAxis = -1;
};
class MeanReduce : public Reduction {
public:
MeanReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~MeanReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
auto numberThread = ((CPUBackend*)backend())->threadNumber();
MNN_CONCURRENCY_BEGIN(tId, numberThread) {
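// Work is split across threads by striding over the outside dimension:
// thread tId handles oi = tId, tId + numberThread, tId + 2 * numberThread, ...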
for (int oi = tId; oi < outside; oi += numberThread) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
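// Fast path: when the inner stride is a multiple of 4, accumulate whole
// rows of `inside` floats with the vectorized MNNMatrixAddCommon, then
// scale once by 1 / axisSize; otherwise fall back to a scalar strided loop.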
if (inside % 4 == 0) {
::memcpy(dstOutSide, srcOutSide, inside * sizeof(float));
for (int a = 1; a < axisSize; ++a) {
auto srcAxis = srcOutSide + a * inside;
MNNMatrixAddCommon(dstOutSide, dstOutSide, srcAxis, inside, 0, 0, 0, 1);
}
float divide = 1.0f / (float)axisSize;
for (int i = 0; i < inside; ++i) {
dstOutSide[i] = dstOutSide[i] * divide;
}
} else {
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
float summer = 0.0f;
for (int a = 0; a < axisSize; ++a) {
summer += srcInside[a * inside];
}
*dstInside = summer / (float)axisSize;
}
}
}
}
MNN_CONCURRENCY_END();
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t summer = 0;
for (int a = 0; a < axisSize; ++a) {
summer += srcInside[a * inside];
}
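// Note: the integer mean truncates toward zero (C++ integer division).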
*dstInside = summer / axisSize;
}
}
}
};
class SumReduce : public Reduction {
public:
SumReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~SumReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
auto numberThread = ((CPUBackend*)backend())->threadNumber();
auto core = static_cast<CPUBackend*>(backend())->functions();
MNN_CONCURRENCY_BEGIN(tId, numberThread) {
for (int oi = tId; oi < outside; oi += numberThread) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
if (inside % 4 == 0) {
::memcpy(dstOutSide, srcOutSide, inside * sizeof(float));
for (int a = 1; a < axisSize; ++a) {
auto srcAxis = srcOutSide + a * inside;
MNNMatrixAddCommon(dstOutSide, dstOutSide, srcAxis, inside, 0, 0, 0, 1);
}
} else {
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
float summer = 0.0f;
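// When inside == 1 the elements being reduced are contiguous in memory,
// so the backend's MNNAccumulateSequenceNumber kernel can sum them directly.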
if (inside == 1) {
core->MNNAccumulateSequenceNumber(&summer, srcInside, axisSize);
} else {
for (int a = 0; a < axisSize; ++a) {
summer += srcInside[a * inside];
}
}
*dstInside = summer;
}
}
}
}
MNN_CONCURRENCY_END();
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t summer = 0;
for (int a = 0; a < axisSize; ++a) {
summer += srcInside[a * inside];
}
*dstInside = summer;
}
}
}
};
class MinReduce : public Reduction {
public:
MinReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~MinReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
float Min = srcInside[0];
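// Contiguous case: judging from the count arithmetic below, MNNMinFloat
// folds UNIT * 2 floats per iteration into a UNIT-wide running minimum;
// the tail of up to UNIT * 2 - 1 elements is handled by a scalar loop.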
if (1 == inside) {
int32_t inputCountUnit = axisSize / (UNIT * 2);
int32_t remain = axisSize - (inputCountUnit * UNIT * 2);
float minArray[UNIT] = UNIT_DUP(Min);
MNNMinFloat((float*)srcInside, minArray, inputCountUnit);
for (int i = 0; i < UNIT; i++) {
Min = std::min(Min, minArray[i]);
}
if (remain > 0) {
int currentIndex = inputCountUnit * UNIT * 2;
for (int i = 0; i < remain; i++) {
float currentInputData = srcInside[currentIndex + i];
Min = std::min(Min, currentInputData);
}
}
} else {
for (int a = 0; a < axisSize; ++a) {
Min = std::min(Min, srcInside[a * inside]);
}
}
*dstInside = Min;
}
}
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t Min = srcInside[0];
for (int a = 0; a < axisSize; ++a) {
Min = std::min(Min, srcInside[a * inside]);
}
*dstInside = Min;
}
}
}
};
class MaxReduce : public Reduction {
public:
MaxReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~MaxReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
float Max = srcInside[0];
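// Mirrors MinReduce: MNNMaxFloat consumes UNIT * 2 floats per iteration
// into a UNIT-wide running maximum, with a scalar loop for the remainder.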
if (1 == inside) {
int32_t inputCountUnit = axisSize / (UNIT * 2);
int32_t remain = axisSize - (inputCountUnit * UNIT * 2);
float maxArray[UNIT] = UNIT_DUP(Max);
MNNMaxFloat((float*)srcInside, maxArray, inputCountUnit);
for (int i = 0; i < UNIT; i++) {
Max = std::max(Max, maxArray[i]);
}
if (remain > 0) {
int currentIndex = inputCountUnit * UNIT * 2;
for (int i = 0; i < remain; i++) {
float currentInputData = srcInside[currentIndex + i];
Max = std::max(Max, currentInputData);
}
}
} else {
for (int a = 0; a < axisSize; ++a) {
Max = std::max(Max, srcInside[a * inside]);
}
}
*dstInside = Max;
}
}
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t Max = srcInside[0];
for (int a = 0; a < axisSize; ++a) {
Max = std::max(Max, srcInside[a * inside]);
}
*dstInside = Max;
}
}
}
};
class ProdReduce : public Reduction {
public:
ProdReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~ProdReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
float product = 1.0f;
for (int a = 0; a < axisSize; ++a) {
product *= srcInside[a * inside];
}
*dstInside = product;
}
}
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t product = 1;
for (int a = 0; a < axisSize; ++a) {
product *= srcInside[a * inside];
}
*dstInside = product;
}
}
}
};
class AnyReduce : public Reduction {
public:
AnyReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~AnyReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
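// ANY is integer-only here, so the float overload should be unreachable.
// In the integer path below, values > 0 count as true; inputs are assumed
// to be 0/1 booleans.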
MNN_ASSERT(false);
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t result = 0;
for (int a = 0; a < axisSize; ++a) {
if (srcInside[a * inside] > 0) {
result = 1;
break;
}
}
*dstInside = result;
}
}
}
};
class AllReduce : public Reduction {
public:
AllReduce(Backend* backend, const Op* op) : Reduction(backend, op) {
// nothing to do
}
virtual ~AllReduce() = default;
protected:
virtual void onReduce(const float* src, float* dst, int inside, int outside, int axisSize) const override {
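// ALL is integer-only as well; any zero element along the axis yields 0.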
MNN_ASSERT(false);
}
virtual void onReduce(const int32_t* src, int32_t* dst, int inside, int outside, int axisSize) const override {
for (int oi = 0; oi < outside; ++oi) {
auto srcOutSide = src + oi * axisSize * inside;
auto dstOutSide = dst + oi * inside;
for (int ii = 0; ii < inside; ++ii) {
auto srcInside = srcOutSide + ii;
auto dstInside = dstOutSide + ii;
int32_t result = 1;
for (int a = 0; a < axisSize; ++a) {
if (srcInside[a * inside] == 0) {
result = 0;
break;
}
}
*dstInside = result;
}
}
}
};
Execution* CPUReductionCreator::onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const {
return create(inputs, outputs, op, backend);
}
Execution* CPUReductionCreator::create(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) {
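// Only 32-bit float/int tensors are handled here; returning nullptr for
// other types is assumed to let the framework pick another implementation
// (or report the op as unsupported).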
auto type = inputs[0]->getType();
if (type.bits != 32) {
return nullptr;
}
if (type.code != halide_type_float && type.code != halide_type_int) {
return nullptr;
}
switch (op->main_as_ReductionParam()->operation()) {
case ReductionType_MEAN:
return new MeanReduce(backend, op);
case ReductionType_SUM:
return new SumReduce(backend, op);
case ReductionType_MINIMUM:
return new MinReduce(backend, op);
case ReductionType_MAXIMUM:
return new MaxReduce(backend, op);
case ReductionType_PROD:
return new ProdReduce(backend, op);
case ReductionType_ANY:
return new AnyReduce(backend, op);
case ReductionType_ALL:
return new AllReduce(backend, op);
default:
MNN_ASSERT(false);
break;
}
return nullptr;
}
REGISTER_CPU_OP_CREATOR(CPUReductionCreator, OpType_Reduction);
} // namespace MNN