//
//  ReverseSequenceTest.cpp
//  MNNTests
//
//  Created by MNN on 2019/08/31.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include <MNN/expr/ExprCreator.hpp>
#include "MNNTestSuite.h"
using namespace MNN::Express;
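
// Unit test for the Express-mode _ReverseSequence op: builds inputs directly with
// writeMap, runs _ReverseSequence for several rank/axis layouts, and compares every
// output element against reference values computed in plain C++.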
class ReverseSequenceTest : public MNNTestCase {
public:
virtual bool run(int precision) {
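        // precision == 2 corresponds to the suite's low-precision mode (e.g. FP16
        // backends), so the element-wise comparison tolerance is relaxed below.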
        // high dimension, batch_dim ahead
        float threshold = 0.0001;
        if (precision == 2) {
            threshold = 0.01;
        }
{
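            // y holds the per-batch sequence lengths (4 batches); x is a rank-5 input of
            // shape {6, 4, 7, 10, 8}, filled through its row-major strides {2240, 560, 80, 8, 1}.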
            auto y = _Input({4}, NHWC, halide_type_of<int32_t>());
            std::vector<int> seq = {7, 2, 3, 5};
            auto yPtr = y->writeMap<int32_t>();
            ::memcpy(yPtr, seq.data(), seq.size() * sizeof(int32_t));
            auto x = _Input({6, 4, 7, 10, 8}, NHWC, halide_type_of<float>());
            auto xPtr = x->writeMap<float>();
            for (int o = 0; o < 6; ++o) {
                for (int i = 0; i < 4; ++i) {
                    for (int m = 0; m < 7; ++m) {
                        for (int j = 0; j < 10; ++j) {
                            for (int k = 0; k < 8; ++k) {
                                xPtr[2240 * o + 560 * i + 80 * m + 8 * j + k] = 0.1 * o + i + m + j + 0.2 * k;
                            }
                        }
                    }
                }
            }
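            // batch_dim = 1, seq_dim = 3: for batch index i, the first seq[i] slices
            // along axis 3 are reversed; everything else passes through unchanged.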
            auto ry = _ReverseSequence(x, y, 1, 3);
            auto ryPtr = ry->readMap<float>();
            auto func_equal = [threshold](float a, float b) -> bool {
                if (a - b > threshold || a - b < -threshold) {
                    return false;
                } else {
                    return true;
                }
            };
            int count = 0;
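            // Verify every element against the reference value computed above.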
            for (int o = 0; o < 6; ++o) {
                for (int i = 0; i < 4; ++i) {
                    auto req = seq[i];
                    for (int m = 0; m < 7; ++m) {
                        for (int j = 0; j < 10; ++j) {
                            for (int k = 0; k < 8; ++k) {
                                float compute = ryPtr[2240 * o + 560 * i + 80 * m + 8 * j + k];
                                float need = 0.1 * o + i + m + j + 0.2 * k;
                                if (j < req) {
                                    need = 0.1 * o + i + m + (req - j - 1) + 0.2 * k;
                                }
                                if (!func_equal(need, compute)) {
                                    MNN_PRINT("case 1 error\n");
                                    return false;
                                }
                            }
                        }
                    }
                }
            }
        }
{ // test SizeComputer::needInputContent
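            // Here the sequence-length tensor is produced by _Shape + _Gather instead of
            // being written directly, so reading ry requires the runtime to evaluate that
            // input's content while inferring shapes.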
            int dim0 = 1, dim1 = 6, dim2 = 7, dim3 = 10, dim4 = 8;
            auto x = _Input({dim0, dim1, dim2, dim3, dim4}, NHWC, halide_type_of<float>());
            auto x_transpose = _Transpose(x, {1, 0, 2, 3, 4});
            auto x_shape = _Shape(x_transpose, NHWC);
            int ii[] = {1};
            auto x_gather = _Gather(x_shape, _Const(ii, {1}, NCHW, halide_type_of<int>()));
            auto ry = _ReverseSequence(x_transpose, x_gather, 1, 3);
            auto xPtr = x->writeMap<float>();
            for (int i = 0; i < dim0 * dim1 * dim2 * dim3 * dim4; ++i) {
                xPtr[i] = 1;
            }
            auto ryPtr = ry->readMap<float>();
            if (ryPtr == nullptr) {
                MNN_PRINT("case 2 error\n");
                return false;
            }
        }

        // high dimension, seq_dim ahead
        {
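            // Same data pattern, but the sequence axis (size 10) now comes before the
            // batch axis (size 4): x has shape {6, 10, 7, 4, 8}, strides {2240, 224, 32, 8, 1}.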
            auto y = _Input({4}, NHWC, halide_type_of<int32_t>());
            std::vector<int> seq = {7, 2, 3, 5};
            auto yPtr = y->writeMap<int32_t>();
            ::memcpy(yPtr, seq.data(), seq.size() * sizeof(int32_t));
            auto x = _Input({6, 10, 7, 4, 8}, NHWC, halide_type_of<float>());
            auto xPtr = x->writeMap<float>();
            for (int o = 0; o < 6; ++o) {
                for (int i = 0; i < 10; ++i) {
                    for (int m = 0; m < 7; ++m) {
                        for (int j = 0; j < 4; ++j) {
                            for (int k = 0; k < 8; ++k) {
                                xPtr[2240 * o + 224 * i + 32 * m + 8 * j + k] = 0.1 * o + i + m + j + 0.2 * k;
                            }
                        }
                    }
                }
            }
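            // batch_dim = 3, seq_dim = 1: for batch index j, reverse the first seq[j]
            // slices along axis 1.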
            auto ry = _ReverseSequence(x, y, 3, 1);
            auto ryPtr = ry->readMap<float>();
            auto func_equal = [threshold](float a, float b) -> bool {
                if (a - b > threshold || a - b < -threshold) {
                    return false;
                } else {
                    return true;
                }
            };
            int count = 0;
            for (int o = 0; o < 6; ++o) {
                for (int i = 0; i < 10; ++i) {
                    for (int m = 0; m < 7; ++m) {
                        for (int j = 0; j < 4; ++j) {
                            auto req = seq[j];
                            for (int k = 0; k < 8; ++k) {
                                auto compute = ryPtr[2240 * o + 224 * i + 32 * m + 8 * j + k];
                                auto need = 0.1 * o + i + m + j + 0.2 * k;
                                if (i < req) {
                                    need = 0.1 * o + (req - i - 1) + m + j + 0.2 * k;
                                }
                                if (!func_equal(need, compute)) {
                                    MNN_PRINT("case 3 error\n");
                                    return false;
                                }
                            }
                        }
                    }
                }
            }
        }

        // 3 dimension
        {
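            // Rank-3 input of shape {10, 4, 8} (sequence axis first, then batch, then feature).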
            auto y = _Input({4}, NHWC, halide_type_of<int32_t>());
            std::vector<int> seq = {7, 2, 3, 5};
            auto yPtr = y->writeMap<int32_t>();
            ::memcpy(yPtr, seq.data(), seq.size() * sizeof(int32_t));
            auto x = _Input({10, 4, 8}, NHWC, halide_type_of<float>());
            auto xPtr = x->writeMap<float>();
            for (int i = 0; i < 10; ++i) {
                for (int j = 0; j < 4; ++j) {
                    for (int k = 0; k < 8; ++k) {
                        xPtr[32 * i + 8 * j + k] = 0.1 * i + j + k;
                    }
                }
            }
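            // batch_dim = 1, seq_dim = 0: for batch index j, reverse the first seq[j]
            // rows along axis 0.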
            auto ry = _ReverseSequence(x, y, 1, 0);
            auto ryPtr = ry->readMap<float>();
            auto func_equal = [threshold](float a, float b) -> bool {
                if (a - b > threshold || a - b < -threshold) {
                    return false;
                } else {
                    return true;
                }
            };
            for (int i = 0; i < 10; ++i) {
                for (int j = 0; j < 4; ++j) {
                    auto req = seq[j];
                    for (int k = 0; k < 8; ++k) {
                        auto compute = ryPtr[32 * i + 8 * j + k];
                        auto need = 0.1 * i + j + k;
                        if (i < req) {
                            need = 0.1 * (req - i - 1) + j + k;
                        }
                        if (!func_equal(need, compute)) {
                            MNN_PRINT("case 4 error\n");
                            return false;
                        }
                    }
                }
            }
        }
        return true;
    }
};

MNNTestSuiteRegister(ReverseSequenceTest, "expr/ReverseSequence");
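// Registered as "expr/ReverseSequence"; with MNN's unit-test runner this is typically
// invoked as `./run_test.out expr/ReverseSequence` (runner name assumed from the usual
// MNN build, not defined in this file).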