MNN/test/expr/PaddingTest.cpp

127 lines
4.0 KiB
C++
Raw Normal View History

- build: - unify schema building in core and converter; - add more build script for android; - add linux build script for python; - ops impl: - add floor mod support in binary; - use eltwise impl in add/max/sub/mul binary for optimization; - remove fake double support in cast; - fix 5d support for concat; - add adjX and adjY support for batch matmul; - optimize conv2d back prop filter; - add pad mode support for conv3d; - fix bug in conv2d & conv depthwise with very small feature map; - optimize binary without broadcast; - add data types support for gather; - add gather ND support; - use uint8 data type in gather v2; - add transpose support for matmul; - add matrix band part; - add dim != 4 support for padding, reshape & tensor convert; - add pad type support for pool3d; - make ops based on TensorFlow Lite quantization optional; - add all & any support for reduction; - use type in parameter as output type in reduction; - add int support for unary; - add variable weight support for conv2d; - fix conv2d depthwise weights initialization; - fix type support for transpose; - fix grad outputs count for reduce grad and reshape grad; - fix priorbox & detection output; - fix metal softmax error; - python: - add runSessionWithCallBackInfo interface; - add max nodes limit (1400) for visualization tool; - fix save error in python3; - align default dim; - convert: - add extra design for optimization; - add more post converting optimizers; - add caffe v1 weights blob support; - add cast, unary, conv transpose support for onnx model; - optimize batchnorm, conv with variable weights, prelu, reshape, slice, upsample for onnx model; - add cos/sin/atan/tan support for unary for tensorflow model; - add any/all support for reduction for tensorflow model; - add elu, conv3d, pool3d support for tensorflow model; - optimize argmax, batchnorm, concat, batch to space, conv with variable weights, prelu, slice for tensorflow model; - others: - fix size computer lock; - fix thread pool deadlock; 
- add express & parameters in express; - rewrite blitter chooser without static map; - add tests for expr;
2019-10-29 13:37:26 +08:00
//
// PaddingTest.cpp
// MNNTests
//
// Created by MNN on 2019/09/10.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "ExprCreator.hpp"
#include "MNNTestSuite.h"
#include "MNN_generated.h"
using namespace MNN::Express;
using namespace MNN;
// Fill x's backing buffer with the sequence 1, 2, 3, ... — one value per element.
// Writing through writeMap marks the variable's contents as updated.
static void fillVar(VARP x) {
    const auto total = x->getInfo()->size;
    auto* data = x->writeMap<int32_t>();
    for (int idx = 0; idx < total; ++idx) {
        data[idx] = idx + 1;
    }
}
// Debug helper: log every element of x as a comma-separated list of ints,
// followed by a newline.
static void printVar(VARP x) {
    const auto count = x->getInfo()->size;
    const auto* values = x->readMap<int32_t>();
    for (int k = 0; k < count; ++k) {
        MNN_PRINT("%d, ", values[k]);
    }
    MNN_PRINT("\n");
}
// Exercises the Padding op through the expression API.
// Case 1: pads a 2-D int tensor and checks every output element.
// Case 2: pads a 4-D int tensor in both NCHW and NC4HW4 layouts and checks the
//         results agree, including after mutating the pad input in place
//         (which should trigger recomputation of both outputs).
class PaddingTest : public MNNTestCase {
public:
virtual bool run() {
// Build a raw Padding op description; reused by every Expr::create below.
std::unique_ptr<OpT> padding(new OpT);
padding->type = OpType_Padding;
{
// Case 1: 4x6 input, pad values [0, 1, 1, 1].
// Presumably two [before, after] pairs, one per dimension — i.e. rows padded
// (0 before, 1 after) and columns (1 before, 1 after), giving a 5x8 output;
// the i*8+j indexing and the i>=4 / j<1 / j>=7 bounds below match that. TODO confirm against the op's spec.
auto x = _Input({4, 6}, NCHW, halide_type_of<int32_t>());
auto pad = _Input({4}, NCHW, halide_type_of<int32_t>());
auto paddingPtr = pad->writeMap<int32_t>();
paddingPtr[0] = 0;
paddingPtr[1] = 1;
paddingPtr[2] = 1;
paddingPtr[3] = 1;
fillVar(x);
auto y = Variable::create(Expr::create(padding.get(), {x, pad}));
{
auto size = y->getInfo()->dim;
auto ptr = y->readMap<int32_t>();
for (int i=0; i<size[0]; ++i) {
for (int j=0; j<size[1]; ++j) {
// Row stride of the padded output is 8 (6 + 1 + 1).
auto compute = ptr[i*8+j];
// Interior cells hold the original fillVar value (row-major 1..24,
// shifted one column right by the leading column pad).
auto expect = i*6 + (j-1) + 1;
if (i >= 4 || j < 1 || j >= 7) {
// Padded border must be zero.
expect = 0;
}
if (compute != expect) {
FUNC_PRINT(1);
return false;
}
}
}
}
}
{
// Case 2: 1x3x4x6 input, padded both as plain NCHW and through an
// NC4HW4 (channel-packed) round trip; the two results must be
// element-wise identical after converting back to NCHW.
auto x = _Input({1, 3, 4, 6}, NCHW, halide_type_of<int32_t>());
auto convert = _Convert(x, NC4HW4);
// Four [before, after] pairs: batch (0,1), channel (0,0), height (1,1), width (1,1) — presumably; TODO confirm pair layout.
auto pad = _Input({8}, NCHW, halide_type_of<int32_t>());
auto paddingPtr = pad->writeMap<int32_t>();
paddingPtr[0] = 0;
paddingPtr[1] = 1;
paddingPtr[2] = 0;
paddingPtr[3] = 0;
paddingPtr[4] = 1;
paddingPtr[5] = 1;
paddingPtr[6] = 1;
paddingPtr[7] = 1;
fillVar(x);
auto y = Variable::create(Expr::create(padding.get(), {x, pad}));
auto yC4 = _Convert(Variable::create(Expr::create(padding.get(), {convert, pad})), NCHW);
{
// Both layouts must produce the same element count and values.
auto info = y->getInfo();
auto info2 = yC4->getInfo();
if(info->size != info2->size) {
FUNC_PRINT(1);
return false;
}
auto ptr0 = y->readMap<int32_t>();
auto ptr1 = yC4->readMap<int32_t>();
for (int i=0; i<info->size; ++i) {
if (ptr0[i] != ptr1[i]) {
FUNC_PRINT(1);
return false;
}
}
}
// Mutate the pad input in place (channel "after" pad 0 -> 1) and re-read
// both outputs: getInfo/readMap after a writeMap should reflect the new
// pad values, so this checks the recompute path, not cached results.
paddingPtr = pad->writeMap<int32_t>();
paddingPtr[0] = 0;
paddingPtr[1] = 1;
paddingPtr[2] = 0;
paddingPtr[3] = 1;
paddingPtr[4] = 1;
paddingPtr[5] = 1;
paddingPtr[6] = 1;
paddingPtr[7] = 1;
{
auto info = y->getInfo();
auto info2 = yC4->getInfo();
if(info->size != info2->size) {
FUNC_PRINT(1);
return false;
}
auto ptr0 = y->readMap<int32_t>();
auto ptr1 = yC4->readMap<int32_t>();
for (int i=0; i<info->size; ++i) {
if (ptr0[i] != ptr1[i]) {
FUNC_PRINT(1);
return false;
}
}
}
}
return true;
}
};
// Register under the "expr/Padding" name so the suite runner can find it.
MNNTestSuiteRegister(PaddingTest, "expr/Padding");