//
//  ShapeInterp.cpp
//  MNN
//
//  Created by MNN on 2019/01/10.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "core/Macro.h"
#include "core/SizeComputer.hpp"
#include "core/TensorUtils.hpp" // for TensorUtils::getDescribe()

namespace MNN {

// Size Computer
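// Shape inference for the Interp (resize) op: only the spatial extents change,
// with dim[2] holding the output height and dim[3] the output width.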
class InterpComputer : public SizeComputer {
    virtual bool onComputeSize(const MNN::Op* op, const std::vector<Tensor*>& inputs,
                               const std::vector<Tensor*>& outputs) const override {
        MNN_ASSERT(1 == inputs.size() || 2 == inputs.size());
        MNN_ASSERT(1 == outputs.size());

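        // With a single input, the target size comes from the Interp parameters;
        // with a second input, it is read from that shape tensor instead.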
        auto& input  = inputs[0]->buffer(); // input tensor(data)
        auto& output = outputs[0]->buffer();
        int w = 0;
        int h = 0;
        const int inputSize = (int)inputs.size();
        // copy dims
        memcpy(output.dim, input.dim, sizeof(halide_dimension_t) * input.dimensions);
        if (1 == inputSize) {
            auto interp = op->main_as_Interp();
            // get output dims
            w = interp->outputWidth();
            h = interp->outputHeight();
            if (w == 0 || h == 0) {
                w = input.dim[3].extent * interp->widthScale();
                h = input.dim[2].extent * interp->heightScale();
            }
            output.dim[3].extent = w;
            output.dim[2].extent = h;
        } else {
            auto shape = inputs[1]; // input shape(shape)
            MNN_ASSERT(2 == shape->buffer().dim[0].extent);
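            // The shape tensor holds two values, [height, width], and may be
            // given as float or as int32, so both element types are handled.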
            if (shape->getType().code == halide_type_float) {
                const float *shapeData = shape->host<float>();
                w = shapeData[1];
                h = shapeData[0];
                output.dim[3].extent = w;
                output.dim[2].extent = h;
            } else {
                const int32_t *shapeData = shape->host<int32_t>();
                w = shapeData[1];
                h = shapeData[0];
                output.dim[3].extent = w;
                output.dim[2].extent = h;
            }
        }

        if (0 == w || 0 == h) {
            return false;
        }
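        // The output keeps the input's rank, element type and dimension format.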
        outputs[0]->buffer().dimensions = inputs[0]->dimensions();
        outputs[0]->buffer().type = inputs[0]->getType();
        TensorUtils::getDescribe(outputs[0])->dimensionFormat = TensorUtils::getDescribe(inputs[0])->dimensionFormat;

        return true;
    }

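    // Rough cost estimate: the output element count (in units of 2^20) times the
    // approximate number of source samples per output element for the resize type.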
    virtual float onComputeFlops(const MNN::Op* op, const std::vector<Tensor*>& inputs,
                                 const std::vector<Tensor*>& outputs) const override {
        auto elementInM = (float)outputs[0]->elementSize() / 1024.0f / 1024.0f;
        auto interp = op->main_as_Interp();
        auto unit = 0;
        switch (interp->resizeType()) {
            case 1: // nearest neighbour
                unit = 1;
                break;
            case 2: // bilinear
                unit = 4;
                break;
            case 3: // cubic
                unit = 16;
                break;
            default:
                break;
        }
        return unit * elementInM;
    }
};

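// Input 1 (the shape tensor) is read during shape inference, so its index is
// passed to the registration below.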
REGISTER_SHAPE_INPUTS(InterpComputer, OpType_Interp, {1});
} // namespace MNN