4 changes: 2 additions & 2 deletions modules/dnn/src/net_impl.cpp
@@ -1400,6 +1400,7 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
     Mat blob_ = blob.getMat(); // can't use InputArray directly due MatExpr stuff
     MatShape blobShape = shape(blob_);
 
+#if 0 // TODO: DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0
     if (pin.lid == 0)
     {
         CV_Assert(!netInputLayer.empty());
@@ -1411,18 +1412,17 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
             if (!inputShapeLimitation.empty())
             {
                 CV_CheckEQ(inputShapeLimitation.size(), blobShape.size(), "");
-#if 0 // TODO: DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0
                 const size_t dims = inputShapeLimitation.size();
                 for (size_t dim = 0; dim < dims; dim++)
                 {
                     if (dims >= 3 && dim == 0 && inputShapeLimitation[0] == 1)
                         continue; // don't limit batch
                     CV_CheckEQ(inputShapeLimitation[dim], blobShape[dim], "");
                 }
-#endif
             }
         }
     }
+#endif
 
     LayerData& ld = layers[pin.lid];
     const int numInputs = std::max(pin.oid + 1, (int)ld.requiredOutputs.size());
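Note: the hunks above move the existing `#if 0` outward so that the entire input-shape limitation in `Net::Impl::setInput` is compiled out (previously only the per-dimension loop was disabled); the TODO names the `DNNTestNetwork.MobileNet_SSD_Caffe_Different_Width_Height/0` test that a strict check would break. A minimal caller-side sketch of the behaviour this keeps working, assuming a hypothetical Caffe MobileNet-SSD model (paths and blob size are illustrative, not from the PR):

```cpp
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Hypothetical model files; only the setInput() behaviour matters here.
    Net net = readNet("MobileNetSSD_deploy.prototxt", "MobileNetSSD_deploy.caffemodel");
    // A blob whose spatial size differs from the shape recorded at import time (width != height).
    Mat blob(std::vector<int>{1, 3, 300, 560}, CV_32F, Scalar());
    net.setInput(blob);      // accepted: the shape-limitation check above is compiled out
    Mat out = net.forward(); // shape inference runs with the actual blob shape
    return 0;
}
```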
5 changes: 5 additions & 0 deletions modules/dnn/src/onnx/onnx_importer.cpp
@@ -891,6 +891,11 @@ void ONNXImporter::populateNet()
     }
 
     dstNet.setInputsNames(netInputs);
+    if (!hasDynamicShapes)
+    {
+        for (int i = 0; i < netInputs.size(); ++i)
+            dstNet.setInputShape(netInputs[i], outShapes[netInputs[i]]);
+    }
 
     // dump outputs
     for (int i = 0; i < graph_proto.output_size(); ++i)
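With this change, an ONNX graph that declares only static shapes has those shapes registered on the network through `Net::setInputShape`, so they can be read back before any `setInput` call. A small sketch of how this surfaces through the public API, assuming a placeholder `model.onnx`; for dynamic-shape models the returned shapes stay empty:

```cpp
#include <opencv2/dnn.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;

int main()
{
    Net net = readNetFromONNX("model.onnx");  // placeholder path
    std::vector<MatShape> inShapes, outShapes;
    // The tests in this PR call getLayerShapes() with an empty MatShape to read
    // the shapes stored by the importer; layer 0 is the network input layer.
    net.getLayerShapes(MatShape(), 0, inShapes, outShapes);
    for (const MatShape& s : inShapes)
    {
        for (int d : s)
            std::cout << d << ' ';
        std::cout << '\n';  // e.g. "1 3 224 224" for a static-shape classifier
    }
    return 0;
}
```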
19 changes: 19 additions & 0 deletions modules/dnn/src/tflite/tflite_importer.cpp
@@ -163,6 +163,8 @@ void TFLiteImporter::populateNet()
     CV_Assert(modelTensors);
     layouts.resize(modelTensors->size(), DATA_LAYOUT_UNKNOWN);
     size_t subgraph_inputs_size = subgraph_inputs->size();
+    std::vector<std::string> inputsNames(subgraph_inputs_size);
+    std::vector<MatShape> inputsShapes(subgraph_inputs_size);
     for (size_t i = 0; i < subgraph_inputs_size; ++i)
     {
         int idx = subgraph_inputs->Get(i);
@@ -171,7 +173,24 @@
         if (!tensor)
             CV_Error(Error::StsError, cv::format("DNN/TFLite: subgraph input %d (%d) is NULL", (int)i, idx));
         layouts[idx] = estimateLayout(*tensor);
+
+        // Keep info about origin inputs names and shapes
+        inputsNames[i] = tensor->name()->str();
+        std::vector<int> shape(tensor->shape()->begin(), tensor->shape()->end());
+        if (layouts[idx] == DATA_LAYOUT_NHWC) {
+            CV_CheckEQ(shape.size(), (size_t)4, "");
+            std::swap(shape[2], shape[3]);
+            std::swap(shape[1], shape[2]);
+        }
+        inputsShapes[i] = shape;
     }
+
+    dstNet.setInputsNames(inputsNames);
+    for (size_t i = 0; i < subgraph_inputs_size; ++i)
+    {
+        dstNet.setInputShape(inputsNames[i], inputsShapes[i]);
+    }
+
     const auto& all_operators = *subgraph_operators;
     const size_t all_operators_size = all_operators.size();
     for (size_t op_idx = 0; op_idx < all_operators_size; ++op_idx)
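The two `std::swap` calls above permute an NHWC tensor shape from the TFLite file into the NCHW order used by OpenCV DNN. A worked example of the permutation (values are illustrative):

```cpp
#include <utility>
#include <vector>

int main()
{
    std::vector<int> shape = {1, 192, 192, 3};  // NHWC as stored in the .tflite file
    std::swap(shape[2], shape[3]);              // {1, 192, 3, 192}
    std::swap(shape[1], shape[2]);              // {1, 3, 192, 192} -> NCHW
    return 0;
}
```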
25 changes: 25 additions & 0 deletions modules/dnn/test/test_onnx_importer.cpp
@@ -30,6 +30,27 @@ class Test_ONNX_layers : public DNNTestLayer
         pb
     };
 
+    void testInputShapes(const Net& net, const std::vector<Mat>& inps)
+    {
+        std::vector<MatShape> inLayerShapes;
+        std::vector<MatShape> outLayerShapes;
+        net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
+        ASSERT_EQ(inLayerShapes.size(), inps.size());
+
+        for (int i = 0; i < inps.size(); ++i) {
+            bool hasDynamicShapes = inLayerShapes[i].empty();
+            if (hasDynamicShapes)
+                continue;
+            if (inLayerShapes[i].size() == 1) { // 1D input
+                ASSERT_EQ(shape(inLayerShapes[i][0], 1), shape(inps[i]));
+            } else {
+                // Compare all axes except batch dimension which is variable.
+                inLayerShapes[i][0] = inps[i].size[0];
+                ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
+            }
+        }
+    }
+
     void testONNXModels(const String& basename, const Extension ext = npy,
                         const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
                         bool checkNoFallbacks = true, int numInps = 1)
@@ -54,6 +75,8 @@ class Test_ONNX_layers : public DNNTestLayer
         Net net = readNetFromONNX(onnxmodel);
         ASSERT_FALSE(net.empty());
 
+        testInputShapes(net, inps);
+
         net.setPreferableBackend(backend);
         net.setPreferableTarget(target);
 
@@ -2315,6 +2338,8 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
         lInf = 0.06;
     }
 
+    testInputShapes(net, {input0});
+
     checkBackend(&input0, &ref0);
    net.setInput(input0);
    Mat out = net.forward().clone();
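The new `testInputShapes` helper reads the stored shapes back via `getLayerShapes`, skips inputs that the importer left dynamic (empty shape), and overwrites the batch axis before comparing, since the batch size may differ between the declared shape and the test blob. A minimal illustration of that batch relaxation (shapes are illustrative):

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    MatShape declared = {1, 3, 224, 224};  // shape recorded by the importer (illustrative)
    Mat input(std::vector<int>{4, 3, 224, 224}, CV_32F, Scalar());  // batch of 4 at test time
    declared[0] = input.size[0];           // relax the batch axis, as the helper does
    CV_Assert(declared == shape(input));   // the remaining axes must match exactly
    return 0;
}
```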
14 changes: 14 additions & 0 deletions modules/dnn/test/test_tflite_importer.cpp
@@ -11,6 +11,7 @@ Test for TFLite models loading
 
 #include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
 #include <opencv2/dnn/utils/debug_utils.hpp>
+#include <opencv2/dnn/shape_utils.hpp>
 
 #ifdef OPENCV_TEST_DNN_TFLITE
 
@@ -19,9 +20,21 @@ namespace opencv_test { namespace {
 using namespace cv;
 using namespace cv::dnn;
 
+void testInputShapes(const Net& net, const std::vector<Mat>& inps) {
+    std::vector<MatShape> inLayerShapes;
+    std::vector<MatShape> outLayerShapes;
+    net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes);
+    ASSERT_EQ(inLayerShapes.size(), inps.size());
+
+    for (int i = 0; i < inps.size(); ++i) {
+        ASSERT_EQ(inLayerShapes[i], shape(inps[i]));
+    }
+}
+
 void testModel(const std::string& modelName, const Mat& input, double l1 = 1e-5, double lInf = 1e-4)
 {
     Net net = readNet(findDataFile("dnn/tflite/" + modelName + ".tflite", false));
+    testInputShapes(net, {input});
     net.setInput(input);
 
     std::vector<String> outNames = net.getUnconnectedOutLayersNames();
@@ -72,6 +85,7 @@ TEST(Test_TFLite, max_unpooling)
     cvtColor(input, input, COLOR_BGR2RGBA);
     input = input.mul(Scalar(1, 1, 1, 0));
     input = blobFromImage(input, 1.0 / 255);
+    testInputShapes(net, {input});
     net.setInput(input);
 
     std::vector<std::vector<Mat> > outs;
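Unlike the ONNX helper, the TFLite variant of `testInputShapes` asserts exact equality, batch axis included, so the blobs built with `blobFromImage` are expected to match the NCHW shapes recorded by the importer exactly. A minimal sketch of that expectation (image size is illustrative):

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/shape_utils.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    Mat img(192, 192, CV_8UC3, Scalar::all(0));  // illustrative image size
    Mat blob = blobFromImage(img, 1.0 / 255);    // NCHW blob: {1, 3, 192, 192}
    CV_Assert(shape(blob) == MatShape({1, 3, 192, 192}));
    return 0;
}
```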