48 changes: 39 additions & 9 deletions modules/gapi/src/backends/onnx/gonnxbackend.cpp
@@ -123,7 +123,7 @@ class ONNXCompiled {
     std::vector<cv::Mat> out_data;
 
     void Run(const std::vector<cv::Mat>& ins,
-             const std::vector<cv::Mat>& outs);
+                   std::vector<cv::Mat>& outs);
 
     std::vector<std::string> in_names_without_const;
 public:
@@ -322,22 +322,20 @@ inline std::vector<int64_t> toORT(const cv::MatSize &sz) {
 inline void preprocess(const cv::Mat& src,
                        const cv::gimpl::onnx::TensorInfo& ti,
                              cv::Mat& dst) {
-    GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U);
     // CNN input type
     const auto type = toCV(ti.type);
-    if (src.depth() == CV_32F) {
+    if (src.depth() != CV_8U) {
         // Just pass the tensor as-is.
         // No layout or dimension transformations done here!
         // TODO: This needs to be aligned across all NN backends.
-        GAPI_Assert(type == CV_32F && "Only 32F model input is supported for 32F input data");
         const auto tensor_dims = toORT(src.size);
         if (tensor_dims.size() == ti.dims.size()) {
             for (size_t i = 0; i < ti.dims.size(); ++i) {
                 GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) &&
-                            "32F tensor dimensions should match with all non-dynamic NN input dimensions");
+                            "Non-U8 tensor dimensions should match with all non-dynamic NN input dimensions");
             }
         } else {
-            GAPI_Error("32F tensor size should match with NN input");
+            GAPI_Error("Non-U8 tensor size should match with NN input");
         }
 
         dst = src;
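For reference, the dimension check this hunk generalizes from 32F-only to all non-U8 inputs reduces to the following predicate. A minimal standalone sketch, not part of the PR: dimsCompatible is a hypothetical name, and a model dimension of -1 denotes a dynamic axis that matches any size.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring the loop above: sizes must have the same
// rank, and every non-dynamic model dimension must match the tensor's.
static bool dimsCompatible(const std::vector<int64_t>& model_dims,
                           const std::vector<int64_t>& tensor_dims) {
    if (model_dims.size() != tensor_dims.size()) {
        return false; // "Non-U8 tensor size should match with NN input"
    }
    for (std::size_t i = 0; i < model_dims.size(); ++i) {
        if (model_dims[i] != -1 && model_dims[i] != tensor_dims[i]) {
            return false;
        }
    }
    return true;
}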
@@ -471,6 +469,25 @@ inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info,
         return createTensor<float>(memory_info, tensor_params, data);
     case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
         return createTensor<int32_t>(memory_info, tensor_params, data);
+    case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64: {
+        // cv::Mat does not support int64 data directly.
+        // The following steps create an ONNX tensor from the cv::Mat data:
+        // - First, create a new ONNX tensor 'i64_tensor' with data type int64_t using the default allocator
+        // - Next, retrieve a pointer to the mutable data buffer of 'i64_tensor'
+        // - Finally, convert the data from int32 (see the toCV function) to int64 and deep copy it into 'i64_tensor'
+        auto ort_dims = toORT(data.size);
+
+        Ort::AllocatorWithDefaultOptions allocator;
+        Ort::Value i64_tensor = Ort::Value::CreateTensor<int64_t>(allocator,
+                                                                  ort_dims.data(),
+                                                                  ort_dims.size());
+        int64_t* tensor_data = i64_tensor.GetTensorMutableData<int64_t>();
+
+        cv::gimpl::convertInt32ToInt64(data.ptr<int>(),
+                                       tensor_data,
+                                       data.total());
+        return i64_tensor;
+    }
     default:
         GAPI_Error("ONNX. Unsupported data type");
     }
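Outside the backend, the same int32-to-int64 widening can be reproduced with plain ONNX Runtime and OpenCV calls. A minimal sketch, assuming a continuous CV_32S cv::Mat as input; makeInt64Tensor is a hypothetical name, and cv::gimpl::convertInt32ToInt64 is stood in for by std::copy_n.

#include <onnxruntime_cxx_api.h>
#include <opencv2/core.hpp>
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical helper, not part of the PR: builds an int64 ORT tensor that
// owns its buffer, then widens the int32 Mat elements into it (a deep copy).
Ort::Value makeInt64Tensor(const cv::Mat& m) {
    CV_Assert(m.depth() == CV_32S && m.isContinuous());
    std::vector<int64_t> dims(m.size.p, m.size.p + m.dims);
    Ort::AllocatorWithDefaultOptions allocator;
    Ort::Value t = Ort::Value::CreateTensor<int64_t>(allocator, dims.data(), dims.size());
    // Per-element widening from int32 to int64, like convertInt32ToInt64 above.
    std::copy_n(m.ptr<int>(), m.total(), t.GetTensorMutableData<int64_t>());
    return t;
}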
@@ -747,9 +764,11 @@ ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp)
                            in_tensor_info.end(),
                            [](const cv::gimpl::onnx::TensorInfo &p) {
                                return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT
-                                   || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
+                                   || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8
+                                   || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32
+                                   || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
                            })
-               && "Only FP32 and U8 inputs for NN are supported");
+               && "Only FP32, INT32, INT64 and U8 inputs for NN are supported");
 
     // Put mean and std in appropriate tensor params
     if (!params.mean.empty() || !params.stdev.empty()) {
@@ -864,7 +883,7 @@ cv::Mat ONNXCompiled::allocOutput(int i) const {
 }
 
 void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
-                       const std::vector<cv::Mat>& outs) {
+                             std::vector<cv::Mat>& outs) {
     std::vector<Ort::Value> in_tensors, out_tensors;
 
     // Layer names order for run
@@ -909,6 +928,17 @@ void ONNXCompiled::Run(const std::vector<cv::Mat>& ins,
                     out_run_names.data(),
                     &out_tensors.front(),
                     params.output_names.size());
+        if (out_tensor_info[0].type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) {
+            // cv::Mat does not support int64 output data.
+            // Conversion from int64 to int32 is carried out in the copyFromONNX
+            // function; the output is written to out_mat.
+            for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensors),
+                                              ade::util::toRange(outs))) {
+                auto &out_tensor = std::get<0>(iter);
+                auto &out_mat    = std::get<1>(iter);
+                copyFromONNX(out_tensor, out_mat);
+            }
+        }
     } else {
         // Hard path - run session & user-defined post-processing
         // NOTE: use another list of output names here
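The output path is the reverse of the input path: the comment above states that copyFromONNX narrows int64 tensor data back to int32 before writing it into the output cv::Mat. A minimal sketch of that narrowing copy; convertInt64ToInt32 is a hypothetical name here, and values outside the int32 range would simply be truncated by the cast.

#include <cstddef>
#include <cstdint>

// Hypothetical helper, not the PR's code: narrows each int64 element into
// an int32 destination buffer (e.g. a CV_32S cv::Mat's data pointer).
static void convertInt64ToInt32(const int64_t* src, int* dst, std::size_t n) {
    for (std::size_t i = 0; i < n; ++i) {
        dst[i] = static_cast<int>(src[i]); // narrowing cast per element
    }
}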