Merge pull request #24386 from fengyuentau:fix_dtype_nary_eltwise · opencv/opencv@0507043 · GitHub

Commit 0507043

Merge pull request #24386 from fengyuentau:fix_dtype_nary_eltwise
dnn: fix inconsistent input dtype for nary eltwise layers #24386

Resolves #24385
Merge with opencv/opencv_extra#1107
Relates #24092 (comment)

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV.
- [x] The PR is proposed to the proper branch.
- [x] There is a reference to the original bug report and related work.
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable. Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake.
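For context on the failure being fixed: a comparison node such as Greater with a float input and an int64 initializer previously hit the importer's "Mixed input data types." error, because the constant blob kept its integer depth while the other operand was float. Below is a rough, hypothetical sketch of exercising such a model through the public dnn API; the model file name is a placeholder standing in for the test data added in opencv_extra.

```cpp
#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    // Hypothetical model: output = Greater(float input, int64 constant).
    // The path is a placeholder, not a file shipped with this PR.
    cv::dnn::Net net = cv::dnn::readNetFromONNX("greater_input_dtype_int64.onnx");

    cv::Mat image(cv::Size(4, 3), CV_32F, cv::Scalar(0.5f));
    net.setInput(cv::dnn::blobFromImage(image));

    // Before this patch the constant kept its original depth, so the nary
    // eltwise layer saw mixed input dtypes; now constants are converted to
    // the depth of their output buffer before the comparison runs.
    cv::Mat out = net.forward();
    std::cout << "output dims: " << out.dims << ", elements: " << out.total() << std::endl;
    return 0;
}
```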
1 parent 58285e5 commit 0507043

3 files changed (+20, -6 lines)

modules/dnn/src/layers/const_layer.cpp

Lines changed: 16 additions & 5 deletions
```diff
@@ -62,10 +62,15 @@ class ConstLayerImpl CV_FINAL : public ConstLayer
     {
         std::vector<UMat> outputs;
         outs.getUMatVector(outputs);
-        if (outs.depth() == CV_16S)
-            convertFp16(blobs[0], outputs[0]);
+        if (outs.depth() == CV_16S) {
+            auto blob = blobs[0];
+            if (blob.type() != CV_32F) {
+                blob.convertTo(blob, CV_32F);
+            }
+            convertFp16(blob, outputs[0]);
+        }
         else
-            blobs[0].copyTo(outputs[0]);
+            blobs[0].convertTo(outputs[0], outputs[0].type());
         return true;
     }
 #endif
@@ -80,7 +85,7 @@ class ConstLayerImpl CV_FINAL : public ConstLayer
 
         std::vector<Mat> outputs;
         outputs_arr.getMatVector(outputs);
-        blobs[0].copyTo(outputs[0]);
+        blobs[0].convertTo(outputs[0], outputs[0].type());
     }
 
 #ifdef HAVE_CANN
@@ -126,6 +131,8 @@ class ConstLayerImpl CV_FINAL : public ConstLayer
         ngraph::element::Type dType;
         if (blobs[0].depth() == CV_32F) {
             dType = ngraph::element::f32;
+        } else if (blobs[0].depth() == CV_32S) {
+            dType = ngraph::element::i32;
         } else if (blobs[0].depth() == CV_8S) {
             dType = ngraph::element::i8;
         } else {
@@ -163,7 +170,11 @@ class ConstLayerImpl CV_FINAL : public ConstLayer
         auto context = reinterpret_cast<csl::CSLContext*>(context_);
 
         CV_Assert(blobs.size() == 1);
-        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blobs[0]);
+        Mat blob = blobs[0];
+        if (blob.type() != CV_32F) {
+            blob.convertTo(blob, CV_32F);
+        }
+        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blob);
     }
 #endif
 
```
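The recurring pattern above is the switch from Mat::copyTo to Mat::convertTo, so that a constant blob is cast to the depth its pre-allocated output buffer expects. Here is a minimal standalone sketch of that pattern; the function name and shapes are illustrative, not taken from the patch.

```cpp
#include <opencv2/core.hpp>

// Cast a constant blob into whatever depth the pre-allocated output uses.
static void forwardConst(const cv::Mat& blob, cv::Mat& output)
{
    // copyTo would re-create `output` with the blob's own depth when the types
    // differ, so downstream layers would still see the constant's original
    // dtype; convertTo casts element-wise into the expected depth instead.
    blob.convertTo(output, output.type());
}

int main()
{
    cv::Mat constBlob(3, 4, CV_32S, cv::Scalar(7)); // e.g. produced from an int64 initializer
    cv::Mat output(3, 4, CV_32F);                   // the consuming layer expects float
    forwardConst(constBlob, output);
    CV_Assert(output.type() == CV_32F && output.at<float>(0, 0) == 7.f);
    return 0;
}
```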

modules/dnn/src/onnx/onnx_importer.cpp

Lines changed: 1 addition & 1 deletion
```diff
@@ -383,7 +383,7 @@ void runLayer(LayerParams& params, const std::vector<Mat>& inputs,
     {
         inpShapes[i] = shape(inputs[i]);
         if (i > 0 && ddepth != inputs[i].depth())
-            CV_Error(Error::StsNotImplemented, "Mixed input data types.");
+            CV_Error(Error::StsNotImplemented, cv::format("Mixed input data types. Required type: %d, actual type: %d", ddepth, inputs[i].depth()));
 
         // Quantize and Dequantize layer have different output type than input.
         if (params.type != "Quantize" && params.type != "Dequantize")
```
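Side note on reading the improved message: the %d placeholders print OpenCV depth codes rather than ONNX type names. A small sketch for decoding them, assuming cv::depthToString from opencv2/core/check.hpp:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/core/check.hpp>
#include <iostream>

int main()
{
    // Depth codes as they appear in the error text, decoded to readable names.
    std::cout << CV_32F << " -> " << cv::depthToString(CV_32F) << std::endl; // 5 -> CV_32F
    std::cout << CV_32S << " -> " << cv::depthToString(CV_32S) << std::endl; // 4 -> CV_32S
    return 0;
}
```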

modules/dnn/test/test_onnx_importer.cpp

Lines changed: 3 additions & 0 deletions
```diff
@@ -675,6 +675,9 @@ TEST_P(Test_ONNX_layers, Compare_GT)
 
     testONNXModels("greater");
 }
+TEST_P(Test_ONNX_layers, Greater_input_dtype_int64) {
+    testONNXModels("greater_input_dtype_int64");
+}
 
 TEST_P(Test_ONNX_layers, Compare_LT)
 {
```
