
Commit aebf814

iefode and sbalandi authored
[ IE TESTS ] Update tensor comparison function according to plugin requirements (openvinotoolkit#23226)
### Details:
- *The comparison function was changed to compare tensors element by element:*
- *`std::abs(ref_value - plugin_value) <= abs_threshold + rel_threshold * ref_value`*
- *`abs_threshold = std::max(std::numeric_limits<plugin_element_type>::epsilon(), std::numeric_limits<ref_element_type>::epsilon())`*
- *`rel_threshold = eps_by_expected_type()`, which is based on half the `bit length of the mantissa`*

### Tickets:
- [CVS-133173](https://jira.devtools.intel.com/browse/CVS-133173)
- [CVS-135540](https://jira.devtools.intel.com/browse/CVS-135540)

---------

Co-authored-by: sbalandi <sofya.balandina@intel.com>
1 parent b520763 commit aebf814
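
For context, the check described above can be sketched as a small element-wise routine. This is only an illustration, not the utility introduced by the PR: the helper names (`eps_by_expected_type`, `tensors_close`) and the concrete threshold values are assumptions.

```cpp
#include <cmath>
#include <cstddef>
#include <limits>

// Illustrative relative threshold based on half the bit length of the mantissa,
// as the commit message describes (assumed: f32 -> 2^-11, f16 -> 2^-5).
inline double eps_by_expected_type(int mantissa_bits) {
    return std::pow(2.0, -(mantissa_bits / 2));
}

// Element-wise criterion from the commit message:
//   std::abs(ref_value - plugin_value) <= abs_threshold + rel_threshold * ref_value
// The magnitude of the reference is used for the relative term so the bound stays
// meaningful for negative references; that detail is an assumption of this sketch.
inline bool tensors_close(const float* ref, const float* out, std::size_t n,
                          double abs_threshold, double rel_threshold) {
    for (std::size_t i = 0; i < n; ++i) {
        const double diff = std::abs(static_cast<double>(ref[i]) - static_cast<double>(out[i]));
        if (diff > abs_threshold + rel_threshold * std::abs(static_cast<double>(ref[i]))) {
            return false;
        }
    }
    return true;
}

int main() {
    // f32 reference vs f32 plugin output:
    // abs_threshold = max(eps(f32), eps(f32)) ~ 1.19e-7, rel_threshold = 2^-11 ~ 4.9e-4.
    const float ref[3] = {1.0f, 2.5f, 0.0f};
    const float out[3] = {1.0003f, 2.5004f, 1e-8f};
    return tensors_close(ref, out, 3,
                         std::numeric_limits<float>::epsilon(),
                         eps_by_expected_type(23)) ? 0 : 1;
}
```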

File tree

37 files changed: +597 −132 lines changed


.github/workflows/job_cxx_unit_tests.yml

+6
@@ -215,6 +215,12 @@ jobs:
           --gtest_filter=*smoke* \
           --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml

+      - name: OV utils unit tests
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_util_tests.xml
+
       - name: OpenVINO C API tests
         if: fromJSON(inputs.affected-components).C_API.test
         run: |

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/augru_cell.cpp

+1
@@ -104,6 +104,7 @@ class AUGRUCellCPUTest : public testing::WithParamInterface<AUGRUCellCpuSpecific

         if (additionalConfig[ov::hint::inference_precision.name()] == ov::element::bf16) {
             selectedType = makeSelectedTypeStr(selectedType, ElementType::bf16);
+            abs_threshold = 2e-2;
         } else {
             selectedType = makeSelectedTypeStr(selectedType, netPrecision);
         }

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp

+8
@@ -132,6 +132,14 @@ void ActivationLayerCPUTest::SetUp() {
     auto activation = utils::make_activation(params, netPrecision, activationType, activationShapes, constantsValue);
     activation->get_rt_info() = getCPUInfo();
     function = std::make_shared<ov::Model>(ov::NodeVector{activation}, ov::ParameterVector{params}, "Activation");
+#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64)
+    if (netPrecision == ov::element::f32 && outPrecision == ov::element::f32) {
+        abs_threshold = 8e-4;
+    }
+#endif
+    if (netPrecision == ov::element::bf16 && outPrecision == ov::element::f32) {
+        abs_threshold = 6e-2;
+    }
 }

 std::string ActivationLayerCPUTest::getPrimitiveType(const utils::ActivationTypes& activation_type,

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp

+1
@@ -98,6 +98,7 @@ void PoolingLayerCPUTest::SetUp() {

     std::shared_ptr<ov::Node> poolInput = params[0];
     if (isInt8) {
+        abs_threshold = 2e-2;
         ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1);
         poolInput = ov::test::utils::make_fake_quantize(poolInput, inPrc, 256, newShape);
     }

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/reduce.cpp

+7
@@ -138,6 +138,13 @@ void ReduceCPULayerTest::SetUp() {
     }

     function = makeNgraphFunction(netPrecision, params, reduce, "Reduce");
+
+    if (ov::with_cpu_x86_avx512_core_amx()) {
+        if (netPrecision == ov::element::f32 && configuration.count(ov::hint::inference_precision.name()) &&
+            configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) {
+            abs_threshold = 5e-3;
+        }
+    }
 }

 void ReduceCPULayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/deformable_convolution.cpp

+4
@@ -217,6 +217,10 @@ class DefConvLayerCPUTest : public testing::WithParamInterface<DefConvLayerCPUTe
     }

     function = makeNgraphFunction(netPrecision, parameters, deformable_conv, "deformable_convolution");
+
+    if (netPrecision == ov::element::f32) {
+        abs_threshold = 5e-6;
+    }
 }
};

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/fake_quantize.cpp

+4
@@ -123,6 +123,10 @@ class FakeQuantizeLayerCPUTest : public testing::WithParamInterface<fqLayerTestP
     }

     function = makeNgraphFunction(inPrec, params, fq, "FakeQuantizeCPU");
+
+    if (inPrec == ov::element::f32) {
+        abs_threshold = 1e-4;
+    }
 }

 void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/lrn.cpp

+3
@@ -59,6 +59,9 @@ class LRNLayerCPUTest : public testing::WithParamInterface<LRNParams>, public ov
     auto axesNode = ov::op::v0::Constant::create(ov::element::i32, { axes.size() }, axes);
     auto lrn = std::make_shared<ov::op::v0::LRN>(params[0], axesNode, alpha, beta, bias, size);
     function = makeNgraphFunction(inputPrecision, params, lrn, "LRN");
+    if (inputPrecision == ov::element::f32) {
+        abs_threshold = 5e-3;
+    }
 }
};

src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/rdft.cpp

+4
@@ -146,6 +146,10 @@ class RDFTTestCPU : public testing::WithParamInterface<std::tuple<ov::element::T
         }
     }
     function = std::make_shared<Model>(rdft, inputs);
+
+    if (precision == ov::element::f32) {
+        abs_threshold = 1e-4;
+    }
 }

 void generate_inputs(const std::vector<Shape>& targetInputStaticShapes) override {

src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/conv_sum_broadcast.cpp

-2
@@ -290,8 +290,6 @@ class ConvSumBroadcastTest : public ConvSumInPlaceTest {
};

TEST_P(ConvSumBroadcastTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
    run();

    CheckPluginRelatedResults(compiledModel, "Convolution");

src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/convs_and_sums.cpp

+2
@@ -120,6 +120,8 @@ class ConvsAndSums : virtual public SubgraphBaseStaticTest {

        auto result = std::make_shared<ov::op::v0::Result>(relu3);
        function = std::make_shared<ov::Model>(result, params, "SimpleNet");
+
+        abs_threshold = 9e-4;
    }
};

src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp

+3
@@ -70,6 +70,9 @@ class FuseScaleShiftAndFakeQuantizeTest : public testing::WithParamInterface<Fus
                                              quantizeIntervals[3]);
        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(quantize)};
        function = std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "FuseScaleShiftAndQuantize");
+        if (inputPrecision == element::f32) {
+            abs_threshold = 2e-7;
+        }
    }
};

src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/matmul_weights_decompression.cpp

+8
@@ -279,6 +279,14 @@ class MatmulWeightsDecompression : public testing::WithParamInterface<MatmulWeig
        std::tie(postOpMgrPtr, fusedOps) = fusing_params;
        init_input_shapes({shape_params.data_shape, {{}, {{shape_params.weights_shape}}}});

+        // if dynamic quantization is enabled
+        if (configuration.count(ov::hint::dynamic_quantization_group_size.name()) &&
+            configuration.at(ov::hint::dynamic_quantization_group_size.name()) != 0) {
+            abs_threshold = 0.1;
+        } else if (!configuration.count(ov::hint::dynamic_quantization_group_size.name())) {
+            abs_threshold = 5e-3;
+        }
+
        ElementType netType = ov::element::f32;
        inType = outType = netType;

0 commit comments
