
Commit b0e507c

Removed runtime::Tensor alias (openvinotoolkit#22429)
* Removed AllocatorImpl, runtime::Tensor alias
* Update OMZ
* Updated submodule
1 parent cb679a7 commit b0e507c

35 files changed: +116 -127 lines


src/core/include/openvino/runtime/allocator.hpp

+1 -7

@@ -14,9 +14,9 @@
 
 #include "openvino/core/any.hpp"
 #include "openvino/core/core_visibility.hpp"
-#include "openvino/core/deprecated.hpp"
 
 namespace ov {
+
 class Tensor;
 
 /**
@@ -158,10 +158,4 @@ class OPENVINO_API Allocator {
     explicit operator bool() const noexcept;
 };
 
-OPENVINO_SUPPRESS_DEPRECATED_START
-namespace runtime {
-using ov::Allocator;
-}  // namespace runtime
-OPENVINO_SUPPRESS_DEPRECATED_END
-
 }  // namespace ov
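With the aliases above removed, downstream code names the allocator type as ov::Allocator directly; the ov::runtime spelling no longer compiles. A minimal post-commit sketch (illustrative only, not taken from this commit):

#include <openvino/runtime/allocator.hpp>
#include <openvino/runtime/tensor.hpp>

int main() {
    ov::Allocator allocator;  // was ov::runtime::Allocator before the alias was dropped
    ov::Tensor tensor(ov::element::f32, ov::Shape{1, 3, 224, 224}, allocator);
    return tensor ? 0 : 1;  // Tensor::operator bool() is true when the tensor is initialized
}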

src/core/include/openvino/runtime/tensor.hpp

-5

@@ -259,9 +259,4 @@ class OPENVINO_API Tensor {
  */
 using TensorVector = std::vector<Tensor>;
 
-namespace runtime {
-using ov::Tensor;
-using ov::TensorVector;
-}  // namespace runtime
-
 }  // namespace ov
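For user code the tensor side of the migration is a pure rename: ov::runtime::Tensor becomes ov::Tensor and ov::runtime::TensorVector becomes ov::TensorVector. A small sketch under that assumption (the helper functions are hypothetical):

#include <algorithm>

#include <openvino/runtime/tensor.hpp>

// Before this commit: ov::runtime::Tensor / ov::runtime::TensorVector (deprecated aliases).
// After this commit: only the ov::Tensor / ov::TensorVector spellings exist.
ov::Tensor make_zero_input() {
    ov::Tensor tensor(ov::element::f32, ov::Shape{2, 4});
    std::fill_n(tensor.data<float>(), tensor.get_size(), 0.0f);  // zero-fill all 8 elements
    return tensor;
}

ov::TensorVector make_outputs() {
    return ov::TensorVector{make_zero_input()};  // TensorVector is std::vector<Tensor>
}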

src/core/src/preprocess/preprocess_impls.hpp

+1 -1

@@ -218,7 +218,7 @@ class InputTensorInfo::InputTensorInfoImpl : public TensorInfoImplBase {
         m_shape_set = true;
     }
 
-    void set_from(const ov::runtime::Tensor& runtime_tensor) {
+    void set_from(const ov::Tensor& runtime_tensor) {
         set_shape(runtime_tensor.get_shape());
         set_element_type(runtime_tensor.get_element_type());
     }

src/plugins/intel_cpu/tests/functional/single_layer_tests/gather.cpp

+1 -1

@@ -128,7 +128,7 @@ class GatherLayerTestCPU : public testing::WithParamInterface<GatherLayerTestCPU
 
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             const auto& funcInput = funcInputs[i];
-            ov::runtime::Tensor tensor;
+            ov::Tensor tensor;
             ov::test::utils::InputGenerateData in_data;
 
             if (funcInput.get_node()->get_friendly_name() == "data") {

src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp

+1 -1

@@ -136,7 +136,7 @@ class GridSampleLayerTestCPU : public testing::WithParamInterface<GridSampleLaye
 
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             const auto& funcInput = funcInputs[i];
-            ov::runtime::Tensor tensor;
+            ov::Tensor tensor;
             ov::test::utils::InputGenerateData in_data;
 
             if (funcInput.get_node()->get_friendly_name() == "data") {

src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp

+1 -1

@@ -96,7 +96,7 @@ class NmsLayerCPUTest : public testing::WithParamInterface<NmsParams>, virtual p
         auto node = funcInputs[2].get_node_shared_ptr();
         auto it = inputs.find(node);
         if (it == inputs.end()) return;
-        auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
+        auto tensor = ov::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
         inputs[node] = tensor;
     }
 

src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp

+3 -3

@@ -152,21 +152,21 @@ class RDFTTestCPU : public testing::WithParamInterface<std::tuple<ov::element::T
         const auto& funcInputs = function->inputs();
         auto funcInput = funcInputs.begin();
         inputs.clear();
-        runtime::Tensor data_tensor = test::utils::create_and_fill_tensor_normal_distribution(funcInput->get_element_type(),
+        ov::Tensor data_tensor = test::utils::create_and_fill_tensor_normal_distribution(funcInput->get_element_type(),
                                                                                           targetInputStaticShapes[0], 0, 1, 0);
 
         inputs.insert({funcInput->get_node_shared_ptr(), data_tensor});
         funcInput++;
         if (!constAxes && funcInput != funcInputs.end()) {
             ASSERT_TRUE(inputIdx < axes.size());
-            auto tensor = ov::runtime::Tensor{funcInput->get_element_type(), Shape{axes[inputIdx].size()}};
+            auto tensor = ov::Tensor{funcInput->get_element_type(), Shape{axes[inputIdx].size()}};
             std::memcpy(tensor.data(), axes[inputIdx].data(), axes[inputIdx].size() * sizeof(axes[0][0]));
             inputs.insert({funcInput->get_node_shared_ptr(), tensor});
             funcInput++;
         }
         if (!constSignalSizes && funcInput != funcInputs.end()) {
             ASSERT_TRUE(inputIdx < signalSizes.size());
-            auto tensor = ov::runtime::Tensor{funcInput->get_element_type(), Shape{signalSizes[inputIdx].size()}};
+            auto tensor = ov::Tensor{funcInput->get_element_type(), Shape{signalSizes[inputIdx].size()}};
             std::memcpy(tensor.data(), signalSizes[inputIdx].data(), signalSizes[inputIdx].size() * sizeof(signalSizes[0][0]));
             inputs.insert({funcInput->get_node_shared_ptr(), tensor});
         }

src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp

+2 -2

@@ -83,11 +83,11 @@ class ShapeOpsCPUTest : public testing::WithParamInterface<shapeOpsParams>,
         const auto& funcInputs = function->inputs();
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             const auto& funcInput = funcInputs[i];
-            ov::runtime::Tensor tensor;
+            ov::Tensor tensor;
             if (i == 1) {
 #define RESHAPE_TEST_CASE(INT_TYPE) \
     case ov::element::Type_t::INT_TYPE: { \
-        tensor = ov::runtime::Tensor{ov::element::INT_TYPE, targetInputStaticShapes[i]}; \
+        tensor = ov::Tensor{ov::element::INT_TYPE, targetInputStaticShapes[i]}; \
         auto inputData = tensor.data<ov::element_type_traits<ov::element::INT_TYPE>::value_type>(); \
         ASSERT_TRUE(idx < data.size()); \
         for (size_t j = 0lu; j < data[idx].size(); ++j) { \

src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp

+1 -1

@@ -120,7 +120,7 @@ class UniqueLayerTestCPU : public testing::WithParamInterface<UniqueLayerTestCPU
 
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             const auto& funcInput = funcInputs[i];
-            ov::runtime::Tensor tensor;
+            ov::Tensor tensor;
 
             if (funcInput.get_node()->get_friendly_name() == "data") {
                 int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1, std::multiplies<size_t>());

src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp

+2 -2

@@ -57,9 +57,9 @@ class InPlaceReshapeFromConstantCheck : public SubgraphBaseTest {
         const auto& funcInputs = function->inputs();
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             const auto& funcInput = funcInputs[i];
-            ov::runtime::Tensor tensor;
+            ov::Tensor tensor;
             if (i == 1) {
-                tensor = ov::runtime::Tensor{ov::element::i32, targetInputStaticShapes[i]};
+                tensor = ov::Tensor{ov::element::i32, targetInputStaticShapes[i]};
                 auto inputData = tensor.data<ov::element_type_traits<ov::element::i32>::value_type>();
                 const std::vector<unsigned> data = {38, 38, 15, 4};
                 for (size_t j = 0lu; j < data.size(); ++j) {

src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp

+1 -1

@@ -99,7 +99,7 @@ class GridSampleLayerTestGPU : public testing::WithParamInterface<GridSampleLaye
 
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             const auto& funcInput = funcInputs[i];
-            ov::runtime::Tensor tensor;
+            ov::Tensor tensor;
             ov::test::utils::InputGenerateData in_data;
 
             if (funcInput.get_node()->get_friendly_name() == "data") {

src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp

+1 -1

@@ -103,7 +103,7 @@ class NmsLayerGPUTest : public testing::WithParamInterface<NmsLayerTestParams>,
         auto node = funcInputs[2].get_node_shared_ptr();
         auto it = inputs.find(node);
         if (it == inputs.end()) return;
-        auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
+        auto tensor = ov::Tensor(node->get_element_type(), targetInputStaticShapes[2], &maxOutBoxesPerClass);
         inputs[node] = tensor;
     }
 

src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp

+1 -1

@@ -54,7 +54,7 @@ class EmptyTensorDynamicGPUTest : public testing::WithParamInterface<emptyTensor
         const auto& funcInputs = function->inputs();
         for (size_t i = 0; i < funcInputs.size(); ++i) {
             auto node = funcInputs[i].get_node_shared_ptr();
-            auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[i]);
+            auto tensor = ov::Tensor(node->get_element_type(), targetInputStaticShapes[i]);
             if (i == 0) {
                 // All zero inputs for non_zero op
                 auto tensor_ptr = static_cast<int32_t*>(tensor.data());

src/plugins/template/backend/executable.hpp

+8 -8

@@ -19,16 +19,16 @@ class Executable {
     Executable();
     virtual ~Executable();
 
-    /// \param outputs vector of runtime::Tensor used as outputs
-    /// \param inputs vector of runtime::Tensor used as inputs
+    /// \param outputs vector of Tensor used as outputs
+    /// \param inputs vector of Tensor used as inputs
     /// \param collect_performance Enable per operation performance statistic
     /// \returns true if iteration is successful, false otherwise
     virtual bool call(std::vector<ov::Tensor>& outputs,
                       const std::vector<ov::Tensor>& inputs,
                       bool collect_performance = false) = 0;
 
-    /// \param outputs vector of runtime::Tensor used as outputs
-    /// \param inputs vector of runtime::Tensor used as inputs
+    /// \param outputs vector of Tensor used as outputs
+    /// \param inputs vector of Tensor used as inputs
     /// \param context Evaluation context
     /// \param collect_performance Enable per operation performance statistic
     /// \returns true if iteration is successful, false otherwise
@@ -41,14 +41,14 @@
     virtual void cancel() = 0;
 
     /// \brief Executes a single iteration of a Function.
-    /// \param outputs vector of runtime::Tensor used as outputs
-    /// \param inputs vector of runtime::Tensor used as inputs
+    /// \param outputs vector of Tensor used as outputs
+    /// \param inputs vector of Tensor used as inputs
    /// \returns true if iteration is successful, false otherwise
     bool call_with_validate(std::vector<ov::Tensor>& outputs, const std::vector<ov::Tensor>& inputs);
 
     /// \brief Validates a Function.
-    /// \param outputs vector of runtime::Tensor used as outputs
-    /// \param inputs vector of runtime::Tensor used as inputs
+    /// \param outputs vector of Tensor used as outputs
+    /// \param inputs vector of Tensor used as inputs
     void validate(const std::vector<ov::Tensor>& outputs, const std::vector<ov::Tensor>& inputs);
 
     /// \brief Query the input Parameters

src/plugins/template/tests/functional/op_reference/interpolate.cpp

+4 -4

@@ -50,8 +50,8 @@ struct InterpolateV1Params {
     Shape outShape;
     element::Type inType;
     element::Type outType;
-    runtime::Tensor inData;
-    runtime::Tensor outData;
+    ov::Tensor inData;
+    ov::Tensor outData;
     std::shared_ptr<op::v0::Constant> outShapeInput;
     op::v0::Interpolate::Attributes attrs;
 };
@@ -82,8 +82,8 @@ struct InterpolateV4Params {
     Shape outShape;
     element::Type inType;
     element::Type outType;
-    runtime::Tensor inData;
-    runtime::Tensor outData;
+    ov::Tensor inData;
+    ov::Tensor outData;
     std::vector<size_t> outShapeInput;
     element::Type outShapeInputType;
     std::vector<float> scales;

src/plugins/template/tests/functional/op_reference/memory.cpp

+4 -4

@@ -36,8 +36,8 @@ struct ReadValueAssignParams {
     Shape m_output_shape;
     element::Type m_input_type;
     element::Type m_output_type;
-    runtime::Tensor m_input_data;
-    runtime::Tensor m_expected_data;
+    ov::Tensor m_input_data;
+    ov::Tensor m_expected_data;
     std::string m_variable_id;
 };
 
@@ -245,8 +245,8 @@ struct MemoryTestParams {
     ov::Shape m_output_shape;
     ov::element::Type m_input_type;
     ov::element::Type m_output_type;
-    ov::runtime::Tensor m_input_data;
-    ov::runtime::Tensor m_expected_data;
+    ov::Tensor m_input_data;
+    ov::Tensor m_expected_data;
     std::vector<std::string> m_variable_id;
     size_t m_count_runs;
     size_t m_reset_on_run;

src/plugins/template/tests/functional/op_reference/region_yolo.cpp

+2 -2

@@ -67,8 +67,8 @@ struct RegionYoloParams {
     ov::PartialShape inputShape;
     ov::element::Type inType;
     ov::element::Type outType;
-    ov::runtime::Tensor inputData;
-    ov::runtime::Tensor refData;
+    ov::Tensor inputData;
+    ov::Tensor refData;
     std::string testcaseName;
 };
 

src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp

+1 -1

@@ -312,7 +312,7 @@ std::vector<ov::Tensor> ReadIRTest::calculate_refs() {
 
         size_t pos = 0;
         for (const auto& output : functionRefs->outputs()) {
-            auto out_tensor = ov::runtime::Tensor(output.get_element_type(), output.get_shape(), &ref_buffer[pos]);
+            auto out_tensor = ov::Tensor(output.get_element_type(), output.get_shape(), &ref_buffer[pos]);
             pos += out_tensor.get_byte_size();
         }
     }

src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp

+1 -1

@@ -41,7 +41,7 @@ class OVInferRequestDynamicTests : public testing::WithParamInterface<OVInferReq
 
 protected:
     void SetUp() override;
-    bool checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual);
+    bool checkOutput(const ov::Tensor& in, const ov::Tensor& actual);
 
     std::shared_ptr<ov::Core> ie = utils::PluginCache::get().core();
     std::shared_ptr<Model> function;

src/tests/functional/plugin/shared/include/behavior/ov_infer_request/iteration_chaining.hpp

+1 -1

@@ -39,7 +39,7 @@ struct OVIterationChaining : public OVInferRequestTests {
 
 private:
     static std::shared_ptr<ov::Model> getIterativeFunction();
-    bool checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual);
+    bool checkOutput(const ov::Tensor& in, const ov::Tensor& actual);
 };
 
 }  // namespace behavior

src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp

+8 -8

@@ -44,7 +44,7 @@ void OVInferRequestDynamicTests::SetUp() {
     APIBaseTest::SetUp();
 }
 
-bool OVInferRequestDynamicTests::checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual) {
+bool OVInferRequestDynamicTests::checkOutput(const ov::Tensor& in, const ov::Tensor& actual) {
     bool result = true;
     auto net = ie->compile_model(function, ov::test::utils::DEVICE_TEMPLATE);
     ov::InferRequest req;
@@ -96,7 +96,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetwork) {
     ov::test::utils::InputGenerateData in_data;
     in_data.start_from = -50;
     in_data.range = 100;
-    ov::runtime::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, in_data);
+    ov::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, in_data);
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", inTensor));
     OV_ASSERT_NO_THROW(req.infer());
@@ -115,7 +115,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetUnexpectedOutputTensorB
     auto execNet = ie->compile_model(function, target_device, configuration);
     // Create InferRequest
     ov::InferRequest req;
-    ov::runtime::Tensor tensor, otensor;
+    ov::Tensor tensor, otensor;
     const std::string outputname = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     ov::test::utils::InputGenerateData in_data;
@@ -144,7 +144,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocate
     auto execNet = ie->compile_model(function, target_device, configuration);
     // Create InferRequest
     ov::InferRequest req;
-    ov::runtime::Tensor tensor;
+    ov::Tensor tensor;
     const std::string outputname = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     ov::test::utils::InputGenerateData in_data;
@@ -153,7 +153,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocate
     tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, in_data);
     OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
     float ptr[5000];
-    ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
+    ov::Tensor otensor(element::f32, refOutShape, ptr);
     OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
     OV_ASSERT_NO_THROW(req.infer());
     ASSERT_EQ(req.get_tensor(outputname).data<float>(), ptr);
@@ -172,7 +172,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputShapeBeforeInfer)
     auto execNet = ie->compile_model(function, target_device, configuration);
     // Create InferRequest
     ov::InferRequest req;
-    ov::runtime::Tensor tensor, otensor;
+    ov::Tensor tensor, otensor;
     const std::string outputname = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     ov::test::utils::InputGenerateData in_data;
@@ -199,7 +199,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkGetOutputThenSetOutputTens
     auto execNet = ie->compile_model(function, target_device, configuration);
     // Create InferRequest
     ov::InferRequest req;
-    ov::runtime::Tensor tensor;
+    ov::Tensor tensor;
     const std::string outputname = function->outputs().back().get_any_name();
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     ov::test::utils::InputGenerateData in_data;
@@ -213,7 +213,7 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkGetOutputThenSetOutputTens
     ASSERT_TRUE(checkOutput(req.get_tensor("input_tensor"), req.get_tensor(outputname)));
     // then, set output tensor
     float ptr[5000];
-    ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
+    ov::Tensor otensor(element::f32, refOutShape, ptr);
     OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
     OV_ASSERT_NO_THROW(req.infer());
     ASSERT_EQ(req.get_tensor(outputname).data<float>(), ptr);

src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp

+2 -2

@@ -235,7 +235,7 @@ TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetChangedInputTensorThrow)
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     OV_ASSERT_NO_THROW(req.infer());
     // Get input_tensor
-    ov::runtime::Tensor tensor;
+    ov::Tensor tensor;
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->inputs().back().get_any_name()));
     // Set shape
     OV_ASSERT_NO_THROW(tensor.set_shape(shape2));
@@ -258,7 +258,7 @@ TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetChangedOutputTensorThrow
     OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
     OV_ASSERT_NO_THROW(req.infer());
     // Get output_tensor
-    ov::runtime::Tensor tensor;
+    ov::Tensor tensor;
     OV_ASSERT_NO_THROW(tensor = req.get_tensor(function->outputs().back().get_any_name()););
     // Set shape
     OV_ASSERT_NO_THROW(tensor.set_shape(shape2));

src/tests/functional/plugin/shared/src/behavior/ov_infer_request/iteration_chaining.cpp

+1 -1

@@ -78,7 +78,7 @@ void OVIterationChaining::TearDown() {
     OVInferRequestTests::TearDown();
 }
 
-bool OVIterationChaining::checkOutput(const ov::runtime::Tensor& in, const ov::runtime::Tensor& actual) {
+bool OVIterationChaining::checkOutput(const ov::Tensor& in, const ov::Tensor& actual) {
     bool result = true;
     auto net = core->compile_model(function, ov::test::utils::DEVICE_TEMPLATE);
     ov::InferRequest req;

src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp

+2 -2

@@ -13,8 +13,8 @@ namespace utils {
 using CompareMap = std::map<ov::NodeTypeInfo, std::function<void(
         const std::shared_ptr<ov::Node> &node,
         size_t port,
-        const ov::runtime::Tensor &expected,
-        const ov::runtime::Tensor &actual,
+        const ov::Tensor &expected,
+        const ov::Tensor &actual,
         double absThreshold,
         double relThreshold)>>;
 
src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp

+1 -1

@@ -17,7 +17,7 @@ void reset_const_ranges();
 
 std::vector<uint8_t> color_test_image(size_t height, size_t width, int b_step, ov::preprocess::ColorFormat format);
 
-using InputsMap = std::map<ov::NodeTypeInfo, std::function<ov::runtime::Tensor(
+using InputsMap = std::map<ov::NodeTypeInfo, std::function<ov::Tensor(
         const std::shared_ptr<ov::Node>& node,
         size_t port,
         const ov::element::Type& elemType,
