Skip to content

Commit 1f3cd69

Browse files
committed
temp commit-revert and validate
1 parent cf95887 commit 1f3cd69

File tree

1 file changed

+114
-5
lines changed

1 file changed

+114
-5
lines changed

src/core/tests/preprocess.cpp

+114-5
Original file line numberDiff line numberDiff line change
@@ -2,17 +2,19 @@
22
// SPDX-License-Identifier: Apache-2.0
33
//
44

5+
#include <openvino/runtime/core.hpp>
6+
57
#include "common_test_utils/test_assertions.hpp"
68
#include "common_test_utils/test_tools.hpp"
79
#include "gtest/gtest.h"
810
#include "openvino/core/except.hpp"
11+
#include "openvino/core/model.hpp"
912
#include "openvino/core/preprocess/pre_post_process.hpp"
13+
#include "openvino/core/shape.hpp"
14+
#include "openvino/core/type.hpp"
1015
#include "openvino/opsets/opset8.hpp"
1116
#include "openvino/util/common_util.hpp"
1217
#include "preprocess/color_utils.hpp"
13-
#include "openvino/core/shape.hpp"
14-
#include "openvino/core/type.hpp"
15-
#include "openvino/core/model.hpp"
1618

1719
using namespace ov;
1820
using namespace ov::preprocess;
@@ -29,7 +31,10 @@ static std::shared_ptr<Model> create_simple_function(element::Type type, const P
2931
res->get_output_tensor(0).set_names({"tensor_output1"});
3032
return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
3133
}
32-
static std::shared_ptr<Model> create_clamp_function(element::Type type, const PartialShape& shape, float min_value, float max_value) {
34+
static std::shared_ptr<Model> create_clamp_function(element::Type type,
35+
const PartialShape& shape,
36+
float min_value,
37+
float max_value) {
3338
auto data = std::make_shared<op::v0::Parameter>(type, shape);
3439
data->set_friendly_name("input");
3540
data->get_output_tensor(0).set_names({"tensor_input"});
@@ -45,7 +50,6 @@ static std::shared_ptr<Model> create_clamp_function(element::Type type, const Pa
4550
return std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
4651
}
4752

48-
4953
static std::shared_ptr<Model> create_trivial(element::Type type, const PartialShape& shape) {
5054
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
5155
data1->set_friendly_name("input1");
TEST(pre_post_process, simple_clamp_f16) {
    // Build an f16 model around a clamp to [0, 1] (see create_clamp_function),
    // then attach the same clamp as both a pre-processing step on the input
    // and a post-processing step on the output.
    auto f = create_clamp_function(element::f16, Shape{1, 3, 2, 2}, 0.0f, 1.0f);
    auto p = PrePostProcessor(f);
    p.input("tensor_input").preprocess().clamp(0.0f, 1.0f);
    p.output("tensor_output").postprocess().clamp(0.0f, 1.0f);
    f = p.build();
    // Inserting clamp steps must not change the model's output element type
    // or its input/output shapes.
    EXPECT_EQ(f->get_output_element_type(0), element::f16);

    EXPECT_EQ(f->input().get_shape(), (Shape{1, 3, 2, 2}));
    EXPECT_EQ(f->output().get_shape(), (Shape{1, 3, 2, 2}));
}
116124

117125
TEST(pre_post_process, simple_clamp_f64) {
    // Same scenario as the f16 variant, but for double precision: a clamp is
    // registered on both the input (preprocess) and the output (postprocess),
    // and building the processor must leave type and shapes untouched.
    auto model = create_clamp_function(element::f64, Shape{1, 3, 2, 2}, 0.0f, 1.0f);
    auto processor = PrePostProcessor(model);
    processor.input("tensor_input").preprocess().clamp(0.0f, 1.0f);
    processor.output("tensor_output").postprocess().clamp(0.0f, 1.0f);
    model = processor.build();

    EXPECT_EQ(model->get_output_element_type(0), element::f64);
    EXPECT_EQ(model->input().get_shape(), (Shape{1, 3, 2, 2}));
    EXPECT_EQ(model->output().get_shape(), (Shape{1, 3, 2, 2}));
}
136+
137+
// Fixture building a minimal f64 model: Parameter -> Clamp(0, 1) -> Result,
// with tensor names "tensor_input" / "tensor_clamp" / "tensor_output".
// NOTE(review): the model body already contains a Clamp(0, 1) node, so any
// range check on its output passes even without the PrePostProcessor step —
// confirm the derived tests actually exercise the added pre/post-processing.
class PreprocessClampTest : public ::testing::Test {
protected:
    std::shared_ptr<Model> model;                    // model under test, rebuilt per test
    std::shared_ptr<op::v0::Parameter> input;        // kept so tests can reach the parameter

    void SetUp() override {
        // Tensor names (not friendly names) are what PrePostProcessor and
        // infer requests address, so each node output gets one.
        input = std::make_shared<op::v0::Parameter>(element::f64, Shape{1, 3, 2, 2});
        input->set_friendly_name("input");
        input->get_output_tensor(0).set_names({"tensor_input"});

        auto clamp_op = std::make_shared<op::v0::Clamp>(input, 0.0, 1.0);
        clamp_op->set_friendly_name("Clamp");
        clamp_op->get_output_tensor(0).set_names({"tensor_clamp"});

        auto result = std::make_shared<op::v0::Result>(clamp_op);
        result->set_friendly_name("Result");
        result->get_output_tensor(0).set_names({"tensor_output"});

        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{input});
    }
};
158+
159+
TEST_F(PreprocessClampTest, clamp_operation_on_input) {
    // Register a clamp pre-processing step on the model input, then run a real
    // inference on CPU and verify the observed data lies in [0, 1].
    auto p = PrePostProcessor(model);
    p.input("tensor_input").preprocess().clamp(0.0, 1.0);
    model = p.build();

    // Input data with values deliberately outside the clamp range [0, 1].
    std::vector<double> input_data = {-1.0, 0.5, 2.0, -0.5, 1.5, 0.0, 1.0, 0.8, -0.2, 1.2, 0.3, 0.7};
    std::vector<double> expected = {0.0, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0, 0.8, 0.0, 1.0, 0.3, 0.7};
    // The tensor wraps input_data's buffer (no copy); input_data must outlive
    // the inference below.
    auto input_tensor = ov::Tensor(element::f64, Shape{1, 3, 2, 2}, input_data.data());

    // Create an inference request
    ov::Core core;
    auto compiled_model = core.compile_model(model, "CPU");
    auto infer_request = compiled_model.create_infer_request();

    // Set input tensor
    infer_request.set_tensor("tensor_input", input_tensor);

    // Run inference
    infer_request.infer();
    {
        // Read the input tensor back after inference.
        // NOTE(review): the clamp preprocess step becomes part of the compiled
        // model's graph; it is not established here that it rewrites the
        // user-provided input tensor in place. Confirm this is the intended
        // observation point — checking "tensor_output" may be what was meant.
        // Fix: inner locals renamed — the original re-declared `input_tensor`
        // and `input_data`, shadowing the outer variables (bugprone).
        auto observed_tensor = infer_request.get_tensor("tensor_input");
        auto observed = observed_tensor.data<double>();

        // Check if the input data is within the clamp range
        for (size_t i = 0; i < observed_tensor.get_size(); ++i) {
            SCOPED_TRACE("i = " + std::to_string(i));
            EXPECT_GE(observed[i], 0.0);
            EXPECT_LE(observed[i], 1.0);

            EXPECT_NEAR(observed[i], expected[i], 1e-5);
        }
    }
}
194+
195+
TEST_F(PreprocessClampTest, clamp_operation_on_output) {
    // Register a clamp post-processing step on the model output.
    auto preprocessor = PrePostProcessor(model);
    preprocessor.output("tensor_output").postprocess().clamp(0.0, 1.0);
    model = preprocessor.build();

    // Values deliberately straddling the clamp range [0, 1], with the values
    // they should be clamped to.
    std::vector<double> input_data = {-1.0, 0.5, 2.0, -0.5, 1.5, 0.0, 1.0, 0.8, -0.2, 1.2, 0.3, 0.7};
    std::vector<double> expected = {0.0, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0, 0.8, 0.0, 1.0, 0.3, 0.7};

    // Wraps input_data's buffer without copying.
    auto input_tensor = ov::Tensor(element::f64, Shape{1, 3, 2, 2}, input_data.data());

    // Compile for CPU and run a single synchronous inference.
    ov::Core core;
    auto compiled_model = core.compile_model(model, "CPU");
    auto infer_request = compiled_model.create_infer_request();
    infer_request.set_tensor("tensor_input", input_tensor);
    infer_request.infer();

    {
        // Every output element must land inside the clamp range and match the
        // precomputed expectation.
        auto output_tensor = infer_request.get_tensor("tensor_output");
        auto output_data = output_tensor.data<double>();

        const size_t count = output_tensor.get_size();
        for (size_t i = 0; i < count; ++i) {
            SCOPED_TRACE("i = " + std::to_string(i));

            EXPECT_GE(output_data[i], 0.0);
            EXPECT_LE(output_data[i], 1.0);

            EXPECT_NEAR(output_data[i], expected[i], 1e-5);
        }
    }
}
124233

125234
TEST(pre_post_process, convert_element_type_and_scale) {

0 commit comments

Comments
 (0)