 // SPDX-License-Identifier: Apache-2.0
 //
+#include <openvino/runtime/core.hpp>
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/test_tools.hpp"
 #include "gtest/gtest.h"
 #include "openvino/core/except.hpp"
+#include "openvino/core/model.hpp"
 #include "openvino/core/preprocess/pre_post_process.hpp"
+#include "openvino/core/shape.hpp"
+#include "openvino/core/type.hpp"
 #include "openvino/opsets/opset8.hpp"
 #include "openvino/util/common_util.hpp"
 #include "preprocess/color_utils.hpp"
-#include "openvino/core/shape.hpp"
-#include "openvino/core/type.hpp"
-#include "openvino/core/model.hpp"

 using namespace ov;
 using namespace ov::preprocess;
@@ -29,7 +31,10 @@ static std::shared_ptr<Model> create_simple_function(element::Type type, const P
     res->get_output_tensor(0).set_names({"tensor_output1"});
     return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
 }
-static std::shared_ptr<Model> create_clamp_function(element::Type type, const PartialShape& shape, float min_value, float max_value) {
+static std::shared_ptr<Model> create_clamp_function(element::Type type,
+                                                    const PartialShape& shape,
+                                                    float min_value,
+                                                    float max_value) {
     auto data = std::make_shared<op::v0::Parameter>(type, shape);
     data->set_friendly_name("input");
     data->get_output_tensor(0).set_names({"tensor_input"});
@@ -45,7 +50,6 @@ static std::shared_ptr<Model> create_clamp_function(element::Type type, const Pa
     return std::make_shared<Model>(ResultVector{result}, ParameterVector{data});
 }

-
 static std::shared_ptr<Model> create_trivial(element::Type type, const PartialShape& shape) {
     auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
     data1->set_friendly_name("input1");
@@ -110,16 +114,121 @@ TEST(pre_post_process, simple_clamp_f16) {
     auto f = create_clamp_function(element::f16, Shape{1, 3, 2, 2}, 0.0f, 1.0f);
     auto p = PrePostProcessor(f);
     p.input("tensor_input").preprocess().clamp(0.0f, 1.0f);
+    p.output("tensor_output").postprocess().clamp(0.0f, 1.0f);
     f = p.build();
     EXPECT_EQ(f->get_output_element_type(0), element::f16);
+
+    EXPECT_EQ(f->input().get_shape(), (Shape{1, 3, 2, 2}));
+    EXPECT_EQ(f->output().get_shape(), (Shape{1, 3, 2, 2}));
 }

 TEST(pre_post_process, simple_clamp_f64) {
     auto f = create_clamp_function(element::f64, Shape{1, 3, 2, 2}, 0.0f, 1.0f);
     auto p = PrePostProcessor(f);
     p.input("tensor_input").preprocess().clamp(0.0f, 1.0f);
+    p.output("tensor_output").postprocess().clamp(0.0f, 1.0f);
     f = p.build();
     EXPECT_EQ(f->get_output_element_type(0), element::f64);
+
+    EXPECT_EQ(f->input().get_shape(), (Shape{1, 3, 2, 2}));
+    EXPECT_EQ(f->output().get_shape(), (Shape{1, 3, 2, 2}));
+}
+
+class PreprocessClampTest : public ::testing::Test {
+protected:
+    std::shared_ptr<Model> model;
+    std::shared_ptr<op::v0::Parameter> input;
+
+    void SetUp() override {
+        input = std::make_shared<op::v0::Parameter>(element::f64, Shape{1, 3, 2, 2});
+        input->set_friendly_name("input");
+        input->get_output_tensor(0).set_names({"tensor_input"});
+
+        auto clamp_op = std::make_shared<op::v0::Clamp>(input, 0.0, 1.0);
+        clamp_op->set_friendly_name("Clamp");
+        clamp_op->get_output_tensor(0).set_names({"tensor_clamp"});
+
+        auto result = std::make_shared<op::v0::Result>(clamp_op);
+        result->set_friendly_name("Result");
+        result->get_output_tensor(0).set_names({"tensor_output"});
+
+        model = std::make_shared<Model>(ResultVector{result}, ParameterVector{input});
+    }
+};
+
+TEST_F(PreprocessClampTest, clamp_operation_on_input) {
+    auto p = PrePostProcessor(model);
+    p.input("tensor_input").preprocess().clamp(0.0, 1.0);
+    model = p.build();
+
+    // Create input data with values outside the clamp range
+    std::vector<double> input_data = {-1.0, 0.5, 2.0, -0.5, 1.5, 0.0, 1.0, 0.8, -0.2, 1.2, 0.3, 0.7};
+    std::vector<double> expected = {0.0, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0, 0.8, 0.0, 1.0, 0.3, 0.7};
+    auto input_tensor = ov::Tensor(element::f64, Shape{1, 3, 2, 2}, input_data.data());
+
+    // Create an inference request
+    ov::Core core;
+    auto compiled_model = core.compile_model(model, "CPU");
+    auto infer_request = compiled_model.create_infer_request();
+
+    // Set input tensor
+    infer_request.set_tensor("tensor_input", input_tensor);
+
+    // Run inference
+    infer_request.infer();
+    {
+        // Get input tensor
+        auto input_tensor = infer_request.get_tensor("tensor_input");
+        auto input_data = input_tensor.data<double>();
+
+        // Check if the input data is within the clamp range
+        for (size_t i = 0; i < input_tensor.get_size(); ++i) {
+            SCOPED_TRACE("i = " + std::to_string(i));
+            EXPECT_GE(input_data[i], 0.0);
+            EXPECT_LE(input_data[i], 1.0);
+
+            EXPECT_NEAR(input_data[i], expected[i], 1e-5);
+        }
+    }
+}
+
+TEST_F(PreprocessClampTest, clamp_operation_on_output) {
+    auto p = PrePostProcessor(model);
+    p.output("tensor_output").postprocess().clamp(0.0, 1.0);
+    model = p.build();
+
+    // Create input data with values outside the clamp range
+    std::vector<double> input_data = {-1.0, 0.5, 2.0, -0.5, 1.5, 0.0, 1.0, 0.8, -0.2, 1.2, 0.3, 0.7};
+    std::vector<double> expected = {0.0, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0, 0.8, 0.0, 1.0, 0.3, 0.7};
+
+    auto input_tensor = ov::Tensor(element::f64, Shape{1, 3, 2, 2}, input_data.data());
+
+    // Create an inference request
+    ov::Core core;
+    auto compiled_model = core.compile_model(model, "CPU");
+    auto infer_request = compiled_model.create_infer_request();
+
+    // Set input tensor
+    infer_request.set_tensor("tensor_input", input_tensor);
+
+    // Run inference
+    infer_request.infer();
+
+    {
+        // Get output tensor
+        auto output_tensor = infer_request.get_tensor("tensor_output");
+        auto output_data = output_tensor.data<double>();
+
+        // Check if the output data is within the clamp range
+        for (size_t i = 0; i < output_tensor.get_size(); ++i) {
+            SCOPED_TRACE("i = " + std::to_string(i));
+
+            EXPECT_GE(output_data[i], 0.0);
+            EXPECT_LE(output_data[i], 1.0);
+
+            EXPECT_NEAR(output_data[i], expected[i], 1e-5);
+        }
+    }
 }

 TEST(pre_post_process, convert_element_type_and_scale) {
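For context, a minimal standalone sketch of the flow these tests exercise: clamp() applied as a pre-processing step on the input and a post-processing step on the output via PrePostProcessor, then executed through ov::Core as in the fixtures above. It assumes the OpenVINO 2.0 C++ API used in the diff; the Relu body, f32 precision, and main() harness are illustrative choices, not part of the change.

#include <openvino/core/preprocess/pre_post_process.hpp>
#include <openvino/openvino.hpp>
#include <openvino/opsets/opset8.hpp>

#include <vector>

int main() {
    using namespace ov;
    using namespace ov::preprocess;

    // Tiny model: Parameter -> Relu -> Result, f32, shape {1, 3, 2, 2}.
    // (Relu stands in for the real model body; any op would do.)
    auto param = std::make_shared<opset8::Parameter>(element::f32, Shape{1, 3, 2, 2});
    param->get_output_tensor(0).set_names({"tensor_input"});
    auto relu = std::make_shared<opset8::Relu>(param);
    auto result = std::make_shared<opset8::Result>(relu);
    result->get_output_tensor(0).set_names({"tensor_output"});
    auto model = std::make_shared<Model>(ResultVector{result}, ParameterVector{param});

    // Insert Clamp(0, 1) in front of the input and behind the output,
    // exactly as the tests above do.
    auto p = PrePostProcessor(model);
    p.input("tensor_input").preprocess().clamp(0.0, 1.0);
    p.output("tensor_output").postprocess().clamp(0.0, 1.0);
    model = p.build();

    // Compile for CPU and run once; out-of-range values come back in [0, 1].
    Core core;
    auto compiled = core.compile_model(model, "CPU");
    auto request = compiled.create_infer_request();
    std::vector<float> data = {-1.0f, 0.5f, 2.0f, -0.5f, 1.5f, 0.0f,
                               1.0f,  0.8f, -0.2f, 1.2f, 0.3f, 0.7f};
    request.set_tensor("tensor_input", Tensor(element::f32, Shape{1, 3, 2, 2}, data.data()));
    request.infer();
    auto out = request.get_tensor("tensor_output");
    return out.get_size() == data.size() ? 0 : 1;
}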