
Commit fd793e4

Refactor PreprocessClampTest

Verify that preprocess()/postprocess() clamp() inserts a Clamp node into the model graph, instead of compiling the model for CPU and running inference.
1 parent 64fe5ae

File tree

1 file changed: +25 -68 lines changed


src/core/tests/preprocess.cpp

@@ -159,94 +159,51 @@ class PreprocessClampTest : public ::testing::Test {
     std::shared_ptr<op::v0::Parameter> input;
 
     void SetUp() override {
-        input = std::make_shared<op::v0::Parameter>(element::f64, Shape{1, 3, 2, 2});
+        input = std::make_shared<op::v0::Parameter>(element::f64, Shape{1});
         input->set_friendly_name("input");
         input->get_output_tensor(0).set_names({"tensor_input"});
 
-        auto clamp_op = std::make_shared<op::v0::Clamp>(input, 0.0, 1.0);
-        clamp_op->set_friendly_name("Clamp");
-        clamp_op->get_output_tensor(0).set_names({"tensor_clamp"});
+        auto add_op = std::make_shared<op::v1::Add>(input, input);
+        add_op->set_friendly_name("Add");
+        add_op->get_output_tensor(0).set_names({"tensor_add"});
 
-        auto result = std::make_shared<op::v0::Result>(clamp_op);
+        auto result = std::make_shared<op::v0::Result>(add_op);
         result->set_friendly_name("Result");
         result->get_output_tensor(0).set_names({"tensor_output"});
 
         model = std::make_shared<Model>(ResultVector{result}, ParameterVector{input});
     }
 };
 
-TEST_F(PreprocessClampTest, clamp_operation_on_input) {
+TEST_F(PreprocessClampTest, clamp_operation_on_input_preprocess) {
+    {
+        auto input_node = model->get_parameters().front();
+        auto connected_node = input_node->output(0).get_target_inputs().begin()->get_node();
+        EXPECT_STREQ(connected_node->get_type_name(), "Add");
+    }
     auto p = PrePostProcessor(model);
-    p.input("tensor_input").preprocess().clamp(0.0, 1.0);
+    p.input().preprocess().clamp(0.0, 1.0);
     model = p.build();
-
-    // Create input data with values outside the clamp range
-    std::vector<double> input_data = {-1.0, 0.5, 2.0, -0.5, 1.5, 0.0, 1.0, 0.8, -0.2, 1.2, 0.3, 0.7};
-    std::vector<double> expected = {0.0, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0, 0.8, 0.0, 1.0, 0.3, 0.7};
-    auto input_tensor = ov::Tensor(element::f64, Shape{1, 3, 2, 2}, input_data.data());
-
-    // Create an inference request
-    ov::Core core;
-    auto compiled_model = core.compile_model(model, "CPU");
-    auto infer_request = compiled_model.create_infer_request();
-
-    // Set input tensor
-    infer_request.set_tensor("tensor_input", input_tensor);
-
-    // Run inference
-    infer_request.infer();
     {
-        // Get input tensor
-        auto input_tensor = infer_request.get_tensor("tensor_input");
-        auto input_data = input_tensor.data<double>();
-
-        // Check if the input data is within the clamp range
-        for (size_t i = 0; i < input_tensor.get_size(); ++i) {
-            SCOPED_TRACE("i = " + std::to_string(i));
-            EXPECT_GE(input_data[i], 0.0);
-            EXPECT_LE(input_data[i], 1.0);
-
-            EXPECT_NEAR(input_data[i], expected[i], 1e-5);
-        }
+        auto input_node = model->get_parameters().front();
+        auto connected_node = input_node->output(0).get_target_inputs().begin()->get_node();
+        EXPECT_STREQ(connected_node->get_type_name(), "Clamp");
     }
 }
 
-TEST_F(PreprocessClampTest, clamp_operation_on_output) {
+TEST_F(PreprocessClampTest, clamp_operation_on_output_postprocess) {
+    {
+        auto result_node = model->get_results().front();
+        auto connected_node = result_node->input_value(0).get_node_shared_ptr();
+        EXPECT_STREQ(connected_node->get_type_name(), "Add");
+    }
     auto p = PrePostProcessor(model);
-    p.output("tensor_output").postprocess().clamp(0.0, 1.0);
+    p.output().postprocess().clamp(0.0, 1.0);
     model = p.build();
-
-    // Create input data with values outside the clamp range
-    std::vector<double> input_data = {-1.0, 0.5, 2.0, -0.5, 1.5, 0.0, 1.0, 0.8, -0.2, 1.2, 0.3, 0.7};
-    std::vector<double> expected = {0.0, 0.5, 1.0, 0.0, 1.0, 0.0, 1.0, 0.8, 0.0, 1.0, 0.3, 0.7};
-
-    auto input_tensor = ov::Tensor(element::f64, Shape{1, 3, 2, 2}, input_data.data());
-
-    // Create an inference request
-    ov::Core core;
-    auto compiled_model = core.compile_model(model, "CPU");
-    auto infer_request = compiled_model.create_infer_request();
-
-    // Set input tensor
-    infer_request.set_tensor("tensor_input", input_tensor);
-
-    // Run inference
-    infer_request.infer();
-
     {
-        // Get output tensor
-        auto output_tensor = infer_request.get_tensor("tensor_output");
-        auto output_data = output_tensor.data<double>();
-
-        // Check if the output data is within the clamp range
-        for (size_t i = 0; i < output_tensor.get_size(); ++i) {
-            SCOPED_TRACE("i = " + std::to_string(i));
-
-            EXPECT_GE(output_data[i], 0.0);
-            EXPECT_LE(output_data[i], 1.0);
-
-            EXPECT_NEAR(output_data[i], expected[i], 1e-5);
-        }
+        auto result_node = model->get_results().front();
+        auto connected_node = result_node->input_value(0).get_node_shared_ptr();
+        EXPECT_STREQ(connected_node->get_type_name(), "Clamp");
+    }
 }
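For context, here is a minimal standalone sketch of the pattern the refactored tests now rely on: after PrePostProcessor::build(), the node consuming the parameter's output should be the Clamp inserted by the preprocess step, not the original Add. The header paths and the main() wrapper are assumptions for illustration; the PrePostProcessor calls mirror the test code above.

// Sketch only: mirrors the test's structural check; assumes the clamp()
// preprocessing step introduced alongside this test is available.
#include <iostream>
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/core/preprocess/pre_post_process.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"

int main() {
    using namespace ov;

    // Same tiny graph the fixture builds: a 1-element f64 input feeding Add.
    auto input = std::make_shared<op::v0::Parameter>(element::f64, Shape{1});
    auto add = std::make_shared<op::v1::Add>(input, input);
    auto model = std::make_shared<Model>(OutputVector{add}, ParameterVector{input});

    // Insert clamp(0.0, 1.0) as a preprocessing step and rebuild the model.
    preprocess::PrePostProcessor p(model);
    p.input().preprocess().clamp(0.0, 1.0);
    model = p.build();

    // The consumer of the parameter's output should now be the inserted Clamp.
    auto consumer = model->get_parameters().front()->output(0).get_target_inputs().begin()->get_node();
    std::cout << consumer->get_type_name() << "\n";  // prints "Clamp"
    return 0;
}

Checking the graph structurally keeps the test independent of any device plugin; the previous version compiled the model for CPU and ran inference, which exercises far more than the preprocessing insertion itself. To run just these tests, a gtest filter such as --gtest_filter=PreprocessClampTest.* on the core unit-test binary should work (the binary name and path depend on your build).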
