
Commit 4e6a261

jaeyun-jung authored and myungjoo committed
[Tizen/Api] basic implementation for single shot
Add base code for the single shot model. Update the .spec file and add simple test cases.

TODO: update naming rules (function names and common util)

Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
1 parent 01c5abb commit 4e6a261

File tree: 7 files changed, +819 −33 lines


packaging/nnstreamer.spec (+2)

@@ -184,6 +184,7 @@ meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} --libdir
 ninja -C build %{?_smp_mflags}
 
 %if 0%{?unit_test}
+export NNSTREAMER_BUILD_ROOT_PATH=$(pwd)
 pushd build
 export GST_PLUGIN_PATH=$(pwd)/gst/nnstreamer
 export NNSTREAMER_CONF=$(pwd)/nnstreamer-test.ini
@@ -316,6 +317,7 @@ popd
 
 %files -n capi-nnstreamer-devel
 %{_includedir}/nnstreamer/nnstreamer.h
+%{_includedir}/nnstreamer/nnstreamer-single.h
 %{_libdir}/pkgconfig/capi-nnstreamer.pc
 %{_libdir}/libcapi-nnstreamer.so
 %{_libdir}/libcapi-nnstreamer.a

tests/tizen_capi/unittest_tizen_capi.cpp (+223)

@@ -8,6 +8,7 @@
 */
 
 #include <nnstreamer.h>
+#include <nnstreamer-single.h>
 #include <gtest/gtest.h>
 #include <glib.h>
 #include <glib/gstdio.h> /* GStatBuf */
@@ -982,6 +983,228 @@ TEST (nnstreamer_capi_switch, failure_01)
   g_free (pipeline);
 }
 
+/**
+ * @brief Test NNStreamer single shot
+ */
+TEST (nnstreamer_capi_singleshot, invoke_01)
+{
+  ml_simpleshot_model_h model;
+  nns_tensors_info_s in_info, out_info;
+  nns_tensors_info_s in_res, out_res;
+  tensor_data *input, *output1, *output2;
+  int status;
+
+  const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+  gchar *test_model;
+
+  memset (&in_info, 0, sizeof (nns_tensors_info_s));
+  memset (&out_info, 0, sizeof (nns_tensors_info_s));
+  memset (&in_res, 0, sizeof (nns_tensors_info_s));
+  memset (&out_res, 0, sizeof (nns_tensors_info_s));
+
+  ASSERT_TRUE (root_path != NULL);
+  test_model = g_build_filename (root_path, "tests", "test_models", "models",
+      "mobilenet_v1_1.0_224_quant.tflite", NULL);
+
+  in_info.num_tensors = 1;
+  in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+  in_info.info[0].dimension[0] = 3;
+  in_info.info[0].dimension[1] = 224;
+  in_info.info[0].dimension[2] = 224;
+  in_info.info[0].dimension[3] = 1;
+
+  out_info.num_tensors = 1;
+  out_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+  out_info.info[0].dimension[0] = 1001;
+  out_info.info[0].dimension[1] = 1;
+  out_info.info[0].dimension[2] = 1;
+  out_info.info[0].dimension[3] = 1;
+
+  status = ml_model_open (test_model, &model, &in_info, &out_info,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_NONE);
+
+  /* input tensor in filter */
+  status = ml_model_get_input_type (model, &in_res);
+  EXPECT_EQ (status, NNS_ERROR_NONE);
+
+  EXPECT_TRUE (in_info.num_tensors == in_res.num_tensors);
+  for (guint idx = 0; idx < in_res.num_tensors; idx++) {
+    EXPECT_TRUE (in_info.info[idx].type == in_res.info[idx].type);
+    EXPECT_TRUE (in_info.info[idx].dimension[0] == in_res.info[idx].dimension[0]);
+    EXPECT_TRUE (in_info.info[idx].dimension[1] == in_res.info[idx].dimension[1]);
+    EXPECT_TRUE (in_info.info[idx].dimension[2] == in_res.info[idx].dimension[2]);
+    EXPECT_TRUE (in_info.info[idx].dimension[3] == in_res.info[idx].dimension[3]);
+  }
+
+  /* output tensor in filter */
+  status = ml_model_get_output_type (model, &out_res);
+  EXPECT_EQ (status, NNS_ERROR_NONE);
+
+  EXPECT_TRUE (out_info.num_tensors == out_res.num_tensors);
+  for (guint idx = 0; idx < out_res.num_tensors; idx++) {
+    EXPECT_TRUE (out_info.info[idx].type == out_res.info[idx].type);
+    EXPECT_TRUE (out_info.info[idx].dimension[0] == out_res.info[idx].dimension[0]);
+    EXPECT_TRUE (out_info.info[idx].dimension[1] == out_res.info[idx].dimension[1]);
+    EXPECT_TRUE (out_info.info[idx].dimension[2] == out_res.info[idx].dimension[2]);
+    EXPECT_TRUE (out_info.info[idx].dimension[3] == out_res.info[idx].dimension[3]);
+  }
+
+  /* generate dummy data */
+  input = ml_model_allocate_tensor_data (&in_info);
+  EXPECT_TRUE (input != NULL);
+
+  output1 = ml_model_inference (model, input, NULL);
+  EXPECT_TRUE (output1 != NULL);
+  ml_model_free_tensor_data (output1);
+
+  output2 = ml_model_allocate_tensor_data (&out_info);
+  EXPECT_TRUE (output2 != NULL);
+
+  output1 = ml_model_inference (model, input, output2);
+  EXPECT_TRUE (output1 != NULL);
+  EXPECT_TRUE (output1 == output2);
+  ml_model_free_tensor_data (output2);
+
+  ml_model_free_tensor_data (input);
+
+  status = ml_model_close (model);
+  EXPECT_EQ (status, NNS_ERROR_NONE);
+
+  g_free (test_model);
+}
+
+/**
+ * @brief Test NNStreamer single shot
+ * @detail Start pipeline without tensor info
+ */
+TEST (nnstreamer_capi_singleshot, invoke_02)
+{
+  ml_simpleshot_model_h model;
+  nns_tensors_info_s in_info, out_info;
+  tensor_data *input, *output1, *output2;
+  int status;
+
+  const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+  gchar *test_model;
+
+  ASSERT_TRUE (root_path != NULL);
+  test_model = g_build_filename (root_path, "tests", "test_models", "models",
+      "mobilenet_v1_1.0_224_quant.tflite", NULL);
+
+  in_info.num_tensors = 1;
+  in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+  in_info.info[0].dimension[0] = 3;
+  in_info.info[0].dimension[1] = 224;
+  in_info.info[0].dimension[2] = 224;
+  in_info.info[0].dimension[3] = 1;
+
+  out_info.num_tensors = 1;
+  out_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+  out_info.info[0].dimension[0] = 1001;
+  out_info.info[0].dimension[1] = 1;
+  out_info.info[0].dimension[2] = 1;
+  out_info.info[0].dimension[3] = 1;
+
+  status = ml_model_open (test_model, &model, NULL, NULL,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_NONE);
+
+  /* generate dummy data */
+  input = ml_model_allocate_tensor_data (&in_info);
+  EXPECT_TRUE (input != NULL);
+
+  output1 = ml_model_inference (model, input, NULL);
+  EXPECT_TRUE (output1 != NULL);
+  ml_model_free_tensor_data (output1);
+
+  output2 = ml_model_allocate_tensor_data (&out_info);
+  EXPECT_TRUE (output2 != NULL);
+
+  output1 = ml_model_inference (model, input, output2);
+  EXPECT_TRUE (output1 != NULL);
+  EXPECT_TRUE (output1 == output2);
+  ml_model_free_tensor_data (output2);
+
+  ml_model_free_tensor_data (input);
+
+  status = ml_model_close (model);
+  EXPECT_EQ (status, NNS_ERROR_NONE);
+
+  g_free (test_model);
+}
+
+/**
+ * @brief Test NNStreamer single shot
+ * @detail Failure case with invalid param.
+ */
+TEST (nnstreamer_capi_singleshot, failure_01)
+{
+  ml_simpleshot_model_h model;
+  nns_tensors_info_s in_info, out_info;
+  int status;
+
+  const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+  gchar *test_model;
+
+  memset (&in_info, 0, sizeof (nns_tensors_info_s));
+  memset (&out_info, 0, sizeof (nns_tensors_info_s));
+
+  ASSERT_TRUE (root_path != NULL);
+  test_model = g_build_filename (root_path, "tests", "test_models", "models",
+      "mobilenet_v1_1.0_224_quant.tflite", NULL);
+
+  /* invalid file path */
+  status = ml_model_open ("wrong_file_name", &model, &in_info, &out_info,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+  /* null file path */
+  status = ml_model_open (NULL, &model, &in_info, &out_info,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+  /* invalid handle */
+  status = ml_model_open (test_model, NULL, &in_info, &out_info,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+  /* invalid input tensor info */
+  status = ml_model_open (test_model, &model, &in_info, &out_info,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+  in_info.num_tensors = 1;
+  in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+  in_info.info[0].dimension[0] = 3;
+  in_info.info[0].dimension[1] = 224;
+  in_info.info[0].dimension[2] = 224;
+  in_info.info[0].dimension[3] = 1;
+
+  /* invalid output tensor info */
+  status = ml_model_open (test_model, &model, &in_info, &out_info,
+      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+  out_info.num_tensors = 1;
+  out_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+  out_info.info[0].dimension[0] = 1001;
+  out_info.info[0].dimension[1] = 1;
+  out_info.info[0].dimension[2] = 1;
+  out_info.info[0].dimension[3] = 1;
+
+  /* unknown fw type */
+  status = ml_model_open (test_model, &model, &in_info, &out_info,
+      ML_NNFW_UNKNOWN, ML_NNFW_HW_DO_NOT_CARE);
+  EXPECT_EQ (status, NNS_ERROR_NOT_SUPPORTED);
+
+  /* invalid handle */
+  status = ml_model_close (model);
+  EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+  g_free (test_model);
+}
+
 /**
  * @brief Main gtest
  */
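
The three tests above exercise the whole single-shot lifecycle. For readers skimming the diff, the sketch below condenses that flow into one hypothetical helper, using only names introduced or touched by this commit; error handling and the gtest assertions are trimmed, and run_single_shot itself is illustrative, not part of the API. Note the two output conventions the tests check: passing NULL lets ml_model_inference() allocate the output frame, while passing a pre-allocated buffer makes it return that same pointer.

#include <nnstreamer.h>
#include <nnstreamer-single.h>

/* Condensed single-shot flow mirroring invoke_01/invoke_02 above (sketch only). */
static int
run_single_shot (const char *model_path)
{
  ml_simpleshot_model_h model;
  nns_tensors_info_s in_info = { 0 };
  tensor_data *input, *output;
  int status;

  /* MobileNet v1 quant fixture: uint8 input of dimension 3:224:224:1 */
  in_info.num_tensors = 1;
  in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
  in_info.info[0].dimension[0] = 3;
  in_info.info[0].dimension[1] = 224;
  in_info.info[0].dimension[2] = 224;
  in_info.info[0].dimension[3] = 1;

  /* NULL input/output types are allowed when the model is not flexible
   * (see invoke_02 above). */
  status = ml_model_open (model_path, &model, NULL, NULL,
      ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
  if (status != NNS_ERROR_NONE)
    return status;

  input = ml_model_allocate_tensor_data (&in_info);

  /* Passing NULL as the output buffer makes the API allocate one. */
  output = ml_model_inference (model, input, NULL);

  ml_model_free_tensor_data (output);
  ml_model_free_tensor_data (input);
  return ml_model_close (model);
}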

tizen-api/include/nnstreamer-single.h (+21 −20)

@@ -90,13 +90,14 @@ typedef enum {
  * @param[in] model_path This is the path to the neural network model file.
  * @param[out] model This is the model opened. Users are required to close
  *                   the given model with ml_model_close().
- * @param[in] inputtype This is required if the given model has flexible input
+ * @param[in] input_type This is required if the given model has flexible input
  *                      dimension, where the input dimension MUST be given
  *                      before executing the model.
  *                      However, once it's given, the input dimension cannot
  *                      be changed for the given model handle.
- *                      Is is required by some custom filters of nnstreamer.
+ *                      It is required by some custom filters of nnstreamer.
  *                      You may set NULL if it's not required.
+ * @param[in] output_type This is required if the given model has flexible output dimension.
  * @param[in] nnfw The nerual network framework used to open the given
  *             @model_path. Set ML_NNFW_UNKNOWN to let it auto-detect.
  * @param[in] hw Tell the corresponding @nnfw to use a specific hardware.
@@ -109,8 +110,8 @@ typedef enum {
  *            same dimension.
  */
 int ml_model_open (const char *model_path, ml_simpleshot_model_h *model,
-    const nns_tensors_info_s *inputtype, ml_model_nnfw nnfw,
-    ml_model_hw hw);
+    const nns_tensors_info_s *input_type, const nns_tensors_info_s *output_type,
+    ml_model_nnfw nnfw, ml_model_hw hw);
 
 /**
  * @brief Close the opened model handle.
@@ -154,12 +155,12 @@ tensor_data * ml_model_inference (ml_simpleshot_model_h model,
  *         types are available.
  * @since_tizen 5.5
  * @param[in] model The model to be investigated
- * @param[out] inputtype The type of input tensor.
+ * @param[out] input_type The type of input tensor.
  * @return @c 0 on success. otherwise a negative error value
  * @retval #NNS_ERROR_NONE Successful
  */
-int ml_model_get_inputtype (ml_simpleshot_model_h model,
-    nns_tensors_info_s *inputtype);
+int ml_model_get_input_type (ml_simpleshot_model_h model,
+    nns_tensors_info_s *input_type);
 
 /**
  * @brief Get type (tensor dimension, type, name and so on) of output
@@ -171,52 +172,52 @@ int ml_model_get_inputtype (ml_simpleshot_model_h model,
  *         types are available.
  * @since_tizen 5.5
  * @param[in] model The model to be investigated
- * @param[out] outputtype The type of output tensor.
+ * @param[out] output_type The type of output tensor.
  * @return @c 0 on success. otherwise a negative error value
  * @retval #NNS_ERROR_NONE Successful
  */
-int ml_model_get_outputtype (ml_simpleshot_model_h model,
-    nns_tensors_info_s *outputtype);
+int ml_model_get_output_type (ml_simpleshot_model_h model,
+    nns_tensors_info_s *output_type);
 
 /**
  * @brief Get the byte size of the given tensor type.
  * @since_tizen 5.5
- * @param[in] tensor_type The tensor type to be investigated.
+ * @param[in] info The tensor information to be investigated.
  * @return @c >= 0 on success with byte size. otherwise a negative error value
  */
-int ml_model_get_tensor_size (const nns_tensor_info_s *tensor_type);
+size_t ml_util_get_tensor_size (const nns_tensor_info_s *info);
 
 /**
  * @brief Get the byte size of the given tensors type.
  * @since_tizen 5.5
- * @param[in] tensors_type The tensors type to be investigated.
+ * @param[in] info The tensors information to be investigated.
  * @return @c >= 0 on success with byte size. otherwise a negative error value
  */
-int ml_model_get_tensors_size (const nns_tensors_info_s *tensors_type);
+size_t ml_util_get_tensors_size (const nns_tensors_info_s *info);
 
 /**
- * @brief Free the tensors type pointer
+ * @brief Free the tensors type pointer.
  * @since_tizen 5.5
  * @param[in] type the tensors type pointer to be freed.
  */
-void ml_model_free_tensorsinfo (nns_tensors_info_s *type);
+void ml_model_free_tensors_info (nns_tensors_info_s *type);
 
 /**
- * @brief Free the tensors data pointer
+ * @brief Free the tensors data pointer.
  * @since_tizen 5.5
  * @param[in] tensor the tensors data pointer to be freed.
  */
-void ml_model_free_tensordata (tensor_data *tensor);
+void ml_model_free_tensor_data (tensor_data *tensor);
 
 /**
  * @brief Allocate a tensor data frame with the given tensors type.
  * @since_tizen 5.5
- * @param[in] type the tensors type pointer for the allocation
+ * @param[in] info The tensors information for the allocation
 * @return @c Tensors data pointer allocated. Null if error.
 * @retval NULL there is an error. call get_last_result() to get specific
 *         error numbers.
 */
-tensor_data *ml_model_allocate_tensors (const nns_tensors_info_s *type);
+tensor_data *ml_model_allocate_tensor_data (const nns_tensors_info_s *info);
 
 /**
  * @brief Check the availability of the given execution environments.
tizen-api/include/nnstreamer.h (+1)

@@ -99,6 +99,7 @@ typedef enum _nns_tensor_type_e
   NNS_TENSOR_TYPE_FLOAT32, /**< Float 32bit */
   NNS_TENSOR_TYPE_INT64, /**< Integer 64bit */
   NNS_TENSOR_TYPE_UINT64, /**< Unsigned integer 64bit */
+  ML_TENSOR_TYPE_UNKNOWN /**< Unknown type */
 } nns_tensor_type_e;
 
 /**
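
One note on this hunk: ML_TENSOR_TYPE_UNKNOWN is appended after the last existing enumerator, so it does not get value 0. Zero-filling an nns_tensors_info_s (as the new tests do with memset) therefore leaves each type field at the enum's first member rather than at the unknown marker. Below is a sketch of an explicit initializer; NNS_TENSOR_SIZE_LIMIT is assumed here as the capacity of the info[] array, since its actual definition lives outside the hunks shown.

#include <string.h>
#include <nnstreamer.h>

/* Hypothetical helper: mark every tensor slot as "unknown" explicitly,
 * since memset(0) selects the enum's first member instead. The array
 * bound NNS_TENSOR_SIZE_LIMIT is an assumption, not shown in this diff. */
static void
init_tensors_info (nns_tensors_info_s *info)
{
  unsigned int i;

  memset (info, 0, sizeof (nns_tensors_info_s));
  for (i = 0; i < NNS_TENSOR_SIZE_LIMIT; i++)
    info->info[i].type = ML_TENSOR_TYPE_UNKNOWN;
}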

tizen-api/meson.build (+2)

@@ -20,9 +20,11 @@ endif
 
 capi_main = []
 capi_main += join_paths(meson.current_source_dir(), 'src', 'tizen-api-pipeline.c')
+capi_main += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-single.c')
 
 capi_devel_main = []
 capi_devel_main += join_paths(meson.current_source_dir(), 'include', 'nnstreamer.h')
+capi_devel_main += join_paths(meson.current_source_dir(), 'include', 'nnstreamer-single.h')
 
 inc = include_directories('include')
 nninc = include_directories('../gst')
