@@ -280,65 +280,88 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
        // @todo Does not seem ok to 'dump()' the whole graph everytime in order to get a name
        const std::string modelName = graph.dump()->get_friendly_name();
        return decltype(ov::model_name)::value_type(modelName);
-    } else if (name == ov::optimal_number_of_infer_requests) {
+    }
+    if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = config.streamExecutorConfig.get_streams();
         return static_cast<decltype(ov::optimal_number_of_infer_requests)::value_type>(
             streams > 0 ? streams : 1);  // ov::optimal_number_of_infer_requests has no negative values
-    } else if (name == ov::num_streams) {
+    }
+    if (name == ov::num_streams) {
         const auto streams = config.streamExecutorConfig.get_streams();
         return decltype(ov::num_streams)::value_type(
             streams);  // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
-    } else if (name == ov::inference_num_threads) {
+    }
+    if (name == ov::inference_num_threads) {
         const auto num_threads = config.streamExecutorConfig.get_threads();
         return static_cast<decltype(ov::inference_num_threads)::value_type>(num_threads);
-    } else if (name == ov::enable_profiling.name()) {
+    }
+    if (name == ov::enable_profiling.name()) {
         const bool perfCount = config.collectPerfCounters;
         return static_cast<decltype(ov::enable_profiling)::value_type>(perfCount);
-    } else if (name == ov::hint::inference_precision) {
+    }
+    if (name == ov::hint::inference_precision) {
         return decltype(ov::hint::inference_precision)::value_type(config.inferencePrecision);
-    } else if (name == ov::hint::performance_mode) {
+    }
+    if (name == ov::hint::performance_mode) {
         return static_cast<decltype(ov::hint::performance_mode)::value_type>(config.hintPerfMode);
-    } else if (name == ov::log::level) {
+    }
+    if (name == ov::log::level) {
         return static_cast<decltype(ov::log::level)::value_type>(config.logLevel);
-    } else if (name == ov::hint::enable_cpu_pinning.name()) {
+    }
+    if (name == ov::hint::enable_cpu_pinning.name()) {
         const bool use_pin = config.enableCpuPinning;
         return static_cast<decltype(ov::hint::enable_cpu_pinning)::value_type>(use_pin);
-    } else if (name == ov::hint::enable_cpu_reservation.name()) {
+    }
+    if (name == ov::hint::enable_cpu_reservation.name()) {
         const bool use_reserve = config.enableCpuReservation;
         return static_cast<decltype(ov::hint::enable_cpu_reservation)::value_type>(use_reserve);
-    } else if (name == ov::hint::scheduling_core_type) {
+    }
+    if (name == ov::hint::scheduling_core_type) {
         const auto stream_mode = config.schedulingCoreType;
         return stream_mode;
-    } else if (name == ov::hint::model_distribution_policy) {
+    }
+    if (name == ov::hint::model_distribution_policy) {
         const auto& distribution_policy = config.modelDistributionPolicy;
         return distribution_policy;
-    } else if (name == ov::hint::enable_hyper_threading.name()) {
+    }
+    if (name == ov::hint::enable_hyper_threading.name()) {
         const bool use_ht = config.enableHyperThreading;
         return static_cast<decltype(ov::hint::enable_hyper_threading)::value_type>(use_ht);
-    } else if (name == ov::hint::execution_mode) {
+    }
+    if (name == ov::hint::execution_mode) {
         return config.executionMode;
-    } else if (name == ov::hint::num_requests) {
+    }
+    if (name == ov::hint::num_requests) {
         return static_cast<decltype(ov::hint::num_requests)::value_type>(config.hintNumRequests);
-    } else if (name == ov::execution_devices) {
+    }
+    if (name == ov::execution_devices) {
         return decltype(ov::execution_devices)::value_type{m_plugin->get_device_name()};
-    } else if (name == ov::intel_cpu::denormals_optimization) {
+    }
+    if (name == ov::intel_cpu::denormals_optimization) {
         return static_cast<decltype(ov::intel_cpu::denormals_optimization)::value_type>(
             config.denormalsOptMode == Config::DenormalsOptMode::DO_On);
-    } else if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
+    }
+    if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
         return static_cast<decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type>(
             config.fcSparseWeiDecompressionRate);
-    } else if (name == ov::hint::dynamic_quantization_group_size) {
+    }
+    if (name == ov::hint::dynamic_quantization_group_size) {
         return static_cast<decltype(ov::hint::dynamic_quantization_group_size)::value_type>(
             config.fcDynamicQuantizationGroupSize);
-    } else if (name == ov::hint::kv_cache_precision) {
+    }
+    if (name == ov::hint::kv_cache_precision) {
         return decltype(ov::hint::kv_cache_precision)::value_type(config.kvCachePrecision);
-    } else if (name == ov::key_cache_precision) {
+    }
+    if (name == ov::key_cache_precision) {
         return decltype(ov::key_cache_precision)::value_type(config.keyCachePrecision);
-    } else if (name == ov::value_cache_precision) {
+    }
+    if (name == ov::value_cache_precision) {
         return decltype(ov::value_cache_precision)::value_type(config.valueCachePrecision);
-    } else if (name == ov::key_cache_group_size) {
+    }
+    if (name == ov::key_cache_group_size) {
         return static_cast<decltype(ov::key_cache_group_size)::value_type>(config.keyCacheGroupSize);
-    } else if (name == ov::value_cache_group_size) {
+    }
+    if (name == ov::value_cache_group_size) {
         return static_cast<decltype(ov::value_cache_group_size)::value_type>(config.valueCacheGroupSize);
     }
     OPENVINO_THROW("Unsupported property: ", name);
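For reference, a minimal caller-side sketch (not part of this change, and the `"model.xml"` path is a placeholder) showing how the properties dispatched above are typically queried through the public OpenVINO 2.0 API:

```cpp
#include <iostream>
#include <string>

#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Placeholder model path; any model compilable on the CPU device would do.
    ov::CompiledModel compiled = core.compile_model("model.xml", "CPU");

    // Typed properties return decltype(property)::value_type, mirroring the
    // casts performed inside CompiledModel::get_property() in the hunk above.
    uint32_t n_requests = compiled.get_property(ov::optimal_number_of_infer_requests);
    bool profiling = compiled.get_property(ov::enable_profiling);

    // The string-based overload returns ov::Any and goes through the same dispatch.
    std::string model_name = compiled.get_property(ov::model_name.name()).as<std::string>();

    std::cout << "model: " << model_name << ", optimal requests: " << n_requests
              << ", profiling: " << std::boolalpha << profiling << std::endl;
    return 0;
}
```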