From c9f97d20db5c776f924409157170c54ebc1af1e0 Mon Sep 17 00:00:00 2001
From: Anastasiya Pronina
Date: Fri, 17 Jan 2025 16:55:14 +0000
Subject: [PATCH] Fixed coverity issues of Low impact

---
 .../intel_npu/src/plugin/npuw/base_sync_infer_request.cpp | 6 ++----
 src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp  | 6 +++---
 .../intel_npu/src/plugin/npuw/llm_compiled_model.cpp      | 8 ++++----
 .../intel_npu/src/plugin/npuw/llm_infer_request.cpp       | 4 ++--
 src/plugins/intel_npu/src/plugin/npuw/serialization.hpp   | 4 ++--
 5 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/src/plugins/intel_npu/src/plugin/npuw/base_sync_infer_request.cpp b/src/plugins/intel_npu/src/plugin/npuw/base_sync_infer_request.cpp
index 0a6ecfa7d556bf..19305742fda68e 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/base_sync_infer_request.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/base_sync_infer_request.cpp
@@ -504,8 +504,7 @@ void ov::npuw::IBaseInferRequest::dump_input_tensors(std::size_t idx) {
     // Note:
     // - _name is used for the user option (no leading 00s for indices)
     // - _path is used for disk dump (will have leading 00s for indices)
-    const auto comp_submodel_name = subgr_name(idx);
-    const auto comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
+    const auto& comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
     const auto num_inputs = comp_submodel->inputs().size();

     // There's different approaches to dumping normal and spatial subgraphs.
@@ -584,8 +583,7 @@ void ov::npuw::IBaseInferRequest::dump_output_tensors(std::size_t idx) {
     // - _name is used for the user option (no leading 00s for indices)
     // - _path is used for disk dump (will have leading 00s for indices)
     // FIXME: Duplication is evil
-    const auto comp_submodel_name = subgr_name(idx);
-    const auto comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
+    const auto& comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
     const std::size_t num_outputs = comp_submodel->outputs().size();

     // Same approach as in above. Spatial tensors require special handling
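
The two hunks above are the same fix twice: the never-read `comp_submodel_name` locals were the unused-variable defects Coverity flagged, and the concatenated dump path is now bound to a `const` reference. As a minimal standalone sketch (hypothetical names, not code from the plugin), the snippet below shows why binding the result of string concatenation to `const auto&` is safe: a const lvalue reference extends the lifetime of the temporary it binds to.

    #include <cstddef>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for subgr_path_suffix()/iter_path_suffix().
    static std::string path_suffix(std::size_t idx) {
        return "_" + std::to_string(idx);  // returns a temporary std::string
    }

    int main() {
        const std::string base = "model";
        // operator+ yields a prvalue; binding it to `const auto&` extends the
        // temporary's lifetime to that of `path`, so nothing dangles and no
        // extra copy of the concatenated string is made.
        const auto& path = base + path_suffix(42);
        std::cout << path << '\n';  // prints "model_42"
    }
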
diff --git a/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp b/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp
index f0d9950c2e3520..2bee8198c19448 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp
@@ -695,7 +695,7 @@ std::shared_ptr<ov::npuw::CompiledModel> ov::npuw::CompiledModel::deserialize(
         read(stream, key);
         ov::Any val;
         read_any(stream, val);
-        compiled->m_non_npuw_props[key] = val;
+        compiled->m_non_npuw_props[key] = std::move(val);
     }

     compiled->implement_properties();
@@ -831,7 +831,7 @@ void ov::npuw::CompiledModel::detach_memory() {

 std::string ov::npuw::CompiledModel::global_mem_device() const {
     // Force globally set device if set
-    const std::string device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
+    const std::string& device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
     if (!device_alloc.empty()) {
         return device_alloc;
     }
@@ -852,7 +852,7 @@ std::string ov::npuw::CompiledModel::global_mem_device() const {

 std::string ov::npuw::CompiledModel::funcall_mem_device(const std::size_t idx) const {
     // Force globally set device if set
-    const std::string device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
+    const std::string& device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
     if (!device_alloc.empty()) {
         return device_alloc;
     }
diff --git a/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp b/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp
index 6e3cb68e2f1ad2..673d3c2cddbc8d 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp
@@ -225,7 +225,7 @@ std::shared_ptr<ov::Model> redirect_new_kv_to_output(const std::shared_ptr<ov::M

 std::shared_ptr<ov::Model> cvt_value_tensors_layout(std::shared_ptr<ov::Model> model) {
     ov::preprocess::PrePostProcessor ppp(model);
-    for (auto tensor : model->outputs()) {
+    for (const auto& tensor : model->outputs()) {
         if (tensor.get_any_name().find("value") != std::string::npos) {
             // NB: [batch, num_heads, seq_len, emb_size] -> [batch, num_heads, emb_size, seq_len]
             ppp.output(tensor.get_any_name()).model().set_layout(ov::Layout("BHSE"));
@@ -311,7 +311,7 @@ std::optional<NPUDesc> extract_npu_descriptor(const std::shared_ptr<const ov::IP
     const std::string arch = plugin->get_property(ov::device::architecture.name(), ov::AnyMap{}).as<std::string>();
     const int64_t max_tiles = plugin->get_property(ov::intel_npu::max_tiles.name(), ov::AnyMap{}).as<int64_t>();
     bool compiler_dq = false;
-    const auto supported_properties =
+    const auto& supported_properties =
         plugin->get_property(ov::supported_properties.name(), ov::AnyMap{}).as<std::vector<ov::PropertyName>>();
     if (std::find(supported_properties.begin(), supported_properties.end(), "NPU_COMPILER_DYNAMIC_QUANTIZATION") !=
         supported_properties.end()) {
@@ -506,9 +506,9 @@ ov::npuw::LLMCompiledModel::LLMCompiledModel(const std::shared_ptr<ov::Model>& m
         generate_config_opt.value_or(get_default_generate_config(kvcache_model, npudesc, generate_hint))
             .as<ov::AnyMap>();

-    auto prefill_config_addition_value =
+    const auto& prefill_config_addition_value =
         prefill_config_addition.has_value() ? prefill_config_addition.value().as<ov::AnyMap>() : ov::AnyMap{};
-    auto generate_config_addition_value =
+    const auto& generate_config_addition_value =
         generate_config_addition.has_value() ? generate_config_addition.value().as<ov::AnyMap>() : ov::AnyMap{};

     merge_config_with(prefill_config, other_props);
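
Both files above get the same class of fix: values that are only read are no longer copied. The sketch below is a plain-standard-library stand-in (std::any for ov::Any, std::map for the compiled model's property map; all names hypothetical) showing the two idioms: a range-for over non-trivial elements by `const auto&`, and `std::move` on the last use of a freshly read value.

    #include <any>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        const std::vector<std::string> outputs{"key_0", "value_0", "logits"};

        // `for (auto name : outputs)` would copy every string; `const auto&`
        // observes each element in place, as the patched range-for does for
        // model->outputs().
        for (const auto& name : outputs) {
            if (name.find("value") != std::string::npos) {
                std::cout << name << '\n';
            }
        }

        // `props[key] = val;` would copy the freshly read value and then
        // destroy it; std::move transfers its contents instead. `val` is not
        // used again after this line.
        std::map<std::string, std::any> props;
        std::any val = std::string("NPU");
        props["DEVICE"] = std::move(val);
        std::cout << std::any_cast<const std::string&>(props["DEVICE"]) << '\n';
    }

The same reasoning covers `m_cfg.get<...>()`: if the getter returns by value, the const reference binds to the temporary with extended lifetime; if it returns a reference, the copy disappears entirely.
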
diff --git a/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp b/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp
index 2e987036483e34..f7cd649f20b5b9 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp
@@ -78,8 +78,8 @@ void copy_columns_by_row_chunks(ov::SoPtr<ov::ITensor> src, ov::SoPtr<ov::ITenso
     OPENVINO_ASSERT(src_shape == dst->get_shape());
     OPENVINO_ASSERT(src->get_byte_size() == dst->get_byte_size());

-    const auto src_strides = src->get_strides();
-    const auto dst_strides = dst->get_strides();
+    const auto& src_strides = src->get_strides();
+    const auto& dst_strides = dst->get_strides();
     const auto elem_size = src->get_byte_size() / src->get_size();

     const auto C = src_shape[1];
diff --git a/src/plugins/intel_npu/src/plugin/npuw/serialization.hpp b/src/plugins/intel_npu/src/plugin/npuw/serialization.hpp
index 170631f644da12..bdfb7cfa10d1b6 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/serialization.hpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/serialization.hpp
@@ -159,7 +159,7 @@ void read(std::istream& stream, std::vector<T>& var) {
     for (std::size_t i = 0; i < var_size; ++i) {
         T elem;
         read(stream, elem);
-        var.push_back(elem);
+        var.push_back(std::move(elem));
     }
 }

@@ -180,7 +180,7 @@ void read(std::istream& stream, std::unordered_set<T>& var) {
     for (std::size_t i = 0; i < var_size; ++i) {
         T elem;
         read(stream, elem);
-        var.insert(elem);
+        var.insert(std::move(elem));
     }
 }
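
The serialization.hpp hunks fix the remaining copies: `elem` was pushed or inserted by copy and then immediately destroyed. The sketch below mirrors the shape of those `read` helpers under an assumed length-prefixed string encoding (the plugin's real wire format is not shown in this patch); the point is only the `std::move` on the element's last use.

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    // Hypothetical length-prefixed encoding for a single string.
    void read(std::istream& stream, std::string& var) {
        std::uint64_t size = 0;
        stream.read(reinterpret_cast<char*>(&size), sizeof(size));
        var.resize(size);
        stream.read(&var[0], static_cast<std::streamsize>(size));
    }

    void write(std::ostream& stream, const std::string& var) {
        const std::uint64_t size = var.size();
        stream.write(reinterpret_cast<const char*>(&size), sizeof(size));
        stream.write(var.data(), static_cast<std::streamsize>(size));
    }

    // Same shape as the patched helper: read into a local, then move it in.
    template <typename T>
    void read(std::istream& stream, std::vector<T>& var) {
        std::uint64_t var_size = 0;
        stream.read(reinterpret_cast<char*>(&var_size), sizeof(var_size));
        for (std::uint64_t i = 0; i < var_size; ++i) {
            T elem;
            read(stream, elem);
            var.push_back(std::move(elem));  // last use of elem: move, not copy
        }
    }

    int main() {
        std::stringstream ss;
        const std::uint64_t count = 2;
        ss.write(reinterpret_cast<const char*>(&count), sizeof(count));
        write(ss, "hello");
        write(ss, "npuw");

        std::vector<std::string> out;
        read(ss, out);
        std::cout << out[0] << ' ' << out[1] << '\n';  // hello npuw
    }

For element types that own heap memory, such as std::string, the move turns a deep copy into a pointer swap; for trivially copyable T it degrades to a plain copy, so the change is never worse.
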