Skip to content

Commit

Permalink
Fixed coverity issues of Low impact
Browse files Browse the repository at this point in the history
  • Loading branch information
AsyaPronina committed Jan 17, 2025
1 parent 83c0474 commit c9f97d2
Show file tree
Hide file tree
Showing 5 changed files with 13 additions and 15 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -504,8 +504,7 @@ void ov::npuw::IBaseInferRequest::dump_input_tensors(std::size_t idx) {
// Note:
// - _name is used for the user option (no leading 00s for indices)
// - _path is used for disk dump (will have leading 00s for indices)
-const auto comp_submodel_name = subgr_name(idx);
-const auto comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
+const auto& comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
const auto num_inputs = comp_submodel->inputs().size();

// There's different approaches to dumping normal and spatial subgraphs.
Expand Down Expand Up @@ -584,8 +583,7 @@ void ov::npuw::IBaseInferRequest::dump_output_tensors(std::size_t idx) {
// - _name is used for the user option (no leading 00s for indices)
// - _path is used for disk dump (will have leading 00s for indices)
// FIXME: Duplication is evil
-const auto comp_submodel_name = subgr_name(idx);
-const auto comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
+const auto& comp_submodel_path = m_npuw_model->m_name + subgr_path_suffix(idx) + iter_path_suffix(idx);
const std::size_t num_outputs = comp_submodel->outputs().size();

// Same approach as in above. Spatial tensors require special handling
Expand Down
6 changes: 3 additions & 3 deletions src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -695,7 +695,7 @@ std::shared_ptr<ov::npuw::CompiledModel> ov::npuw::CompiledModel::deserialize(
read(stream, key);
ov::Any val;
read_any(stream, val);
-compiled->m_non_npuw_props[key] = val;
+compiled->m_non_npuw_props[key] = std::move(val);
}
compiled->implement_properties();

Expand Down Expand Up @@ -831,7 +831,7 @@ void ov::npuw::CompiledModel::detach_memory() {

std::string ov::npuw::CompiledModel::global_mem_device() const {
// Force globally set device if set
-const std::string device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
+const std::string& device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
if (!device_alloc.empty()) {
return device_alloc;
}
Expand All @@ -852,7 +852,7 @@ std::string ov::npuw::CompiledModel::global_mem_device() const {

std::string ov::npuw::CompiledModel::funcall_mem_device(const std::size_t idx) const {
// Force globally set device if set
-const std::string device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
+const std::string& device_alloc = m_cfg.get<::intel_npu::NPUW_WEIGHTS_BANK_ALLOC>();
if (!device_alloc.empty()) {
return device_alloc;
}
Expand Down
8 changes: 4 additions & 4 deletions src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ std::shared_ptr<ov::Model> redirect_new_kv_to_output(const std::shared_ptr<ov::M

std::shared_ptr<ov::Model> cvt_value_tensors_layout(std::shared_ptr<ov::Model> model) {
ov::preprocess::PrePostProcessor ppp(model);
-for (auto tensor : model->outputs()) {
+for (const auto& tensor : model->outputs()) {
if (tensor.get_any_name().find("value") != std::string::npos) {
// NB: [batch, num_heads, seq_len, emb_size] -> [batch, num_heads, emb_size, seq_len]
ppp.output(tensor.get_any_name()).model().set_layout(ov::Layout("BHSE"));
Expand Down Expand Up @@ -311,7 +311,7 @@ std::optional<NPUDesc> extract_npu_descriptor(const std::shared_ptr<const ov::IP
const std::string arch = plugin->get_property(ov::device::architecture.name(), ov::AnyMap{}).as<std::string>();
const int64_t max_tiles = plugin->get_property(ov::intel_npu::max_tiles.name(), ov::AnyMap{}).as<int64_t>();
bool compiler_dq = false;
-const auto supported_properties =
+const auto& supported_properties =
plugin->get_property(ov::supported_properties.name(), ov::AnyMap{}).as<std::vector<ov::PropertyName>>();
if (std::find(supported_properties.begin(), supported_properties.end(), "NPU_COMPILER_DYNAMIC_QUANTIZATION") !=
supported_properties.end()) {
Expand Down Expand Up @@ -506,9 +506,9 @@ ov::npuw::LLMCompiledModel::LLMCompiledModel(const std::shared_ptr<ov::Model>& m
generate_config_opt.value_or(get_default_generate_config(kvcache_model, npudesc, generate_hint))
.as<ov::AnyMap>();

-auto prefill_config_addition_value =
+const auto& prefill_config_addition_value =
prefill_config_addition.has_value() ? prefill_config_addition.value().as<ov::AnyMap>() : ov::AnyMap{};
-auto generate_config_addition_value =
+const auto& generate_config_addition_value =
generate_config_addition.has_value() ? generate_config_addition.value().as<ov::AnyMap>() : ov::AnyMap{};

merge_config_with(prefill_config, other_props);
Expand Down
4 changes: 2 additions & 2 deletions src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@ void copy_columns_by_row_chunks(ov::SoPtr<ov::ITensor> src, ov::SoPtr<ov::ITenso
OPENVINO_ASSERT(src_shape == dst->get_shape());
OPENVINO_ASSERT(src->get_byte_size() == dst->get_byte_size());

-const auto src_strides = src->get_strides();
-const auto dst_strides = dst->get_strides();
+const auto& src_strides = src->get_strides();
+const auto& dst_strides = dst->get_strides();
const auto elem_size = src->get_byte_size() / src->get_size();

const auto C = src_shape[1];
Expand Down
4 changes: 2 additions & 2 deletions src/plugins/intel_npu/src/plugin/npuw/serialization.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ void read(std::istream& stream, std::vector<T>& var) {
for (std::size_t i = 0; i < var_size; ++i) {
T elem;
read(stream, elem);
-var.push_back(elem);
+var.push_back(std::move(elem));
}
}

Expand All @@ -180,7 +180,7 @@ void read(std::istream& stream, std::unordered_set<T>& var) {
for (std::size_t i = 0; i < var_size; ++i) {
T elem;
read(stream, elem);
-var.insert(elem);
+var.insert(std::move(elem));
}
}

Expand Down

0 comments on commit c9f97d2

Please sign in to comment.