Skip to content

Commit 07ecdf0

Browse files
Authored on Dec 5, 2024
Fix for coverity issues of medium and low impact (openvinotoolkit#27916)
### Details:
- *Fixed "Data race condition" (Medium), "COPY_INSTEAD_OF_MOVE" and "Use of auto that causes a copy" (Low) issues*
- *...*

### Tickets:
- *EISW-149544*
1 parent 64b1f2a commit 07ecdf0

File tree

4 files changed

+18
-13
lines changed

4 files changed

+18
-13
lines changed
 

‎src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -30,13 +30,13 @@ std::shared_ptr<ov::Model> redirect_new_kv_to_output(const std::shared_ptr<ov::M
3030
std::shared_ptr<ov::Model> cvt_kvcache_to_fp16(const std::shared_ptr<ov::Model>& model) {
3131
ov::preprocess::PrePostProcessor ppp(model);
3232

33-
for (auto tensor : model->inputs()) {
33+
for (const auto& tensor : model->inputs()) {
3434
if (tensor.get_any_name().find("past_key") != std::string::npos) {
3535
ppp.input(tensor.get_any_name()).tensor().set_element_type(ov::element::Type_t::f16);
3636
}
3737
}
3838

39-
for (auto tensor : model->outputs()) {
39+
for (const auto& tensor : model->outputs()) {
4040
if (tensor.get_any_name().find("present") != std::string::npos) {
4141
ppp.output(tensor.get_any_name()).tensor().set_element_type(ov::element::Type_t::f16);
4242
}
@@ -55,7 +55,7 @@ void reshape_to_static(std::shared_ptr<ov::Model> model,
5555
const uint32_t kvcache_size,
5656
const KVAxesPosition& kv_axes_position) {
5757
std::map<std::string, ov::PartialShape> new_shapes;
58-
for (auto input : model->inputs()) {
58+
for (const auto& input : model->inputs()) {
5959
const auto& input_name = input.get_any_name();
6060
ov::PartialShape new_shape;
6161
if (input_name.find("input_ids") != std::string::npos) {
@@ -275,7 +275,7 @@ ov::npuw::LLMCompiledModel::LLMCompiledModel(const std::shared_ptr<ov::Model>& m
275275

276276
auto npudesc = extract_npu_descriptor(plugin);
277277

278-
ov::AnyMap properties_copy = other_props;
278+
ov::AnyMap properties_copy = std::move(other_props);
279279
auto prefill_config = get_default_prefill_config(model, npudesc);
280280
// NB: GENERATE_HINT is only applicable for default generate config!
281281
const ::intel_npu::npuw::llm::GenerateHint generate_hint = m_cfg.get<::intel_npu::NPUW_LLM_GENERATE_HINT>();

‎src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -36,17 +36,17 @@ ov::npuw::LLMInferRequest::LLMInferRequest(const std::shared_ptr<ov::npuw::LLMCo
3636
m_kvcache_request = compiled_model->m_kvcache_compiled->create_infer_request();
3737
m_prefill_request = compiled_model->m_prefill_compiled->create_infer_request();
3838

39-
for (auto input_port : m_prefill_request->get_compiled_model()->inputs()) {
39+
for (const auto& input_port : m_prefill_request->get_compiled_model()->inputs()) {
4040
m_prefill_in_ports.emplace(input_port.get_any_name(), input_port);
4141
}
42-
for (auto output_port : m_prefill_request->get_compiled_model()->outputs()) {
42+
for (const auto& output_port : m_prefill_request->get_compiled_model()->outputs()) {
4343
m_prefill_out_ports.emplace(output_port.get_any_name(), output_port);
4444
}
4545

46-
for (auto input_port : m_kvcache_request->get_compiled_model()->inputs()) {
46+
for (const auto& input_port : m_kvcache_request->get_compiled_model()->inputs()) {
4747
m_kvcache_in_ports.emplace(input_port.get_any_name(), input_port);
4848
}
49-
for (auto output_port : m_kvcache_request->get_compiled_model()->outputs()) {
49+
for (const auto& output_port : m_kvcache_request->get_compiled_model()->outputs()) {
5050
m_kvcache_out_ports.emplace(output_port.get_any_name(), output_port);
5151
}
5252
}

‎src/plugins/intel_npu/src/plugin/npuw/weights_bank.cpp

+8-3
Original file line numberDiff line numberDiff line change
@@ -132,10 +132,15 @@ ov::Tensor Bank::eval_and_alloc(const LazyTensor& tensor,
132132

133133
bool Bank::is_remote(const LazyTensor& tensor) const {
134134
// FIXME: make generic
135+
std::lock_guard<std::mutex> guard(m_mutex);
136+
135137
auto npu_bank = m_device_banks.find("NPU");
136-
if (npu_bank != m_device_banks.end() && npu_bank->second.storage.find(tensor) != npu_bank->second.storage.end()) {
137-
// Found in NPU bank so considered remote (utterly wrong for the generic case)
138-
return true;
138+
if (npu_bank != m_device_banks.end()) {
139+
std::lock_guard<std::mutex> dev_guard(npu_bank->second.mutex);
140+
if (npu_bank->second.storage.find(tensor) != npu_bank->second.storage.end()) {
141+
// Found in NPU bank so considered remote (utterly wrong for the generic case)
142+
return true;
143+
}
139144
}
140145
return false;
141146
}

‎src/plugins/intel_npu/src/plugin/npuw/weights_bank.hpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -38,13 +38,13 @@ class Bank {
3838
// Bank for specified device and their allocated memory
3939
struct DeviceBank {
4040
std::unordered_map<LazyTensor, ov::Tensor, LazyTensor::Hash> storage;
41-
std::mutex mutex;
41+
mutable std::mutex mutex;
4242
};
4343
std::unordered_map<std::string, DeviceBank> m_device_banks;
4444

4545
ov::Tensor eval_and_alloc(const LazyTensor& tensor, DeviceBank& dbank, const std::string& device);
4646

47-
std::mutex m_mutex;
47+
mutable std::mutex m_mutex;
4848
std::shared_ptr<const ov::ICore> m_core = nullptr;
4949
std::string m_alloc_device;
5050
};

0 commit comments

Comments (0)