
Commit 8a19942
Fix performance-related Coverity scan issues in CPU plugin (openvinotoolkit#28291)
### Details:
- Fix "COPY_INSTEAD_OF_MOVE" Coverity scans: 1560671, 1560670, 1560668, 1560667, 1560666, 1560059, 1560057, 1560055, 1560054, 1560052, 1560051, 1560050, 1560047, 1560046, 1560045, 1560043, 1560042, 1560041, 1559840, 1559804, 1559802, 1559777, 1559761, 1559697, 1559559, 1559558, 1559557, 1559556, 1559553, 1559552, 1559549, 1559547, 1559545, 1559543, 1559542, 1559539
- Fix "Use of auto that causes a copy" Coverity scans: 1560673, 1560669, 1560044, 1559820, 1559817, 1559561, 1559560, 1559555, 1559544, 1559540

### Tickets:
- [CVS-153062](https://jira.devtools.intel.com/browse/CVS-153062)
1 parent c0650a0 commit 8a19942

23 files changed: +40 -40 lines
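Both fix patterns repeat across the 23 files below, so here is a minimal standalone sketch of each (illustrative code, not the plugin's): moving a by-value argument into its destination on last use, and binding `const auto&` to a getter that returns a reference.

```cpp
#include <string>
#include <utility>
#include <vector>

struct Config {
    std::vector<int> table;
    // COPY_INSTEAD_OF_MOVE: copying the by-value parameter into the member
    // costs an extra allocation; std::move transfers the buffer instead.
    explicit Config(std::vector<int> t) : table(std::move(t)) {}
};

struct Model {
    std::string name_{"model"};
    const std::string& name() const { return name_; }
};

int main() {
    std::vector<int> streams{1, 2, 3};
    Config cfg(std::move(streams));  // last use of 'streams': move, don't copy

    Model m;
    // "Use of auto that causes a copy": plain 'auto' deduces std::string and
    // copies the value; 'const auto&' binds to the returned reference.
    const auto& name = m.name();
    return (name == "model" && cfg.table.size() == 3) ? 0 : 1;
}
```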

src/plugins/intel_cpu/src/compiled_model.cpp

+1 -1

@@ -132,7 +132,7 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
                        ov::hint::SchedulingCoreType::ANY_CORE,
                        false,
                        true,
-                       sub_streams_table,
+                       std::move(sub_streams_table),
                        sub_cfg.streamsRankTable[i]};
         m_sub_compiled_models.push_back(
             std::make_shared<CompiledModel>(model, plugin, sub_cfg, loaded_from_cache, m_sub_memory_manager));

src/plugins/intel_cpu/src/config.cpp

+1 -1

@@ -311,7 +311,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
         }
     } else if (key == ov::cache_encryption_callbacks.name()) {
         try {
-            auto encryption_callbacks = val.as<EncryptionCallbacks>();
+            const auto& encryption_callbacks = val.as<EncryptionCallbacks>();
             cacheEncrypt = encryption_callbacks.encrypt;
             cacheDecrypt = encryption_callbacks.decrypt;
         } catch (ov::Exception&) {
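A sketch of why `const auto&` matters for this hunk, assuming, as the fix implies, that `as<EncryptionCallbacks>()` returns a reference to a struct of `std::function` members; the struct and getter below are stand-ins, not the ov::Any API:

```cpp
#include <functional>
#include <string>

struct EncryptionCallbacks {
    std::function<std::string(const std::string&)> encrypt;
    std::function<std::string(const std::string&)> decrypt;
};

// Stand-in for val.as<EncryptionCallbacks>(): returns a reference to storage.
const EncryptionCallbacks& stored_callbacks() {
    static const EncryptionCallbacks cb{
        [](const std::string& s) { return s; },
        [](const std::string& s) { return s; },
    };
    return cb;
}

int main() {
    // 'auto' here would deep-copy both std::function objects (allocations,
    // plus any captured state); 'const auto&' binds without any copy.
    const auto& callbacks = stored_callbacks();
    return callbacks.encrypt("x") == "x" ? 0 : 1;
}
```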

src/plugins/intel_cpu/src/cpu_memory.cpp

+7 -7

@@ -84,7 +84,7 @@ Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data
 Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block)
     : m_eng(eng),
       m_pMemDesc(desc),
-      m_blockHandle(block, this),
+      m_blockHandle(std::move(block), this),
       dnnlMemHandle(this) {
     if (desc->getPrecision() == element::string) {
         OPENVINO_THROW("[CPU] Memory object can't be created for string data.");
@@ -95,7 +95,7 @@ Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block
 }

 Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, MemoryBlockPtr block)
-    : Memory::Memory(eng, desc.clone(), block) {}
+    : Memory::Memory(eng, desc.clone(), std::move(block)) {}

 size_t Memory::getSize() const {
     auto size = getDesc().getCurrentMemSize();
@@ -592,14 +592,14 @@ bool mbind_move(const dnnl::memory mem, int numaNodeID) {
 }

 MemoryPtr split_horizontal(const dnnl::engine& eng,
-                           const MemoryPtr src,
+                           const MemoryPtr& src,
                            int dim,
                            int w_rank,
                            int w_size,
                            bool need_fill) {
     auto desc = src->getDescPtr();
     auto shape = src->getShape();
-    auto dims = shape.getDims();
+    const auto& dims = shape.getDims();
     auto prec = src->getPrecision();
     if (dim < 0) {
         dim += dims.size();
@@ -655,14 +655,14 @@ MemoryPtr split_horizontal(const dnnl::engine& eng,
 }

 MemoryPtr split_vertical(const dnnl::engine& eng,
-                         const MemoryPtr src,
+                         const MemoryPtr& src,
                          int dim,
                          int w_rank,
                          int w_size,
                          bool need_fill) {
     auto desc = src->getDescPtr();
-    auto shape = src->getShape();
-    auto dims = shape.getDims();
+    const auto& shape = src->getShape();
+    const auto& dims = shape.getDims();
     auto prec = src->getPrecision();
     if (dim < 0) {
         dim += dims.size();
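The signature change from `const MemoryPtr src` to `const MemoryPtr& src` (mirrored in cpu_memory.h below) removes one atomic refcount increment/decrement pair per call; a standalone sketch with illustrative names:

```cpp
#include <memory>
#include <vector>

using BufPtr = std::shared_ptr<std::vector<float>>;

// Pass-by-value would bump the shared_ptr refcount on entry and drop it on
// exit; a const reference leaves ownership entirely with the caller.
float sum(const BufPtr& src) {
    float acc = 0.0F;
    for (float v : *src) {
        acc += v;
    }
    return acc;
}

int main() {
    auto buf = std::make_shared<std::vector<float>>(16, 1.0F);
    return sum(buf) == 16.0F ? 0 : 1;
}
```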

src/plugins/intel_cpu/src/cpu_memory.h

+3 -3

@@ -122,7 +122,7 @@ using MemoryBlockCPtr = std::shared_ptr<const IMemoryBlockObserver>;

 class DnnlMemBlockHandle {
 public:
-    DnnlMemBlockHandle(MemoryBlockPtr pBlock, Memory* pMem) : m_pMemBlock(pBlock), m_pMem(pMem) {
+    DnnlMemBlockHandle(MemoryBlockPtr pBlock, Memory* pMem) : m_pMemBlock(std::move(pBlock)), m_pMem(pMem) {
         if (m_pMemBlock) {
             m_pMemBlock->registerMemory(m_pMem);
         }
@@ -447,13 +447,13 @@ bool mbind_move(const MemoryCPtr mem, int numaNodeID);
 bool mbind_move(const dnnl::memory mem, int numaNodeID);

 MemoryPtr split_horizontal(const dnnl::engine& eng,
-                           const MemoryPtr src,
+                           const MemoryPtr& src,
                            int dim,
                            int w_rank,
                            int w_size,
                            bool need_fill = true);
 MemoryPtr split_vertical(const dnnl::engine& eng,
-                         const MemoryPtr src,
+                         const MemoryPtr& src,
                          int dim,
                          int w_rank,
                          int w_size,

src/plugins/intel_cpu/src/edge.cpp

+1 -1

@@ -300,7 +300,7 @@ void Edge::allocate(MemoryBlockPtr memBlock) {

     auto allocateFunc = [OV_CAPTURE_CPY_AND_THIS](const MemoryDesc& inputDesc) -> MemoryPtr {
         auto parentPtr = getParent();
-        return std::make_shared<Memory>(parentPtr->getEngine(), inputDesc, memBlock);
+        return std::make_shared<Memory>(parentPtr->getEngine(), inputDesc, std::move(memBlock));
     };

     allocateCommon(allocateFunc);

src/plugins/intel_cpu/src/graph.cpp

+1 -1

@@ -193,7 +193,7 @@ void Graph::Replicate(const std::shared_ptr<const ov::Model>& model,
     }

     // Add stub output node for unused data
-    for (auto unusedOutput : unusedOutputs) {
+    for (const auto& unusedOutput : unusedOutputs) {
         auto parentNode = op2node[unusedOutput.get_node_shared_ptr()];
         const auto port = unusedOutput.get_index();
         const auto nodeName =
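Iterating by value copies each element, which for handle-like types means a refcount bump per iteration; a sketch of the range-for fix, using a stand-in rather than `ov::Output<ov::Node>`:

```cpp
#include <memory>
#include <vector>

struct Output {
    std::shared_ptr<int> node;  // stand-in for the node handle inside ov::Output
};

int main() {
    std::vector<Output> unusedOutputs(4, Output{std::make_shared<int>(7)});
    int total = 0;
    for (const auto& out : unusedOutputs) {  // no per-element copy
        total += *out.node;
    }
    return total == 28 ? 0 : 1;
}
```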

src/plugins/intel_cpu/src/graph_context.cpp

+1 -1

@@ -19,7 +19,7 @@ GraphContext::GraphContext(const Config& config,
       weightsCache(std::move(w_cache)),
       isGraphQuantizedFlag(isGraphQuantized),
       streamExecutor(streamExecutor),
-      subMemoryManager(sub_memory_manager),
+      subMemoryManager(std::move(sub_memory_manager)),
       memoryStatesRegister(std::make_shared<node::MemoryStatesRegister>()),
       networkMemoryControl(std::make_shared<NetworkMemoryControl>()) {
     rtParamsCache = std::make_shared<MultiCache>(config.rtCacheCapacity);

src/plugins/intel_cpu/src/infer_request.cpp

+1 -1

@@ -273,7 +273,7 @@ void SyncInferRequest::change_default_ptr(Graph& graph) {
                 : // then swap internal buffer to avoid data corruption
                 controlBlock.currentMemBlock(); // else reuse the existing buffer

-        outputMemBlock->setMemBlockResize(memBlock);
+        outputMemBlock->setMemBlockResize(std::move(memBlock));
         DEBUG_LOG("reset proxy ",
                   outputMemBlock,
                   ", actual ",

src/plugins/intel_cpu/src/memory_control.cpp

+4 -2

@@ -16,7 +16,9 @@ namespace {

 class StaticPartitionMemoryBlock : public IMemoryBlockObserver {
 public:
-    StaticPartitionMemoryBlock(MemoryBlockPtr pBlock, ptrdiff_t offset) : m_pBlock(pBlock), m_offset(offset) {
+    StaticPartitionMemoryBlock(MemoryBlockPtr pBlock, ptrdiff_t offset)
+        : m_pBlock(std::move(pBlock)),
+          m_offset(offset) {
         OPENVINO_ASSERT(m_pBlock, "Memory block is uninitialized");
     }

@@ -410,7 +412,7 @@ edgeClusters MemoryControl::findEdgeClusters(const std::vector<EdgePtr>& graphEd
 }

 MemoryControl& NetworkMemoryControl::createMemoryControlUnit(std::vector<size_t> syncInds) {
-    m_controlUnits.emplace_back(std::unique_ptr<MemoryControl>(new MemoryControl(syncInds)));
+    m_controlUnits.emplace_back(std::unique_ptr<MemoryControl>(new MemoryControl(std::move(syncInds))));
     return *(m_controlUnits.back());
 }

src/plugins/intel_cpu/src/nodes/composite.cpp

+3 -3

@@ -46,7 +46,7 @@ void Composite::selectOptimalPrimitiveDescriptor() {
     for (size_t i = 0; i < getParentEdges().size(); i++) {
         auto desc = getParentOutputMemDesc(getParentEdgeAt(i));
         inConfs.emplace_back(desc);
-        graphInputConfig.emplace_back(node::Input::InputConfig{desc, true});
+        graphInputConfig.emplace_back(node::Input::InputConfig{std::move(desc), true});
     }

     std::vector<Input::OutputConfig> graphOutputConfig;
@@ -65,7 +65,7 @@ void Composite::selectOptimalPrimitiveDescriptor() {
         outConfs.emplace_back(desc);
     }

-    const NodeConfig config(inConfs, outConfs);
+    const NodeConfig config(std::move(inConfs), std::move(outConfs));

     supportedPrimitiveDescriptors.clear();
     supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::undef);
@@ -99,7 +99,7 @@ void Composite::execute(dnnl::stream) {
 }

 void Composite::executeDynamicImpl(dnnl::stream strm) {
-    execute(strm);
+    execute(std::move(strm));

     // since the shape inference is not performed for the composite node
     // a memory of the extra child edges, attached to the output ports
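`execute(std::move(strm))` recurs in several nodes below (rms_norm.h, string_tensor_pack.cpp, string_tensor_unpack.cpp): a by-value parameter forwarded on its last use should be moved. A sketch with a stand-in for `dnnl::stream`, which wraps a reference-counted handle:

```cpp
#include <memory>
#include <utility>

struct Stream {
    std::shared_ptr<void> handle;  // copying bumps a refcount; moving is free
};

void execute(Stream strm) {
    (void)strm;  // consume the stream
}

void executeDynamicImpl(Stream strm) {
    execute(std::move(strm));  // last use of 'strm': transfer, don't copy
}

int main() {
    executeDynamicImpl(Stream{});
    return 0;
}
```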

src/plugins/intel_cpu/src/nodes/fullyconnected.cpp

+2 -2

@@ -220,7 +220,7 @@ void FullyConnected::needPrepareParamsForTensorParallel() {
     };

     int dim = -1;
-    auto dst_shape = dstMemoryBuffer->getShape();
+    const auto& dst_shape = dstMemoryBuffer->getShape();
     auto dst_desc = dstMemoryBuffer->getDescPtr();
     auto dims = dst_shape.getDims();
     if (dim < 0) {
@@ -625,7 +625,7 @@ void FullyConnected::needUpdateTensorParalelConfig() {
     // 1. weight shape is dynamic
     // 2. last dim can be splited.
     if (tp_cfg.enable_tensor_parallel) {
-        auto& shape = getSrcMemoryAtPort(WEIGHTS)->getShape();
+        const auto& shape = getSrcMemoryAtPort(WEIGHTS)->getShape();
         if (shape.isDynamic()) {
             tp_cfg.enable_tensor_parallel = false;
         } else if (shape.getDims()[0] < static_cast<size_t>(tp_cfg.w_size)) {

src/plugins/intel_cpu/src/nodes/llm_mlp.cpp

+1 -2

@@ -224,9 +224,8 @@ class LinearGateUp {
             blkN++;
         }
         if (blkN) {
-            auto shared_atomic = std::make_shared<std::atomic_int>(0);
             auto& work = works[ithr];
-            work.sync_flag = shared_atomic;
+            work.sync_flag = std::make_shared<std::atomic_int>(0);
             work.blk_K_size = cache_blk_k_size;

             work.n0 = (start_blkN)*REG_BLK_N_SIZE;
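Dropping the named temporary lets the `shared_ptr` prvalue be move-assigned straight into its destination; reorder.cpp below applies the same idea to `prim`. A minimal sketch:

```cpp
#include <atomic>
#include <memory>

struct Work {
    std::shared_ptr<std::atomic_int> sync_flag;
};

int main() {
    Work work;
    // Before: auto tmp = std::make_shared<std::atomic_int>(0);
    //         work.sync_flag = tmp;   // copy from the named local: refcount churn
    // After: the temporary is moved into place, no extra refcounting.
    work.sync_flag = std::make_shared<std::atomic_int>(0);
    return work.sync_flag->load();
}
```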

src/plugins/intel_cpu/src/nodes/pad.cpp

+1 -1

@@ -192,7 +192,7 @@ void Pad::createPrimitive() {
     prepareParams();
     if (padValue) {
         // restore original memory object
-        srcMemory[PAD_VALUE_ID] = padValue;
+        srcMemory[PAD_VALUE_ID] = std::move(padValue);
     }

     updateLastInputDims();

src/plugins/intel_cpu/src/nodes/reorder.cpp

+3 -4

@@ -296,7 +296,7 @@ void Reorder::createReorderPrimitive(const DnnlMemoryDescPtr& srcDesc, const Dnn
     // useful in situations when rank in IR does not much rank that is required by the oneDNN primitive,
     // but the input tensor can be reshaped (e.g. weights for grouped convolutions, biases etc.)
     if (srcDesc->hasLayoutType(LayoutType::ncsp) && srcDesc->getShape().getRank() != dstDesc->getShape().getRank()) {
-        const auto newDims = dstDesc->getShape().getStaticDims();
+        const auto& newDims = dstDesc->getShape().getStaticDims();
         const auto newFormat = DnnlExtensionUtils::GetPlainFormatByRank(newDims.size());

         src_desc = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(newDims),
@@ -307,9 +307,8 @@ void Reorder::createReorderPrimitive(const DnnlMemoryDescPtr& srcDesc, const Dnn
     DEBUG_LOG("CreateReorderPrimitive is called for node", getName(), " src desc: ", src_desc, " dst_desc: ", dst_desc);
     CPU_NODE_ASSERT(src_desc.get_ndims() == dst_desc.get_ndims(),
                     "OneDNN doesn't support reorder with different ranks.");
-    auto result = getReorderPrim(context->getParamsCache(), getEngine(), src_desc, dst_desc);
-    CPU_NODE_ASSERT(result, "could not create reorder primitive: unsupported reorder case.");
-    prim = result;
+    prim = getReorderPrim(context->getParamsCache(), getEngine(), src_desc, dst_desc);
+    CPU_NODE_ASSERT(prim, "could not create reorder primitive: unsupported reorder case.");

     selectedPD->setImplementationType(
         parse_impl_name(DnnlExtensionUtils::query_impl_info_str(prim.get_primitive_desc())));

src/plugins/intel_cpu/src/nodes/rms_norm.h

+1 -1

@@ -23,7 +23,7 @@ class RMSNorm : public Node {
         return false;
     }
     void executeDynamicImpl(dnnl::stream strm) override {
-        execute(strm);
+        execute(std::move(strm));
     }
     void initSupportedPrimitiveDescriptors() override;
     void execute(dnnl::stream strm) override;

src/plugins/intel_cpu/src/nodes/rnn.cpp

+1 -1

@@ -1030,7 +1030,7 @@ void RNN::prepareMemory(const DnnlMemoryDescPtr& new_desc, size_t idx) {
         res_ptr = create();
     }

-    internalBlobMemory[idx] = res_ptr;
+    internalBlobMemory[idx] = std::move(res_ptr);
 }

 void RNN::copyWeightsData() {

src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp

+1 -1

@@ -55,7 +55,7 @@ bool StringTensorPack::needPrepareParams() const {
 }

 void StringTensorPack::executeDynamicImpl(dnnl::stream strm) {
-    execute(strm);
+    execute(std::move(strm));
 }

 template <class T_idx>

src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp

+1 -1

@@ -64,7 +64,7 @@ void StringTensorUnpack::executeDynamicImpl(dnnl::stream strm) {
         totalCharLength += srcData[i].length();
     }
     redefineOutputMemory({srcDataDims, srcDataDims, {totalCharLength}});
-    execute(strm);
+    execute(std::move(strm));
 }

 void StringTensorUnpack::execute(dnnl::stream strm) {

src/plugins/intel_cpu/src/partitioned_mem_blk.h

+1 -1

@@ -19,7 +19,7 @@ class PartitionedMemoryBlock : public IMemoryBlockObserver {
                            size_t total_chunks = 1,
                            ptrdiff_t offset_chunks = 0,
                            size_t size_chunks = 1)
-        : m_pBlock(pBlock),
+        : m_pBlock(std::move(pBlock)),
          m_total_chunks(total_chunks),
          m_offset_chunks(offset_chunks),
          m_size_chunks(size_chunks) {

src/plugins/intel_cpu/src/plugin.cpp

+1 -1

@@ -544,7 +544,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::import_model(std::istream& model_str
     CacheDecrypt decrypt{codec_xor};
     bool decript_from_string = false;
     if (config.count(ov::cache_encryption_callbacks.name())) {
-        auto encryption_callbacks = config.at(ov::cache_encryption_callbacks.name()).as<EncryptionCallbacks>();
+        const auto& encryption_callbacks = config.at(ov::cache_encryption_callbacks.name()).as<EncryptionCallbacks>();
         decrypt.m_decrypt_str = encryption_callbacks.decrypt;
         decript_from_string = true;
     }

src/plugins/intel_cpu/src/proxy_mem_blk.cpp

+2 -2

@@ -14,7 +14,7 @@ void ProxyMemoryBlock::setMemBlock(std::shared_ptr<IMemoryBlock> pBlock) {
         return;
     }

-    m_pMemBlock = pBlock;
+    m_pMemBlock = std::move(pBlock);
     notifyUpdate();
 }

@@ -24,7 +24,7 @@ void ProxyMemoryBlock::setMemBlockResize(std::shared_ptr<IMemoryBlock> pBlock) {
         return;
     }

-    m_pMemBlock = pBlock;
+    m_pMemBlock = std::move(pBlock);
     m_pMemBlock->resize(m_size);
     notifyUpdate();
 }
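Both setters take the `shared_ptr` by value and now move it into the member, the classic sink-setter idiom: at most one copy at the call boundary, and none when the caller moves. A sketch under illustrative names:

```cpp
#include <memory>
#include <utility>

class Proxy {
public:
    void setBlock(std::shared_ptr<int> pBlock) {
        m_pBlock = std::move(pBlock);  // reuse the parameter's ownership
    }

private:
    std::shared_ptr<int> m_pBlock;
};

int main() {
    Proxy proxy;
    auto block = std::make_shared<int>(42);
    proxy.setBlock(std::move(block));  // zero refcount bumps end to end
    return 0;
}
```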

src/plugins/intel_cpu/src/proxy_mem_blk.h

+1 -1

@@ -17,7 +17,7 @@ class ProxyMemoryBlock : public IMemoryBlockObserver {
     ProxyMemoryBlock() : m_pOrigBlock(std::make_shared<MemoryBlockWithReuse>()), m_pMemBlock(m_pOrigBlock) {}
     explicit ProxyMemoryBlock(std::shared_ptr<IMemoryBlock> pBlock) {
         OPENVINO_ASSERT(pBlock, "Memory block is uninitialized");
-        m_pMemBlock = pBlock;
+        m_pMemBlock = std::move(pBlock);
     }

     void* getRawPtr() const noexcept override;

src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp

+1 -1

@@ -13,7 +13,7 @@ namespace node {

 class RMSNormShapeInferFactory : public ShapeInferFactory {
 public:
-    RMSNormShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(op) {}
+    RMSNormShapeInferFactory(std::shared_ptr<ov::Node> op) : m_op(std::move(op)) {}
     ShapeInferPtr makeShapeInfer() const override;

 private:
