diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp
index fe347f3b4e252a..14a9b6e41c516f 100644
--- a/src/plugins/intel_cpu/src/compiled_model.cpp
+++ b/src/plugins/intel_cpu/src/compiled_model.cpp
@@ -132,7 +132,7 @@ CompiledModel::CompiledModel(const std::shared_ptr& model,
                                                      ov::hint::SchedulingCoreType::ANY_CORE,
                                                      false,
                                                      true,
-                                                     sub_streams_table,
+                                                     std::move(sub_streams_table),
                                                      sub_cfg.streamsRankTable[i]};
         m_sub_compiled_models.push_back(
             std::make_shared(model, plugin, sub_cfg, loaded_from_cache, m_sub_memory_manager));
diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp
index 594523e697390b..9937c3fe1e82fa 100644
--- a/src/plugins/intel_cpu/src/config.cpp
+++ b/src/plugins/intel_cpu/src/config.cpp
@@ -311,7 +311,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
             }
         } else if (key == ov::cache_encryption_callbacks.name()) {
             try {
-                auto encryption_callbacks = val.as();
+                const auto& encryption_callbacks = val.as();
                 cacheEncrypt = encryption_callbacks.encrypt;
                 cacheDecrypt = encryption_callbacks.decrypt;
             } catch (ov::Exception&) {
diff --git a/src/plugins/intel_cpu/src/cpu_memory.cpp b/src/plugins/intel_cpu/src/cpu_memory.cpp
index 71851c529c6095..38ee24da7cba2f 100644
--- a/src/plugins/intel_cpu/src/cpu_memory.cpp
+++ b/src/plugins/intel_cpu/src/cpu_memory.cpp
@@ -84,7 +84,7 @@ Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data
 Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block)
     : m_eng(eng),
       m_pMemDesc(desc),
-      m_blockHandle(block, this),
+      m_blockHandle(std::move(block), this),
       dnnlMemHandle(this) {
     if (desc->getPrecision() == element::string) {
         OPENVINO_THROW("[CPU] Memory object can't be created for string data.");
     }
@@ -95,7 +95,7 @@
 }
 
 Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, MemoryBlockPtr block)
-    : Memory::Memory(eng, desc.clone(), block) {}
+    : Memory::Memory(eng, desc.clone(), std::move(block)) {}
 
 size_t Memory::getSize() const {
     auto size = getDesc().getCurrentMemSize();
@@ -592,14 +592,14 @@ bool mbind_move(const dnnl::memory mem, int numaNodeID) {
 }
 
 MemoryPtr split_horizontal(const dnnl::engine& eng,
-                           const MemoryPtr src,
+                           const MemoryPtr& src,
                            int dim,
                            int w_rank,
                            int w_size,
                            bool need_fill) {
     auto desc = src->getDescPtr();
     auto shape = src->getShape();
-    auto dims = shape.getDims();
+    const auto& dims = shape.getDims();
     auto prec = src->getPrecision();
     if (dim < 0) {
         dim += dims.size();
@@ -655,14 +655,14 @@
 }
 
 MemoryPtr split_vertical(const dnnl::engine& eng,
-                         const MemoryPtr src,
+                         const MemoryPtr& src,
                          int dim,
                          int w_rank,
                          int w_size,
                          bool need_fill) {
     auto desc = src->getDescPtr();
-    auto shape = src->getShape();
-    auto dims = shape.getDims();
+    const auto& shape = src->getShape();
+    const auto& dims = shape.getDims();
     auto prec = src->getPrecision();
     if (dim < 0) {
         dim += dims.size();
diff --git a/src/plugins/intel_cpu/src/cpu_memory.h b/src/plugins/intel_cpu/src/cpu_memory.h
index f6837064babfa6..8c02578234d7b0 100644
--- a/src/plugins/intel_cpu/src/cpu_memory.h
+++ b/src/plugins/intel_cpu/src/cpu_memory.h
@@ -122,7 +122,7 @@ using MemoryBlockCPtr = std::shared_ptr;
 
 class DnnlMemBlockHandle {
 public:
-    DnnlMemBlockHandle(MemoryBlockPtr pBlock, Memory* pMem) : m_pMemBlock(pBlock), m_pMem(pMem) {
+    DnnlMemBlockHandle(MemoryBlockPtr pBlock, Memory* pMem) : m_pMemBlock(std::move(pBlock)), m_pMem(pMem) {
         if (m_pMemBlock) {
             m_pMemBlock->registerMemory(m_pMem);
         }
@@ -447,13 +447,13 @@ bool mbind_move(const MemoryCPtr mem, int numaNodeID);
 bool mbind_move(const dnnl::memory mem, int numaNodeID);
 
 MemoryPtr split_horizontal(const dnnl::engine& eng,
-                           const MemoryPtr src,
+                           const MemoryPtr& src,
                            int dim,
                            int w_rank,
                            int w_size,
                            bool need_fill = true);
 MemoryPtr split_vertical(const dnnl::engine& eng,
-                         const MemoryPtr src,
+                         const MemoryPtr& src,
                          int dim,
                          int w_rank,
                          int w_size,
diff --git a/src/plugins/intel_cpu/src/edge.cpp b/src/plugins/intel_cpu/src/edge.cpp
index 1eabc6275bf4b0..1185934265c883 100644
--- a/src/plugins/intel_cpu/src/edge.cpp
+++ b/src/plugins/intel_cpu/src/edge.cpp
@@ -300,7 +300,7 @@ void Edge::allocate(MemoryBlockPtr memBlock) {
 
     auto allocateFunc = [OV_CAPTURE_CPY_AND_THIS](const MemoryDesc& inputDesc) -> MemoryPtr {
         auto parentPtr = getParent();
-        return std::make_shared(parentPtr->getEngine(), inputDesc, memBlock);
+        return std::make_shared(parentPtr->getEngine(), inputDesc, std::move(memBlock));
     };
 
     allocateCommon(allocateFunc);
diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp
index aab78a4d5f15bd..c4fd784baa8800 100644
--- a/src/plugins/intel_cpu/src/graph.cpp
+++ b/src/plugins/intel_cpu/src/graph.cpp
@@ -193,7 +193,7 @@ void Graph::Replicate(const std::shared_ptr& model,
     }
 
     // Add stub output node for unused data
-    for (auto unusedOutput : unusedOutputs) {
+    for (const auto& unusedOutput : unusedOutputs) {
         auto parentNode = op2node[unusedOutput.get_node_shared_ptr()];
         const auto port = unusedOutput.get_index();
         const auto nodeName =
diff --git a/src/plugins/intel_cpu/src/graph_context.cpp b/src/plugins/intel_cpu/src/graph_context.cpp
index 462cdab2a9b5c0..cb675e150490a2 100644
--- a/src/plugins/intel_cpu/src/graph_context.cpp
+++ b/src/plugins/intel_cpu/src/graph_context.cpp
@@ -19,7 +19,7 @@ GraphContext::GraphContext(const Config& config,
       weightsCache(std::move(w_cache)),
       isGraphQuantizedFlag(isGraphQuantized),
       streamExecutor(streamExecutor),
-      subMemoryManager(sub_memory_manager),
+      subMemoryManager(std::move(sub_memory_manager)),
       memoryStatesRegister(std::make_shared()),
       networkMemoryControl(std::make_shared()) {
     rtParamsCache = std::make_shared(config.rtCacheCapacity);
diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp
index 44b9904bde202a..363c774a0bfba8 100644
--- a/src/plugins/intel_cpu/src/infer_request.cpp
+++ b/src/plugins/intel_cpu/src/infer_request.cpp
@@ -273,7 +273,7 @@ void SyncInferRequest::change_default_ptr(Graph& graph) {
                 :  // then swap internal buffer to avoid data corruption
                 controlBlock.currentMemBlock();  // else reuse the existing buffer
 
-            outputMemBlock->setMemBlockResize(memBlock);
+            outputMemBlock->setMemBlockResize(std::move(memBlock));
             DEBUG_LOG("reset proxy ",
                       outputMemBlock,
                       ", actual ",
diff --git a/src/plugins/intel_cpu/src/memory_control.cpp b/src/plugins/intel_cpu/src/memory_control.cpp
index 26cd8459458b9d..bdcbc3e0dcb02e 100644
--- a/src/plugins/intel_cpu/src/memory_control.cpp
+++ b/src/plugins/intel_cpu/src/memory_control.cpp
@@ -16,7 +16,9 @@ namespace {
 
 class StaticPartitionMemoryBlock : public IMemoryBlockObserver {
 public:
-    StaticPartitionMemoryBlock(MemoryBlockPtr pBlock, ptrdiff_t offset) : m_pBlock(pBlock), m_offset(offset) {
+    StaticPartitionMemoryBlock(MemoryBlockPtr pBlock, ptrdiff_t offset)
+        : m_pBlock(std::move(pBlock)),
+          m_offset(offset) {
         OPENVINO_ASSERT(m_pBlock, "Memory block is uninitialized");
     }
 
@@ -410,7 +412,7 @@ edgeClusters MemoryControl::findEdgeClusters(const std::vector& graphEd
 }
 
 MemoryControl& NetworkMemoryControl::createMemoryControlUnit(std::vector syncInds) {
-    m_controlUnits.emplace_back(std::unique_ptr(new MemoryControl(syncInds)));
+    m_controlUnits.emplace_back(std::unique_ptr(new MemoryControl(std::move(syncInds))));
     return *(m_controlUnits.back());
 }
 
diff --git a/src/plugins/intel_cpu/src/nodes/composite.cpp b/src/plugins/intel_cpu/src/nodes/composite.cpp
index 0d8b33d90fbd9c..4c86eb43eb3fdc 100644
--- a/src/plugins/intel_cpu/src/nodes/composite.cpp
+++ b/src/plugins/intel_cpu/src/nodes/composite.cpp
@@ -46,7 +46,7 @@ void Composite::selectOptimalPrimitiveDescriptor() {
     for (size_t i = 0; i < getParentEdges().size(); i++) {
         auto desc = getParentOutputMemDesc(getParentEdgeAt(i));
         inConfs.emplace_back(desc);
-        graphInputConfig.emplace_back(node::Input::InputConfig{desc, true});
+        graphInputConfig.emplace_back(node::Input::InputConfig{std::move(desc), true});
     }
 
     std::vector graphOutputConfig;
@@ -65,7 +65,7 @@ void Composite::selectOptimalPrimitiveDescriptor() {
         outConfs.emplace_back(desc);
     }
 
-    const NodeConfig config(inConfs, outConfs);
+    const NodeConfig config(std::move(inConfs), std::move(outConfs));
 
     supportedPrimitiveDescriptors.clear();
     supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::undef);
@@ -99,7 +99,7 @@ void Composite::execute(dnnl::stream) {
 }
 
 void Composite::executeDynamicImpl(dnnl::stream strm) {
-    execute(strm);
+    execute(std::move(strm));
 
     // since the shape inference is not performed for the composite node
     // a memory of the extra child edges, attached to the output ports
diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
index 4a2e3728887087..6ffa2fa1fd273b 100644
--- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
+++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -220,7 +220,7 @@ void FullyConnected::needPrepareParamsForTensorParallel() {
         };
 
         int dim = -1;
-        auto dst_shape = dstMemoryBuffer->getShape();
+        const auto& dst_shape = dstMemoryBuffer->getShape();
         auto dst_desc = dstMemoryBuffer->getDescPtr();
         auto dims = dst_shape.getDims();
         if (dim < 0) {
@@ -625,7 +625,7 @@ void FullyConnected::needUpdateTensorParalelConfig() {
     // 1. weight shape is dynamic
    // 2. last dim can be splited.
     if (tp_cfg.enable_tensor_parallel) {
-        auto& shape = getSrcMemoryAtPort(WEIGHTS)->getShape();
+        const auto& shape = getSrcMemoryAtPort(WEIGHTS)->getShape();
         if (shape.isDynamic()) {
             tp_cfg.enable_tensor_parallel = false;
         } else if (shape.getDims()[0] < static_cast(tp_cfg.w_size)) {
diff --git a/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp b/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp
index 91a72fdbeb4cab..7b3476e607aa62 100644
--- a/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp
+++ b/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp
@@ -224,9 +224,8 @@ class LinearGateUp {
                 blkN++;
             }
             if (blkN) {
-                auto shared_atomic = std::make_shared(0);
                 auto& work = works[ithr];
-                work.sync_flag = shared_atomic;
+                work.sync_flag = std::make_shared(0);
                 work.blk_K_size = cache_blk_k_size;
 
                 work.n0 = (start_blkN)*REG_BLK_N_SIZE;
diff --git a/src/plugins/intel_cpu/src/nodes/pad.cpp b/src/plugins/intel_cpu/src/nodes/pad.cpp
index eb1db0c6034a5e..0b0060230afc0d 100644
--- a/src/plugins/intel_cpu/src/nodes/pad.cpp
+++ b/src/plugins/intel_cpu/src/nodes/pad.cpp
@@ -192,7 +192,7 @@ void Pad::createPrimitive() {
         prepareParams();
         if (padValue) {
             // restore original memory object
-            srcMemory[PAD_VALUE_ID] = padValue;
+            srcMemory[PAD_VALUE_ID] = std::move(padValue);
         }
 
     updateLastInputDims();
diff --git a/src/plugins/intel_cpu/src/nodes/reorder.cpp b/src/plugins/intel_cpu/src/nodes/reorder.cpp
index 67ef17e3aa9d87..ca72fbcc29cf03 100644
--- a/src/plugins/intel_cpu/src/nodes/reorder.cpp
+++ b/src/plugins/intel_cpu/src/nodes/reorder.cpp
@@ -296,7 +296,7 @@ void Reorder::createReorderPrimitive(const DnnlMemoryDescPtr& srcDesc, const Dnn
     // useful in situations when rank in IR does not much rank that is required by the oneDNN primitive,
     // but the input tensor can be reshaped (e.g. weights for grouped convolutions, biases etc.)
     if (srcDesc->hasLayoutType(LayoutType::ncsp) && srcDesc->getShape().getRank() != dstDesc->getShape().getRank()) {
-        const auto newDims = dstDesc->getShape().getStaticDims();
+        const auto& newDims = dstDesc->getShape().getStaticDims();
         const auto newFormat = DnnlExtensionUtils::GetPlainFormatByRank(newDims.size());
 
         src_desc = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(newDims),
@@ -307,9 +307,8 @@ void Reorder::createReorderPrimitive(const DnnlMemoryDescPtr& srcDesc, const Dnn
 
     DEBUG_LOG("CreateReorderPrimitive is called for node", getName(), " src desc: ", src_desc, " dst_desc: ", dst_desc);
     CPU_NODE_ASSERT(src_desc.get_ndims() == dst_desc.get_ndims(),
                     "OneDNN doesn't support reorder with different ranks.");
-    auto result = getReorderPrim(context->getParamsCache(), getEngine(), src_desc, dst_desc);
-    CPU_NODE_ASSERT(result, "could not create reorder primitive: unsupported reorder case.");
-    prim = result;
+    prim = getReorderPrim(context->getParamsCache(), getEngine(), src_desc, dst_desc);
+    CPU_NODE_ASSERT(prim, "could not create reorder primitive: unsupported reorder case.");
 
     selectedPD->setImplementationType(
         parse_impl_name(DnnlExtensionUtils::query_impl_info_str(prim.get_primitive_desc())));
diff --git a/src/plugins/intel_cpu/src/nodes/rms_norm.h b/src/plugins/intel_cpu/src/nodes/rms_norm.h
index 0adc320a3c07fb..cb3b4ffc1a198f 100644
--- a/src/plugins/intel_cpu/src/nodes/rms_norm.h
+++ b/src/plugins/intel_cpu/src/nodes/rms_norm.h
@@ -23,7 +23,7 @@ class RMSNorm : public Node {
         return false;
     }
     void executeDynamicImpl(dnnl::stream strm) override {
-        execute(strm);
+        execute(std::move(strm));
     }
     void initSupportedPrimitiveDescriptors() override;
     void execute(dnnl::stream strm) override;
diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp
index a1f6d60e289491..e6138c1bc7f5d5 100644
--- a/src/plugins/intel_cpu/src/nodes/rnn.cpp
+++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp
@@ -1030,7 +1030,7 @@ void RNN::prepareMemory(const DnnlMemoryDescPtr& new_desc, size_t idx) {
         res_ptr = create();
     }
 
-    internalBlobMemory[idx] = res_ptr;
+    internalBlobMemory[idx] = std::move(res_ptr);
 }
 
 void RNN::copyWeightsData() {
diff --git a/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp b/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp
index ff52cc32fe1cb1..06055f551c3f65 100644
--- a/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp
+++ b/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp
@@ -55,7 +55,7 @@ bool StringTensorPack::needPrepareParams() const {
 }
 
 void StringTensorPack::executeDynamicImpl(dnnl::stream strm) {
-    execute(strm);
+    execute(std::move(strm));
 }
 
 template
diff --git a/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp b/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp
index 4d6b2c0135b912..49f9cbf7846b9e 100644
--- a/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp
+++ b/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp
@@ -64,7 +64,7 @@ void StringTensorUnpack::executeDynamicImpl(dnnl::stream strm) {
         totalCharLength += srcData[i].length();
     }
     redefineOutputMemory({srcDataDims, srcDataDims, {totalCharLength}});
-    execute(strm);
+    execute(std::move(strm));
 }
 
 void StringTensorUnpack::execute(dnnl::stream strm) {
diff --git a/src/plugins/intel_cpu/src/partitioned_mem_blk.h b/src/plugins/intel_cpu/src/partitioned_mem_blk.h
index b205e8ff45e99e..5290bd85263498 100644
--- a/src/plugins/intel_cpu/src/partitioned_mem_blk.h
+++ b/src/plugins/intel_cpu/src/partitioned_mem_blk.h
@@ -19,7 +19,7 @@ class PartitionedMemoryBlock : public IMemoryBlockObserver {
                            size_t total_chunks = 1,
                            ptrdiff_t offset_chunks = 0,
                            size_t size_chunks = 1)
-        : m_pBlock(pBlock),
+        : m_pBlock(std::move(pBlock)),
          m_total_chunks(total_chunks),
           m_offset_chunks(offset_chunks),
           m_size_chunks(size_chunks) {
diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp
index dff002f275d68e..c0d8d655a753c8 100644
--- a/src/plugins/intel_cpu/src/plugin.cpp
+++ b/src/plugins/intel_cpu/src/plugin.cpp
@@ -544,7 +544,7 @@ std::shared_ptr Plugin::import_model(std::istream& model_str
     CacheDecrypt decrypt{codec_xor};
     bool decript_from_string = false;
     if (config.count(ov::cache_encryption_callbacks.name())) {
-        auto encryption_callbacks = config.at(ov::cache_encryption_callbacks.name()).as();
+        const auto& encryption_callbacks = config.at(ov::cache_encryption_callbacks.name()).as();
         decrypt.m_decrypt_str = encryption_callbacks.decrypt;
         decript_from_string = true;
     }
diff --git a/src/plugins/intel_cpu/src/proxy_mem_blk.cpp b/src/plugins/intel_cpu/src/proxy_mem_blk.cpp
index af639a61013d14..35a0126eba7ed6 100644
--- a/src/plugins/intel_cpu/src/proxy_mem_blk.cpp
+++ b/src/plugins/intel_cpu/src/proxy_mem_blk.cpp
@@ -14,7 +14,7 @@ void ProxyMemoryBlock::setMemBlock(std::shared_ptr pBlock) {
         return;
     }
 
-    m_pMemBlock = pBlock;
+    m_pMemBlock = std::move(pBlock);
     notifyUpdate();
 }
 
@@ -24,7 +24,7 @@ void ProxyMemoryBlock::setMemBlockResize(std::shared_ptr pBlock) {
         return;
     }
 
-    m_pMemBlock = pBlock;
+    m_pMemBlock = std::move(pBlock);
     m_pMemBlock->resize(m_size);
     notifyUpdate();
 }
diff --git a/src/plugins/intel_cpu/src/proxy_mem_blk.h b/src/plugins/intel_cpu/src/proxy_mem_blk.h
index a6fce67eacf880..5591bfbc2764dc 100644
--- a/src/plugins/intel_cpu/src/proxy_mem_blk.h
+++ b/src/plugins/intel_cpu/src/proxy_mem_blk.h
@@ -17,7 +17,7 @@ class ProxyMemoryBlock : public IMemoryBlockObserver {
     ProxyMemoryBlock() : m_pOrigBlock(std::make_shared()), m_pMemBlock(m_pOrigBlock) {}
     explicit ProxyMemoryBlock(std::shared_ptr pBlock) {
         OPENVINO_ASSERT(pBlock, "Memory block is uninitialized");
-        m_pMemBlock = pBlock;
+        m_pMemBlock = std::move(pBlock);
     }
 
     void* getRawPtr() const noexcept override;
diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp
index b1eacae9ce4778..82d831c65ff648 100644
--- a/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp
+++ b/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp
@@ -13,7 +13,7 @@ namespace node {
 
 class RMSNormShapeInferFactory : public ShapeInferFactory {
 public:
-    RMSNormShapeInferFactory(std::shared_ptr op) : m_op(op) {}
+    RMSNormShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {}
     ShapeInferPtr makeShapeInfer() const override;
 
 private:
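
Note (not part of the patch): the changes above repeatedly apply two idioms — sink parameters taken by value and moved into members with std::move, and binding the result of a by-reference getter to const auto& to avoid a copy. The following standalone sketch illustrates both; Holder and MemoryBlock are hypothetical, simplified types invented for this example and do not belong to the OpenVINO code base.

    #include <memory>
    #include <utility>
    #include <vector>

    struct MemoryBlock {
        std::vector<char> bytes;
    };
    using MemoryBlockPtr = std::shared_ptr<MemoryBlock>;

    class Holder {
    public:
        // Sink parameter: accept by value, then move into the member.
        // The caller chooses whether to copy or move; the constructor adds
        // no extra shared_ptr refcount traffic of its own.
        explicit Holder(MemoryBlockPtr block) : m_block(std::move(block)) {}

        // Returning by const reference lets callers bind without copying.
        const MemoryBlockPtr& block() const { return m_block; }

    private:
        MemoryBlockPtr m_block;
    };

    int main() {
        auto block = std::make_shared<MemoryBlock>();

        Holder keepCopy(block);             // copies: 'block' remains usable
        Holder takeOver(std::move(block));  // moves: no refcount bump, 'block' is now empty

        // Binding the getter result to a const reference avoids a shared_ptr copy,
        // which is the effect of the 'const auto&' changes in the diff.
        const auto& held = keepCopy.block();
        return held ? 0 : 1;
    }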