diff --git a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp
index 3517dbdd0e9c71..e102cb3f8e560e 100644
--- a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp
+++ b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp
@@ -48,4 +48,8 @@ class ZeroRemoteTensor final : public RemoteTensor {
     bool _external_memory_support = false;
 };
 
+inline bool is_remote_tensor(const std::shared_ptr<ov::ITensor>& tensor) {
+    return std::dynamic_pointer_cast<ZeroRemoteTensor>(tensor) != nullptr;
+}
+
 }  // namespace intel_npu
diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
index 2e11b80807c543..15569186a7f545 100644
--- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
+++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
@@ -467,8 +467,7 @@ void ZeroInferRequest::infer_async() {
         auto zeroTensor = std::dynamic_pointer_cast<ZeroTensor>(levelZeroTensor.at(SINGLE_TENSOR));
 
         if (is_batched_input(ioIndex) || inputDescriptor.isShapeTensor || inputDescriptor.isStateInput ||
-            std::dynamic_pointer_cast<ZeroRemoteTensor>(levelZeroTensor.at(SINGLE_TENSOR)) != nullptr ||
-            zeroTensor == nullptr) {
+            is_remote_tensor(levelZeroTensor.at(SINGLE_TENSOR)) || zeroTensor == nullptr) {
             ++ioIndex;
             continue;
         }
@@ -494,8 +493,7 @@ void ZeroInferRequest::infer_async() {
         auto zeroTensor = std::dynamic_pointer_cast<ZeroTensor>(levelZeroTensor);
 
         if (outputDescriptor.isShapeTensor || outputDescriptor.isStateOutput ||
-            std::dynamic_pointer_cast<ZeroRemoteTensor>(levelZeroTensor) != nullptr ||
-            zeroTensor == nullptr) {
+            is_remote_tensor(levelZeroTensor) || zeroTensor == nullptr) {
             ++ioIndex;
             continue;
         }
@@ -535,9 +533,7 @@ void ZeroInferRequest::infer_async() {
     if (is_batched_input(inputIndex)) {
         if (_graph->get_batch_size().has_value()) {
             for (size_t i = 0; i < userTensor.size(); i++) {
-                auto levelZeroBatchRemoteTensor =
-                    std::dynamic_pointer_cast<ZeroRemoteTensor>(get_level_zero_input(inputIndex, i));
-                if (levelZeroBatchRemoteTensor == nullptr) {
+                if (!is_remote_tensor(get_level_zero_input(inputIndex, i))) {
                     void* levelZeroBuffer = get_level_zero_input(inputIndex, i)->data();
 
                     auto userBatchRemoteTensor = std::dynamic_pointer_cast<ZeroRemoteTensor>(userTensor.at(i)._ptr);
@@ -587,8 +583,7 @@ void ZeroInferRequest::infer_async() {
                   : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle);
 
         const auto& levelZeroTensor = get_level_zero_input(inputIndex);
-        auto levelZeroRemoteTensor = std::dynamic_pointer_cast<ZeroRemoteTensor>(levelZeroTensor);
-        if (levelZeroRemoteTensor == nullptr) {
+        if (!is_remote_tensor(levelZeroTensor)) {
             void* levelZeroBuffer = levelZeroTensor->data();
 
             if (userBuffer != levelZeroBuffer) {
@@ -639,8 +634,7 @@ void ZeroInferRequest::get_result() {
                   : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle);
 
         const std::shared_ptr<ov::ITensor>& levelZeroTensor = _levelZeroOutputTensors.at(outputIndex);
-        auto levelZeroRemoteTensor = std::dynamic_pointer_cast<ZeroRemoteTensor>(levelZeroTensor);
-        if (levelZeroRemoteTensor == nullptr) {
+        if (!is_remote_tensor(levelZeroTensor)) {
            void* levelZeroBuffer = levelZeroTensor->data();
 
            if (userBuffer != levelZeroBuffer) {