Fix for Coverity issues of medium and low impact (openvinotoolkit#27916)
### Details:
- *Fixed the "Data race condition" (Medium), "COPY_INSTEAD_OF_MOVE" (Low), and "Use of auto that causes a copy" (Low) Coverity issues*

### Tickets:
- *EISW-149544*
AsyaPronina authored Dec 5, 2024
1 parent 64b1f2a commit 07ecdf0
Showing 4 changed files with 18 additions and 13 deletions.
src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp (8 changes: 4 additions & 4 deletions)
```diff
@@ -30,13 +30,13 @@ std::shared_ptr<ov::Model> redirect_new_kv_to_output(const std::shared_ptr<ov::Model>& model)
 std::shared_ptr<ov::Model> cvt_kvcache_to_fp16(const std::shared_ptr<ov::Model>& model) {
     ov::preprocess::PrePostProcessor ppp(model);
 
-    for (auto tensor : model->inputs()) {
+    for (const auto& tensor : model->inputs()) {
         if (tensor.get_any_name().find("past_key") != std::string::npos) {
             ppp.input(tensor.get_any_name()).tensor().set_element_type(ov::element::Type_t::f16);
         }
     }
 
-    for (auto tensor : model->outputs()) {
+    for (const auto& tensor : model->outputs()) {
         if (tensor.get_any_name().find("present") != std::string::npos) {
             ppp.output(tensor.get_any_name()).tensor().set_element_type(ov::element::Type_t::f16);
         }
```
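This is the "Use of auto that causes a copy" pattern: `for (auto tensor : model->inputs())` deduces the element type by value, so every iteration copy-constructs a temporary just to inspect its name, while `const auto&` binds directly to the stored element. A minimal sketch of the difference; the `Port` type here is a hypothetical stand-in for a handle type such as `ov::Output<ov::Node>`:

```cpp
#include <string>
#include <vector>

// Hypothetical stand-in for a copyable handle type: copying is legal
// but does real work (here, a std::string copy per element).
struct Port {
    std::string name;
};

int main() {
    std::vector<Port> ports{{"input_ids"}, {"past_key_values"}};

    // Flagged pattern: `auto` deduces `Port`, so each iteration
    // copy-constructs a temporary element.
    for (auto p : ports) {
        (void)p.name;
    }

    // Fixed pattern: `const auto&` deduces `const Port&`, binding to
    // the element already stored in the vector; nothing is copied.
    for (const auto& p : ports) {
        (void)p.name;
    }
}
```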
```diff
@@ -55,7 +55,7 @@ void reshape_to_static(std::shared_ptr<ov::Model> model,
                        const uint32_t kvcache_size,
                        const KVAxesPosition& kv_axes_position) {
     std::map<std::string, ov::PartialShape> new_shapes;
-    for (auto input : model->inputs()) {
+    for (const auto& input : model->inputs()) {
         const auto& input_name = input.get_any_name();
         ov::PartialShape new_shape;
         if (input_name.find("input_ids") != std::string::npos) {
```
```diff
@@ -275,7 +275,7 @@ ov::npuw::LLMCompiledModel::LLMCompiledModel(const std::shared_ptr<ov::Model>& model,
 
     auto npudesc = extract_npu_descriptor(plugin);
 
-    ov::AnyMap properties_copy = other_props;
+    ov::AnyMap properties_copy = std::move(other_props);
     auto prefill_config = get_default_prefill_config(model, npudesc);
     // NB: GENERATE_HINT is only applicable for default generate config!
     const ::intel_npu::npuw::llm::GenerateHint generate_hint = m_cfg.get<::intel_npu::NPUW_LLM_GENERATE_HINT>();
```
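Coverity's COPY_INSTEAD_OF_MOVE checker fires when a copy is made from an object whose value is never needed afterwards; the fix is valid only because this is the last read of `other_props`. A hedged sketch of the idea, with a plain `std::map` standing in for `ov::AnyMap` and a hypothetical `make_properties` helper:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Stand-in for ov::AnyMap (an option-name -> value map).
using AnyMap = std::map<std::string, std::string>;

AnyMap make_properties(AnyMap other_props) {
    // Flagged pattern: `AnyMap properties_copy = other_props;` would
    // deep-copy every entry even though `other_props` is never read
    // again in this scope.
    //
    // Fixed pattern: transfer the map's contents instead of copying.
    AnyMap properties_copy = std::move(other_props);
    return properties_copy;
}

int main() {
    // Illustrative key/value only.
    const AnyMap props = make_properties({{"GENERATE_HINT", "FAST_COMPILE"}});
    std::cout << props.at("GENERATE_HINT") << "\n";
}
```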
src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp (8 changes: 4 additions & 4 deletions)
```diff
@@ -36,17 +36,17 @@ ov::npuw::LLMInferRequest::LLMInferRequest(const std::shared_ptr<ov::npuw::LLMCompiledModel>& compiled_model,
     m_kvcache_request = compiled_model->m_kvcache_compiled->create_infer_request();
     m_prefill_request = compiled_model->m_prefill_compiled->create_infer_request();
 
-    for (auto input_port : m_prefill_request->get_compiled_model()->inputs()) {
+    for (const auto& input_port : m_prefill_request->get_compiled_model()->inputs()) {
         m_prefill_in_ports.emplace(input_port.get_any_name(), input_port);
     }
-    for (auto output_port : m_prefill_request->get_compiled_model()->outputs()) {
+    for (const auto& output_port : m_prefill_request->get_compiled_model()->outputs()) {
         m_prefill_out_ports.emplace(output_port.get_any_name(), output_port);
     }
 
-    for (auto input_port : m_kvcache_request->get_compiled_model()->inputs()) {
+    for (const auto& input_port : m_kvcache_request->get_compiled_model()->inputs()) {
         m_kvcache_in_ports.emplace(input_port.get_any_name(), input_port);
     }
-    for (auto output_port : m_kvcache_request->get_compiled_model()->outputs()) {
+    for (const auto& output_port : m_kvcache_request->get_compiled_model()->outputs()) {
         m_kvcache_out_ports.emplace(output_port.get_any_name(), output_port);
     }
 }
```
src/plugins/intel_npu/src/plugin/npuw/weights_bank.cpp (11 changes: 8 additions & 3 deletions)
```diff
@@ -132,10 +132,15 @@ ov::Tensor Bank::eval_and_alloc(const LazyTensor& tensor,
 
 bool Bank::is_remote(const LazyTensor& tensor) const {
     // FIXME: make generic
+    std::lock_guard<std::mutex> guard(m_mutex);
+
     auto npu_bank = m_device_banks.find("NPU");
-    if (npu_bank != m_device_banks.end() && npu_bank->second.storage.find(tensor) != npu_bank->second.storage.end()) {
-        // Found in NPU bank so considered remote (utterly wrong for the generic case)
-        return true;
+    if (npu_bank != m_device_banks.end()) {
+        std::lock_guard<std::mutex> dev_guard(npu_bank->second.mutex);
+        if (npu_bank->second.storage.find(tensor) != npu_bank->second.storage.end()) {
+            // Found in NPU bank so considered remote (utterly wrong for the generic case)
+            return true;
+        }
     }
     return false;
 }
```
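The race fix uses two-level locking: `m_mutex` guards the map of device banks, and each `DeviceBank::mutex` guards that bank's `storage`, so this const reader can no longer race with a thread inserting into the same bank. A simplified sketch of the scheme (member names mirror the patch, but the types are illustrative stand-ins, not the plugin's actual declarations):

```cpp
#include <mutex>
#include <string>
#include <unordered_map>

struct DeviceBank {
    std::unordered_map<std::string, int> storage;  // stand-in for the tensor map
    mutable std::mutex mutex;                      // guards `storage`
};

class Bank {
public:
    bool is_remote(const std::string& key) const {
        // First lock: `m_device_banks` itself may be mutated concurrently.
        std::lock_guard<std::mutex> guard(m_mutex);
        auto npu_bank = m_device_banks.find("NPU");
        if (npu_bank != m_device_banks.end()) {
            // Second lock: another thread may be inserting into this
            // bank's storage while we search it.
            std::lock_guard<std::mutex> dev_guard(npu_bank->second.mutex);
            return npu_bank->second.storage.count(key) != 0;
        }
        return false;
    }

private:
    std::unordered_map<std::string, DeviceBank> m_device_banks;
    mutable std::mutex m_mutex;  // guards `m_device_banks`
};

int main() {
    Bank bank;
    (void)bank.is_remote("weights#0");
    return 0;
}
```

Note the lock order (bank map first, then the device bank): any writer that takes both mutexes must follow the same order to avoid deadlock.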
src/plugins/intel_npu/src/plugin/npuw/weights_bank.hpp (4 changes: 2 additions & 2 deletions)
```diff
@@ -38,13 +38,13 @@ class Bank {
     // Bank for specified device and their allocated memory
     struct DeviceBank {
         std::unordered_map<LazyTensor, ov::Tensor, LazyTensor::Hash> storage;
-        std::mutex mutex;
+        mutable std::mutex mutex;
     };
     std::unordered_map<std::string, DeviceBank> m_device_banks;
 
     ov::Tensor eval_and_alloc(const LazyTensor& tensor, DeviceBank& dbank, const std::string& device);
 
-    std::mutex m_mutex;
+    mutable std::mutex m_mutex;
     std::shared_ptr<const ov::ICore> m_core = nullptr;
     std::string m_alloc_device;
 };
```
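The `mutable` qualifiers in the header are what let the locking above compile: `Bank::is_remote` is a const member function, inside which non-`mutable` members are themselves const, and `std::mutex::lock()`/`unlock()` are non-const operations. Marking a mutex `mutable` is the standard idiom for state that is physically mutated but logically read-only. A minimal illustration with a hypothetical `Counter` class:

```cpp
#include <mutex>

class Counter {
public:
    int value() const {
        // Compiles only because m_mutex is `mutable`: locking mutates
        // the mutex object even though this method is const.
        std::lock_guard<std::mutex> guard(m_mutex);
        return m_value;
    }

private:
    mutable std::mutex m_mutex;
    int m_value = 0;
};

int main() {
    return Counter{}.value();
}
```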
