update.
yangwang201911 committed Dec 20, 2024
1 parent acdb52e commit 4e27715
Showing 3 changed files with 161 additions and 48 deletions.
5 changes: 1 addition & 4 deletions src/plugins/auto/src/schedule.cpp
@@ -89,10 +89,7 @@ void Schedule::generate_workers(const std::string& device, const SoCompiledModel
         (m_context->m_device_priorities.end() == it_numrequests || it_numrequests->num_requests_per_devices == -1)
             ? optimal_num
             : it_numrequests->num_requests_per_devices;
-    num_requests =
-        (num_requests == 1 && m_context->m_performance_hint != ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT)
-            ? 2
-            : num_requests;
+    num_requests = (num_requests == 1) ? 2 : num_requests;
     auto& worker_requests = m_worker_requests[device];
     auto& idle_worker_requests = m_idle_worker_requests[device];
     worker_requests.resize(num_requests);
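Effect of this hunk: previously a device whose computed request count came out as 1 was bumped to 2 only when the performance hint was not CUMULATIVE_THROUGHPUT; after the change the bump is unconditional, so every device's worker pool holds at least two requests. Below is a minimal standalone sketch of the resulting rule; the function and parameter names are illustrative placeholders, not plugin code.

#include <iostream>

// Minimal sketch (not the plugin source) of the worker-count rule after this commit.
// "requested_num" stands in for the per-device setting; -1 means "not configured".
int pick_num_requests(int requested_num, int optimal_num) {
    int num_requests = (requested_num == -1) ? optimal_num : requested_num;
    // A single worker request is always promoted to two, regardless of the performance hint.
    return (num_requests == 1) ? 2 : num_requests;
}

int main() {
    std::cout << pick_num_requests(-1, 1) << "\n";  // 2: an optimal count of 1 is bumped
    std::cout << pick_num_requests(4, 1) << "\n";   // 4: explicit settings are kept as-is
}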
28 changes: 22 additions & 6 deletions src/plugins/auto/tests/unit/dynamic_output_test.cpp
@@ -7,7 +7,6 @@
 
 #include "include/auto_unit_test.hpp"
 #include "openvino/runtime/threading/immediate_executor.hpp"
-
 using DynamicOutputConfigParams = std::tuple<ov::Any,  // priority device list
                                              ov::Any   // expected device to run inference on
                                              >;
@@ -21,14 +20,18 @@ class DynamicOutputInferenceTest : public tests::AutoTest, public ::testing::Tes
         mockExecutor.reset();
         mockExecutorActual.reset();
         mockInferrequest.reset();
+        mockInferrequest_2.reset();
         mockInferrequestActual.reset();
+        mockInferrequestActual_2.reset();
     }
 
 protected:
     ov::Any priorityList;
     ov::Any targetList;
     std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequest;
+    std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequest_2;
     std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequestActual;
+    std::shared_ptr<ov::mock_auto_plugin::MockAsyncInferRequest> mockInferrequestActual_2;
     std::shared_ptr<ov::threading::ImmediateExecutor> mockExecutor;
     std::shared_ptr<ov::threading::ImmediateExecutor> mockExecutorActual;
 };
@@ -53,10 +56,22 @@ void DynamicOutputInferenceTest::SetUp() {
     mockExecutorActual = std::make_shared<ov::threading::ImmediateExecutor>();
     mockInferrequest =
         std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternal, mockExecutor, nullptr, false);
+    // will be at least 2 infer requests for mocked CPU/GPU
+    auto inferReqInternal_2 = std::make_shared<ov::mock_auto_plugin::MockISyncInferRequest>(mockIExeNet);
+    mockInferrequest_2 =
+        std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternal_2, mockExecutor, nullptr, false);
+
+    auto inferReqInternalActual_2 = std::make_shared<ov::mock_auto_plugin::MockISyncInferRequest>(mockIExeNetActual);
+
     mockInferrequestActual = std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalActual,
                                                                                            mockExecutorActual,
                                                                                            nullptr,
                                                                                            false);
+    mockInferrequestActual_2 = std::make_shared<ov::mock_auto_plugin::MockAsyncInferRequest>(inferReqInternalActual_2,
+                                                                                              mockExecutorActual,
+                                                                                              nullptr,
+                                                                                              false);
+
     std::tie(priorityList, targetList) = GetParam();
     auto targets = targetList.as<std::vector<std::string>>();
     ON_CALL(*core, get_available_devices()).WillByDefault(Return(targets));
@@ -103,11 +118,12 @@ TEST_P(DynamicOutputInferenceTest, CanInferWithOutputChangedFromDynamicOnAutoToS
         auto tensor = inferReqInternal->get_tensor(it);
         tensor->set_shape(ov::Shape{2, 3});
     }
-    ON_CALL(*mockIExeNet.get(), create_infer_request()).WillByDefault(Return(mockInferrequest));
-    ON_CALL(*mockIExeNetActual.get(), create_infer_request()).WillByDefault(InvokeWithoutArgs([this]() {
-        std::this_thread::sleep_for(std::chrono::milliseconds(0));
-        return mockInferrequestActual;
-    }));
+    EXPECT_CALL(*mockIExeNet.get(), create_infer_request())
+        .WillOnce(Return(mockInferrequest))
+        .WillOnce(Return(mockInferrequest_2));
+    EXPECT_CALL(*mockIExeNetActual.get(), create_infer_request())
+        .WillOnce(Return(mockInferrequestActual))
+        .WillOnce(Return(mockInferrequestActual_2));
     config.insert(ov::device::priorities(priorityList.as<std::string>()));
     config.insert(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
     std::shared_ptr<ov::ICompiledModel> exeNetwork;
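Because the schedule now always creates at least two worker requests per device, each mocked compiled model is asked for an infer request twice; the test therefore prepares a second mock request per device and replaces the ON_CALL defaults with EXPECT_CALL expectations that hand out the two objects in order. A minimal gmock sketch of that pattern, using purely illustrative names and assuming the usual GoogleTest/GoogleMock setup, is shown below.

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <memory>

using ::testing::Return;

struct Request {};

// Illustrative stand-in for a compiled model that vends infer requests.
class MockFactory {
public:
    MOCK_METHOD(std::shared_ptr<Request>, create_infer_request, ());
};

TEST(WillOnceSketch, DistinctObjectPerCall) {
    MockFactory factory;
    auto first = std::make_shared<Request>();
    auto second = std::make_shared<Request>();

    // Two chained WillOnce clauses: the first call returns `first`, the second returns
    // `second`, and the expectation fails unless the method is called exactly twice.
    EXPECT_CALL(factory, create_infer_request())
        .WillOnce(Return(first))
        .WillOnce(Return(second));

    EXPECT_EQ(factory.create_infer_request(), first);
    EXPECT_EQ(factory.create_infer_request(), second);
}

int main(int argc, char** argv) {
    ::testing::InitGoogleMock(&argc, argv);
    return RUN_ALL_TESTS();
}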