[OV] Updated performance properties (#550)
### Details:
- The properties `CPU_THREADS_NUM` and `CPU_THROUGHPUT_STREAMS` are deprecated; `props.inference_num_threads` and `props.num_streams` are used instead (as sketched below). See the [documentation](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device/performance-hint-and-thread-scheduling.html) for details.
- Added smoke tests that validate the case when the user manually sets the thread count and stream count.
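
For reference, a minimal sketch of the replacement API, assuming OpenVINO 2023.1+ where the `openvino.properties` namespaces are available; the `'CPU'` target and the concrete values are only illustrative:

```python
import openvino as ov
import openvino.properties as props
import openvino.properties.hint as hints

core = ov.Core()

# Previously: core.set_property('CPU', {'CPU_THREADS_NUM': '4'})
core.set_property("CPU", {props.inference_num_threads: 4})

# Previously: core.set_property('CPU', {'CPU_THROUGHPUT_STREAMS': 'CPU_THROUGHPUT_AUTO'})
core.set_property("CPU", {hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
                          props.num_streams: props.streams.Num.AUTO})
```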
a-sidorova authored Oct 7, 2024
1 parent cf6adb8 commit b0d60e6
Showing 3 changed files with 65 additions and 5 deletions.
src/inference/utils.py: 14 changes (9 additions, 5 deletions)
@@ -2,6 +2,8 @@

 try:
     from openvino.runtime import Core, Tensor, PartialShape
+    import openvino.properties as props
+    import openvino.properties.hint as hints
     _ov_runtime_supported = True
 except ImportError:
     _ov_runtime_supported = False
@@ -84,21 +86,23 @@ def set_property(core, devices, nthreads, nstreams, dump, mode):
     for device in device_list:
         if device == 'CPU':
             if nthreads:
-                core.set_property('CPU', {'CPU_THREADS_NUM': str(nthreads)})
+                core.set_property('CPU', {props.inference_num_threads: nthreads})
             if 'MULTI' in devices and 'GPU' in devices:
                 core.set_property({'CPU_BIND_THREAD': 'NO'}, 'CPU')
             if mode == 'async':
-                cpu_throughput = {'CPU_THROUGHPUT_STREAMS': 'CPU_THROUGHPUT_AUTO'}
+                core.set_property('CPU', {hints.performance_mode: hints.PerformanceMode.THROUGHPUT})
+                cpu_throughput = {props.num_streams: props.streams.Num.AUTO}
                 if device in streams_dict.keys() and streams_dict[device]:
-                    cpu_throughput['CPU_THROUGHPUT_STREAMS'] = streams_dict['CPU']
+                    cpu_throughput[props.num_streams] = streams_dict['CPU']
                 core.set_property('CPU', cpu_throughput)
         if device == 'GPU':
             if 'MULTI' in devices and 'CPU' in devices:
                 core.set_property('GPU', {'GPU_QUEUE_THROTTLE': '1'})
             if mode == 'async':
-                gpu_throughput = {'GPU_THROUGHPUT_STREAMS': 'GPU_THROUGHPUT_AUTO'}
+                core.set_property('GPU', {hints.performance_mode: hints.PerformanceMode.THROUGHPUT})
+                gpu_throughput = {props.num_streams: props.streams.Num.AUTO}
                 if device in streams_dict.keys() and streams_dict[device]:
-                    gpu_throughput['GPU_THROUGHPUT_STREAMS'] = streams_dict['GPU']
+                    gpu_throughput[props.num_streams] = streams_dict['GPU']
                 core.set_property('GPU', gpu_throughput)
     if dump:
         if 'HETERO' in devices:
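
As a side note, a hedged sketch of how the effect of these calls can be checked, in the spirit of the new smoke tests; `Core.get_property` is the standard read-back call in the OpenVINO Python API, and the concrete values here are assumptions rather than something taken from this commit:

```python
import openvino as ov
import openvino.properties as props
import openvino.properties.hint as hints

core = ov.Core()
core.set_property("CPU", {props.inference_num_threads: 2,
                          props.num_streams: 2,
                          hints.performance_mode: hints.PerformanceMode.THROUGHPUT})

# Read the values back from the CPU plugin to confirm they were accepted.
print(core.get_property("CPU", props.inference_num_threads))  # expected: 2
print(core.get_property("CPU", props.num_streams))            # expected: 2
print(core.get_property("CPU", hints.performance_mode))       # expected: THROUGHPUT
```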
@@ -28,4 +28,32 @@
             <StreamCount></StreamCount>
         </FrameworkDependent>
     </Test>
+    <Test>
+        <Model>
+            <Task>classification</Task>
+            <Name>mobilenet-v1-1.0-224-tf</Name>
+            <Precision>FP32</Precision>
+            <SourceFramework>tf</SourceFramework>
+            <ModelPath>../models_dir/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.xml</ModelPath>
+            <WeightsPath>../models_dir/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.bin</WeightsPath>
+        </Model>
+        <Dataset>
+            <Name>Data</Name>
+            <Path>../test_images/black_square.jpg</Path>
+        </Dataset>
+        <FrameworkIndependent>
+            <InferenceFramework>OpenVINO DLDT</InferenceFramework>
+            <BatchSize>1</BatchSize>
+            <Device>CPU</Device>
+            <IterationCount>5</IterationCount>
+            <TestTimeLimit>1</TestTimeLimit>
+        </FrameworkIndependent>
+        <FrameworkDependent>
+            <Mode>async</Mode>
+            <Extension></Extension>
+            <AsyncRequestCount>1</AsyncRequestCount>
+            <ThreadCount></ThreadCount>
+            <StreamCount>2</StreamCount>
+        </FrameworkDependent>
+    </Test>
 </Tests>
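
A hedged sketch of how the `ThreadCount`/`StreamCount` fields above could be mapped onto the new properties; the element names follow the config shown here, but the helper itself is hypothetical and not part of this commit:

```python
import xml.etree.ElementTree as ET

import openvino as ov
import openvino.properties as props


def cpu_config_from_test(test_elem):
    # Hypothetical helper: collect the manually set thread/stream values from one <Test>.
    dep = test_elem.find("FrameworkDependent")
    config = {}
    threads = (dep.findtext("ThreadCount") or "").strip()
    streams = (dep.findtext("StreamCount") or "").strip()
    if threads:
        config[props.inference_num_threads] = int(threads)
    if streams:
        config[props.num_streams] = int(streams)
    return config


core = ov.Core()
tree = ET.parse("smoke_config.xml")  # placeholder file name
for test in tree.getroot().iter("Test"):
    core.set_property("CPU", cpu_config_from_test(test))
```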
@@ -28,4 +28,32 @@
             <StreamCount></StreamCount>
         </FrameworkDependent>
     </Test>
+    <Test>
+        <Model>
+            <Task>classification</Task>
+            <Name>mobilenet-v1-1.0-224-tf</Name>
+            <Precision>FP32</Precision>
+            <SourceFramework>tf</SourceFramework>
+            <ModelPath>../models_dir/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.xml</ModelPath>
+            <WeightsPath>../models_dir/public/mobilenet-v1-1.0-224-tf/FP32/mobilenet-v1-1.0-224-tf.bin</WeightsPath>
+        </Model>
+        <Dataset>
+            <Name>Data</Name>
+            <Path>../test_images/black_square.jpg</Path>
+        </Dataset>
+        <FrameworkIndependent>
+            <InferenceFramework>OpenVINO DLDT</InferenceFramework>
+            <BatchSize>1</BatchSize>
+            <Device>CPU</Device>
+            <IterationCount>5</IterationCount>
+            <TestTimeLimit>1</TestTimeLimit>
+        </FrameworkIndependent>
+        <FrameworkDependent>
+            <Mode>sync</Mode>
+            <Extension></Extension>
+            <AsyncRequestCount></AsyncRequestCount>
+            <ThreadCount>2</ThreadCount>
+            <StreamCount></StreamCount>
+        </FrameworkDependent>
+    </Test>
 </Tests>
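
The sync test above only pins the thread count; a minimal sketch of that path, assuming a synchronous infer request and a purely illustrative model path and input shape:

```python
import numpy as np
import openvino as ov
import openvino.properties as props

core = ov.Core()
core.set_property("CPU", {props.inference_num_threads: 2})  # manual thread count, as in the config

# Placeholder model path; mobilenet-v1-1.0-224-tf expects a 1x224x224x3 input.
compiled = core.compile_model("mobilenet-v1-1.0-224-tf.xml", "CPU")
request = compiled.create_infer_request()
result = request.infer({0: np.zeros((1, 224, 224, 3), dtype=np.float32)})
```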
