diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index 9fd3dd162..103e668cd 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -6,6 +6,7 @@ trigger:
paths:
exclude:
- modules/nvidia_plugin
+ - modules/csharp_api
pr:
branches:
@@ -15,6 +16,7 @@ pr:
paths:
exclude:
- modules/nvidia_plugin
+ - modules/csharp_api
resources:
repositories:
diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml
index adafa2ee7..5c68fef7b 100644
--- a/.ci/azure/mac.yml
+++ b/.ci/azure/mac.yml
@@ -6,6 +6,7 @@ trigger:
paths:
exclude:
- modules/nvidia_plugin
+ - modules/csharp_api
pr:
branches:
@@ -15,6 +16,7 @@ pr:
paths:
exclude:
- modules/nvidia_plugin
+ - modules/csharp_api
resources:
repositories:
diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml
index fd49a097e..9bbcaf1d7 100644
--- a/.ci/azure/windows.yml
+++ b/.ci/azure/windows.yml
@@ -6,6 +6,7 @@ trigger:
paths:
exclude:
- modules/nvidia_plugin
+ - modules/csharp_api
pr:
branches:
@@ -15,6 +16,7 @@ pr:
paths:
exclude:
- modules/nvidia_plugin
+ - modules/csharp_api
resources:
repositories:
diff --git a/modules/csharp_api/README.md b/modules/csharp_api/README.md
new file mode 100644
index 000000000..13d1a71b9
--- /dev/null
+++ b/modules/csharp_api/README.md
@@ -0,0 +1,150 @@
+![OpenVINO™ C# API](https://socialify.git.ci/guojin-yan/OpenVINO-CSharp-API/image?description=1&descriptionEditable=💞%20OpenVINO%20wrapper%20for%20.NET💞%20&forks=1&issues=1&logo=https%3A%2F%2Fs2.loli.net%2F2023%2F01%2F26%2FylE1K5JPogMqGSW.png&name=1&owner=1&pattern=Circuit%20Board&pulls=1&stargazers=1&theme=Light)
+
+
+
+
+
+
+
+
+
+
+[简体中文](README_cn.md) | English
+
+## 📚 What is OpenVINO™ C# API ?
+
+[OpenVINO™](https://www.openvino.ai) is an open-source toolkit for optimizing and deploying AI inference.
+
+- Boost deep learning performance in computer vision, automatic speech recognition, natural language processing and other common tasks
+- Use models trained with popular frameworks like TensorFlow, PyTorch and more
+- Reduce resource demands and efficiently deploy on a range of Intel® platforms from edge to cloud
+
+ This project launches the OpenVINO™ C# API based on the OpenVINO™ toolkit, aiming to drive the application of OpenVINO™ in the C# field. Since the OpenVINO™ C# API is developed on top of OpenVINO™, the supported platforms are consistent with OpenVINO™; please refer to the OpenVINO™ documentation for details.
+
+## NuGet Package
+
+### Core Managed Libraries
+
+| Package | Description | Link |
+| ---------------------------------------------- | --------------------------------------------------------- | ------------------------------------------------------------ |
+| **OpenVINO.CSharp.API** | OpenVINO C# API core libraries | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API/) |
+| **OpenVINO.CSharp.API.Extensions** | OpenVINO C# API core extensions libraries | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.Extensions.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API.Extensions/) |
+| **OpenVINO.CSharp.API.Extensions.OpenCvSharp** | OpenVINO C# API core extensions libraries use OpenCvSharp | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.Extensions.OpenCvSharp.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API.Extensions.OpenCvSharp/) |
+| **OpenVINO.CSharp.API.Extensions.EmguCV** | OpenVINO C# API core extensions libraries use EmguCV | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.Extensions.EmguCV.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API.Extensions.EmguCV/) |
+
+### Native Runtime Libraries
+
+| Package | Description | Link |
+| ------------------------------------- | ------------------------------------ | ------------------------------------------------------------ |
+| **OpenVINO.runtime.win** | Native bindings for Windows | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.win.svg)](https://www.nuget.org/packages/OpenVINO.runtime.win/) |
+| **OpenVINO.runtime.ubuntu.22-x86_64** | Native bindings for ubuntu.22-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.ubuntu.22-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.ubuntu.22-x86_64/) |
+| **OpenVINO.runtime.ubuntu.20-x86_64** | Native bindings for ubuntu.20-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.ubuntu.20-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.ubuntu.20-x86_64/) |
+| **OpenVINO.runtime.ubuntu.18-x86_64** | Native bindings for ubuntu.18-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.ubuntu.18-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.ubuntu.18-x86_64/) |
+| **OpenVINO.runtime.debian9-arm64**    | Native bindings for debian9-arm64    | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.debian9-arm64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.debian9-arm64/) |
+| **OpenVINO.runtime.debian9-armhf**    | Native bindings for debian9-armhf    | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.debian9-armhf.svg)](https://www.nuget.org/packages/OpenVINO.runtime.debian9-armhf/) |
+| **OpenVINO.runtime.centos7-x86_64** | Native bindings for centos7-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.centos7-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.centos7-x86_64/) |
+| **OpenVINO.runtime.rhel8-x86_64** | Native bindings for rhel8-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.rhel8-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.rhel8-x86_64/) |
+| **OpenVINO.runtime.macos-x86_64** | Native bindings for macos-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.macos-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.macos-x86_64/) |
+| **OpenVINO.runtime.macos-arm64** | Native bindings for macos-arm64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.macos-arm64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.macos-arm64/) |
+
+### Integration Library
+
+| Package | Description | Link |
+| --------------------------- | ------------------------------ | ------------------------------------------------------------ |
+| **OpenVINO.CSharp.Windows** | All-in-one package for Windows | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.Windows.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.Windows/) |
+
+
+
+## ⚙ How to install OpenVINO™ C# API?
+
+ The following provides the installation methods for the OpenVINO™ C# API on different platforms; you can install it according to the platform you are using.
+
+### **Windows**
+
+ Install the following package through the ``dotnet add package`` command or through Visual Studio
+
+```shell
+dotnet add package OpenVINO.CSharp.API
+dotnet add package OpenVINO.runtime.win
+Or install the all-in-one package instead:
+dotnet add package OpenVINO.CSharp.Windows
+```
+
+### **Linux**
+
+ We have created the corresponding NuGet Package for the **Linux** platform based on the officially compiled builds. For example, **ubuntu.22-x86_64** is installed using the ``dotnet add package`` command:
+
+```shell
+dotnet add package OpenVINO.CSharp.API
+dotnet add package OpenVINO.runtime.ubuntu.22-x86_64
+```
+
+ After running the program once, add environment variables:
+
+```
+export LD_LIBRARY_PATH={Program generated executable file directory}/runtimes/ubuntu.22-x86_64/native
+such as =》
+export LD_LIBRARY_PATH=/home/ygj/Program/sample1/bin/Debug/net6.0/runtimes/ubuntu.22-x86_64/native
+```
+
+ If you are on a brand new platform (without OpenVINO C++ installed), you need to install the dependency environment: switch to the ``{Program generated executable file directory}/runtimes/ubuntu.22-x86_64/native`` directory and run the following command:
+
+```shell
+sudo -E ./install_openvino_dependencies.sh
+```
+
+### **Mac OS**
+
+Install the following package using the ``dotnet add package``command
+
+```shell
+dotnet add package OpenVINO.CSharp.API
+dotnet add package OpenVINO.runtime.macos-arm64
+```
+
+
+
+## 🏷How to use OpenVINO™ C# API?
+
+- **Simple usage**
+
+If you don't know how to use it, simply understand the usage method through the following code.
+
+```c#
+using OpenVinoSharp;
+namespace test
+{
+ internal class Program
+ {
+ static void Main(string[] args)
+ {
+ using Core core = new Core();
+ using Model model = core.read_model("./model.xml");
+ using CompiledModel compiled_model = core.compiled_model(model, "AUTO");
+ using InferRequest infer_request = compiled_model.create_infer_request();
+ using Tensor input_tensor = infer_request.get_tensor("images");
+ infer_request.infer();
+ using Tensor output_tensor = infer_request.get_tensor("output0");
+ }
+ }
+}
+```
+
+The classes and objects encapsulated in the project, such as Core, Model, Tensor, etc., are implemented by calling the C API interface and hold unmanaged resources. They need to be released by calling the **Dispose()** method or with a `using` statement, otherwise memory leaks may occur.
+
+## 💻 Tutorial Examples
+
+
+
+## 🗂 API Reference
+
+If you want to learn more information, you can refer to: [OpenVINO™ C# API API Documented](https://guojin-yan.github.io/OpenVINO-CSharp-API.docs/index.html)
+
+## 🎖 Contribute
+
+ If you are interested in using OpenVINO™ with C# and contributing to the open source community, welcome to join us and develop the OpenVINO™ C# API together.
+ If you have any ideas or suggestions for improving this project, please feel free to contact us and guide our work.
+
+## License
+
+The release of this project is certified under the [Apache 2.0 license](https://github.com/guojin-yan/OpenVINO-CSharp-API/blob/csharp3.0/LICENSE.txt) .
diff --git a/modules/csharp_api/README_cn.md b/modules/csharp_api/README_cn.md
new file mode 100644
index 000000000..010b9a034
--- /dev/null
+++ b/modules/csharp_api/README_cn.md
@@ -0,0 +1,150 @@
+![OpenVINO™ C# API](https://socialify.git.ci/guojin-yan/OpenVINO-CSharp-API/image?description=1&descriptionEditable=💞%20OpenVINO%20wrapper%20for%20.NET💞%20&forks=1&issues=1&logo=https%3A%2F%2Fs2.loli.net%2F2023%2F01%2F26%2FylE1K5JPogMqGSW.png&name=1&owner=1&pattern=Circuit%20Board&pulls=1&stargazers=1&theme=Light)
+
+
+
+
+
+
+
+
+
+
+
+简体中文| [English](README.md)
+
+# 📚 简介
+
+[OpenVINO™ ](https://www.openvino.ai)是一个用于优化和部署 AI 推理的开源工具包。
+
+- 提升深度学习在计算机视觉、自动语音识别、自然语言处理和其他常见任务中的性能
+- 使用流行框架(如TensorFlow,PyTorch等)训练的模型
+- 减少资源需求,并在从边缘到云的一系列英特尔®平台上高效部署
+
+ 该项目基于OpenVINO™工具套件推出了 OpenVINO™ C# API,旨在推动 OpenVINO™在C#领域的应用。OpenVINO™ C# API 由于是基于 OpenVINO™ 开发,所支持的平台与OpenVINO™ 一致,具体信息可以参考 OpenVINO™。
+
+# NuGet Package
+
+## Core Managed Libraries
+
+| Package | Description | Link |
+| ---------------------------------------------- | --------------------------------------------------------- | ------------------------------------------------------------ |
+| **OpenVINO.CSharp.API** | OpenVINO C# API core libraries | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API/) |
+| **OpenVINO.CSharp.API.Extensions** | OpenVINO C# API core extensions libraries | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.Extensions.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API.Extensions/) |
+| **OpenVINO.CSharp.API.Extensions.OpenCvSharp** | OpenVINO C# API core extensions libraries use OpenCvSharp | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.Extensions.OpenCvSharp.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API.Extensions.OpenCvSharp/) |
+| **OpenVINO.CSharp.API.Extensions.EmguCV** | OpenVINO C# API core extensions libraries use EmguCV | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.API.Extensions.EmguCV.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.API.Extensions.EmguCV/) |
+
+## Native Runtime Libraries
+
+| Package | Description | Link |
+| ------------------------------------- | ------------------------------------ | ------------------------------------------------------------ |
+| **OpenVINO.runtime.win** | Native bindings for Windows | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.win.svg)](https://www.nuget.org/packages/OpenVINO.runtime.win/) |
+| **OpenVINO.runtime.ubuntu.22-x86_64** | Native bindings for ubuntu.22-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.ubuntu.22-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.ubuntu.22-x86_64/) |
+| **OpenVINO.runtime.ubuntu.20-x86_64** | Native bindings for ubuntu.20-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.ubuntu.20-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.ubuntu.20-x86_64/) |
+| **OpenVINO.runtime.ubuntu.18-x86_64** | Native bindings for ubuntu.18-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.ubuntu.18-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.ubuntu.18-x86_64/) |
+| **OpenVINO.runtime.debian9-arm64**    | Native bindings for debian9-arm64    | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.debian9-arm64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.debian9-arm64/) |
+| **OpenVINO.runtime.debian9-armhf**    | Native bindings for debian9-armhf    | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.debian9-armhf.svg)](https://www.nuget.org/packages/OpenVINO.runtime.debian9-armhf/) |
+| **OpenVINO.runtime.centos7-x86_64** | Native bindings for centos7-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.centos7-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.centos7-x86_64/) |
+| **OpenVINO.runtime.rhel8-x86_64** | Native bindings for rhel8-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.rhel8-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.rhel8-x86_64/) |
+| **OpenVINO.runtime.macos-x86_64** | Native bindings for macos-x86_64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.macos-x86_64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.macos-x86_64/) |
+| **OpenVINO.runtime.macos-arm64** | Native bindings for macos-arm64 | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.runtime.macos-arm64.svg)](https://www.nuget.org/packages/OpenVINO.runtime.macos-arm64/) |
+
+
+## Integration Library
+
+| Package | Description | Link |
+| --------------------------- | ------------------------------ | ------------------------------------------------------------ |
+| **OpenVINO.CSharp.Windows** | All-in-one package for Windows | [![NuGet Gallery ](https://badge.fury.io/nu/OpenVINO.CSharp.Windows.svg)](https://www.nuget.org/packages/OpenVINO.CSharp.Windows/) |
+
+# ⚙ 如何安装
+
+以下提供了OpenVINO™ C# API在不同平台的安装方法,可以根据自己使用平台进行安装。
+
+## Windows
+
+通过``dotnet add package``指令安装或通过Visual Studio安装以下程序包
+
+```shell
+dotnet add package OpenVINO.CSharp.API
+dotnet add package OpenVINO.runtime.win
+或者安装集成包——>
+dotnet add package OpenVINO.CSharp.Windows
+```
+
+## Linux
+
+ **linux**平台我们根据官方编译的平台制作了对应的NuGet Package,以**ubuntu.22-x86_64**为例,通过``dotnet add package``指令安装:
+
+```shell
+dotnet add package OpenVINO.CSharp.API
+dotnet add package OpenVINO.runtime.ubuntu.22-x86_64
+```
+
+ 运行一次程序后,添加环境变量:
+
+```shell
+export LD_LIBRARY_PATH={Program generated executable file directory}/runtimes/ubuntu.22-x86_64/native
+例如——>
+export LD_LIBRARY_PATH=/home/ygj/Program/sample1/bin/Debug/net6.0/runtimes/ubuntu.22-x86_64/native
+```
+
+ 如果对于一个全新平台(未安装过OpenVINO C++),需要安装一下依赖环境,切换到``{Program generated executable file directory}/runtimes/ubuntu.22-x86_64/native``目录下,运行以下指令:
+
+```shell
+sudo -E ./install_openvino_dependencies.sh
+```
+
+## Mac OS
+
+通过``dotnet add package``指令安装以下程序包
+
+```shell
+dotnet add package OpenVINO.CSharp.API
+dotnet add package OpenVINO.runtime.macos-arm64
+```
+
+## 🏷开始使用
+
+- **使用方法**
+
+如果你不知道如何使用,通过下面代码简单了解使用方法。
+
+```c#
+using OpenVinoSharp; // 引用命名空间
+namespace test
+{
+ internal class Program
+ {
+ static void Main(string[] args)
+ {
+ using Core core = new Core(); // 初始化 Core 核心
+ using Model model = core.read_model("./model.xml"); // 读取模型文件
+ using CompiledModel compiled_model = core.compiled_model(model, "AUTO"); // 将模型加载到设备
+ using InferRequest infer_request = compiled_model.create_infer_request(); // 创建推理通道
+ using Tensor input_tensor = infer_request.get_tensor("images"); // 获取输入节点Tensor
+ infer_request.infer(); // 模型推理
+ using Tensor output_tensor = infer_request.get_tensor("output0"); // 获取输出节点Tensor
+ }
+ }
+}
+```
+
+项目中所封装的类、对象例如Core、Model、Tensor等,通过调用 C api 接口实现,具有非托管资源,需要调用**Dispose()**方法处理或者使用**using**,否则就会出现内存泄漏。
+
+## 💻 应用案例
+
+
+
+## 🗂 API 文档
+
+如果想了解更多信息,可以参阅:[OpenVINO™ C# API API Documented](https://guojin-yan.github.io/OpenVINO-CSharp-API.docs/index.html)
+
+## 🎖 贡献
+
+ 如果您对OpenVINO™ 在C#使用感兴趣,有兴趣对开源社区做出自己的贡献,欢迎加入我们,一起开发OpenVINO™ C# API。
+
+ 如果你对该项目有一些想法或改进思路,欢迎联系我们,指导下我们的工作。
+
+## 许可证书
+
+本项目的发布受[Apache 2.0 license](https://github.com/guojin-yan/OpenVINO-CSharp-API/blob/csharp3.0/LICENSE.txt)许可认证。
+
diff --git a/modules/csharp_api/csharp/CSharpAPI.csproj b/modules/csharp_api/csharp/CSharpAPI.csproj
new file mode 100644
index 000000000..65f622aa4
--- /dev/null
+++ b/modules/csharp_api/csharp/CSharpAPI.csproj
@@ -0,0 +1,46 @@
+
+
+
+ net5.0;net6.0;net48;netcoreapp3.1
+ True
+ True
+ OpenVINO.CSharp.API
+
+ OpenVINO C# API
+ 2023.1.0.1
+ Guojin Yan
+ Guojin Yan
+ OpenVINO C# API
+ Based on the C # platform, call the OpenVINO suite to deploy a deep learning model.
+
+ https://github.com/openvinotoolkit/openvino_contrib/tree/master/modules/csharp_api
+ https://github.com/openvinotoolkit/openvino_contrib/tree/master/modules/csharp_api
+ git
+ ../../nuget
+ zh
+
+
+ This version is a pre release version of OpenVINO™ C# API 3.0 and its features are not yet fully developed. If there are any issues during use, please feel free to contact me.
+ OpenVinoSharp
+ OpenVINO_CSharp_API
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ $(DefineConstants);DOTNET_FRAMEWORK;
+
+
+
+
diff --git a/modules/csharp_api/csharp/CSharpAPI.sln b/modules/csharp_api/csharp/CSharpAPI.sln
new file mode 100644
index 000000000..22f660b7f
--- /dev/null
+++ b/modules/csharp_api/csharp/CSharpAPI.sln
@@ -0,0 +1,31 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 17
+VisualStudioVersion = 17.6.33829.357
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "CSharpAPI", "CSharpAPI.csproj", "{56A1269F-3928-4367-84BE-0EA2877DFED1}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "csharp_api_unit_tests", "..\tests\csharp_api_unit_tests\csharp_api_unit_tests.csproj", "{DD506CD5-C670-4354-879C-42EF1A2A7DD5}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {56A1269F-3928-4367-84BE-0EA2877DFED1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {56A1269F-3928-4367-84BE-0EA2877DFED1}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {56A1269F-3928-4367-84BE-0EA2877DFED1}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {56A1269F-3928-4367-84BE-0EA2877DFED1}.Release|Any CPU.Build.0 = Release|Any CPU
+ {DD506CD5-C670-4354-879C-42EF1A2A7DD5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {DD506CD5-C670-4354-879C-42EF1A2A7DD5}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {DD506CD5-C670-4354-879C-42EF1A2A7DD5}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {DD506CD5-C670-4354-879C-42EF1A2A7DD5}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {79883B4A-4EAE-42DE-A72B-7B6D47685E9A}
+ EndGlobalSection
+EndGlobal
diff --git a/modules/csharp_api/csharp/base.cs b/modules/csharp_api/csharp/base.cs
new file mode 100644
index 000000000..edd2a2b2b
--- /dev/null
+++ b/modules/csharp_api/csharp/base.cs
@@ -0,0 +1,47 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+
+namespace OpenVinoSharp
+{
+
+ ///
+ /// OpenVINO wrapper for .NET.
+ /// This is the basic namespace of OpenVINO in C#,
+ /// and all classes and methods are within this method.
+ /// OpenVinoSharp .
+ ///
+ [System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
+ class NamespaceDoc
+ {
+ }
+
+ namespace element {
+ ///
+ /// OpenVINO wrapper for .NET.
+ /// Define elements in OpenVINO.
+ /// OpenVinoSharp.element .
+ ///
+ [System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
+ class NamespaceDoc
+ {
+ }
+ }
+
+
+ namespace preprocess {
+ ///
+ /// Mainly defined the data processing methods in OpenVINO.
+ /// OpenVinoSharp.preprocess .
+ ///
+ [System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
+ class NamespaceDoc
+ {
+ }
+ }
+}
+
+
diff --git a/modules/csharp_api/csharp/common/common.cs b/modules/csharp_api/csharp/common/common.cs
new file mode 100644
index 000000000..2b6ad211b
--- /dev/null
+++ b/modules/csharp_api/csharp/common/common.cs
@@ -0,0 +1,177 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// This enum contains codes for all possible return values of the interface functions
+ ///
+ public enum ExceptionStatus : int
+ {
+ ///
+ /// SUCCESS!
+ ///
+ OK = 0,
+ // map exception to C++ interface
+ ///
+ /// GENERAL_ERROR
+ ///
+ GENERAL_ERROR = -1,
+ ///
+ /// NOT_IMPLEMENTED
+ ///
+ NOT_IMPLEMENTED = -2,
+ ///
+ /// NETWORK_NOT_LOADED
+ ///
+ NETWORK_NOT_LOADED = -3,
+ ///
+ /// PARAMETER_MISMATCH
+ ///
+ PARAMETER_MISMATCH = -4,
+ ///
+ /// NOT_FOUND
+ ///
+ NOT_FOUND = -5,
+ ///
+ /// OUT_OF_BOUNDS
+ ///
+ OUT_OF_BOUNDS = -6,
+
+ // exception not of std::exception derived type was thrown
+ ///
+ /// UNEXPECTED
+ ///
+ UNEXPECTED = -7,
+ ///
+ /// REQUEST_BUSY
+ ///
+ REQUEST_BUSY = -8,
+ ///
+ /// RESULT_NOT_READY
+ ///
+ RESULT_NOT_READY = -9,
+ ///
+ /// NOT_ALLOCATED
+ ///
+ NOT_ALLOCATED = -10,
+ ///
+ /// INFER_NOT_STARTED
+ ///
+ INFER_NOT_STARTED = -11,
+ ///
+ /// NETWORK_NOT_READ
+ ///
+ NETWORK_NOT_READ = -12,
+ ///
+ /// INFER_CANCELLED
+ ///
+ INFER_CANCELLED = -13,
+
+ // exception in C wrapper
+
+ ///
+ /// INVALID_C_PARAM
+ ///
+ INVALID_C_PARAM = -14,
+ ///
+ /// UNKNOWN_C_ERROR
+ ///
+ UNKNOWN_C_ERROR = -15,
+ ///
+ /// NOT_IMPLEMENT_C_METHOD
+ ///
+ NOT_IMPLEMENT_C_METHOD = -16,
+ ///
+ /// UNKNOW_EXCEPTION
+ ///
+ UNKNOW_EXCEPTION = -17,
+ ///
+ /// PTR_NULL
+ ///
+ PTR_NULL = -100,
+ }
+
+ ///
+ /// This enum contains codes for element type.
+ ///
+ public enum ElementType : uint
+ {
+ ///
+ /// Undefined element type
+ ///
+ UNDEFINED = 0U,
+ ///
+ /// Dynamic element type
+ ///
+ DYNAMIC,
+ ///
+ /// boolean element type
+ ///
+ BOOLEAN,
+ ///
+ /// bf16 element type
+ ///
+ BF16,
+ ///
+ /// f16 element type
+ ///
+ F16,
+ ///
+ /// f32 element type
+ ///
+ F32,
+ ///
+ /// f64 element type
+ ///
+ F64,
+ ///
+ /// i4 element type
+ ///
+ I4,
+ ///
+ /// i8 element type
+ ///
+ I8,
+ ///
+ /// i16 element type
+ ///
+ I16,
+ ///
+ /// i32 element type
+ ///
+ I32,
+ ///
+ /// i64 element type
+ ///
+ I64,
+ ///
+ /// binary element type
+ ///
+ U1,
+ ///
+ /// u4 element type
+ ///
+ U4,
+ ///
+ /// u8 element type
+ ///
+ U8,
+ ///
+ /// u16 element type
+ ///
+ U16,
+ ///
+ /// u32 element type
+ ///
+ U32,
+ ///
+ /// u64 element type
+ ///
+ U64,
+ };
+
+}
diff --git a/modules/csharp_api/csharp/common/element_type.cs b/modules/csharp_api/csharp/common/element_type.cs
new file mode 100644
index 000000000..cefe0f5c3
--- /dev/null
+++ b/modules/csharp_api/csharp/common/element_type.cs
@@ -0,0 +1,439 @@
+using System;
+using System.Collections.Generic;
+using System.Data;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using System.Xml.Linq;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// The class of data type, mainly used for model data types.
+ ///
+ ///
+ /// OvType inherits from element. Type
+ ///
+ public class OvType : element.Type
+ {
+ ///
+ /// OvType constructor, initializing element. Type
+ ///
+ /// ElementType data
+ public OvType(ElementType t) : base(t) { }
+ ///
+ /// OvType copy constructor, initializing element. Type
+ ///
+ /// OvType data
+ public OvType(OvType t) : base(t.m_type) { }
+ ///
+ /// OvType constructor, initializing element.Type through data type string
+ ///
+ /// data type string
+ public OvType(string type) : base (type) { }
+
+ };
+ namespace element
+ {
+ ///
+ /// Enum to define possible element types
+ /// ov_element_c#_api
+ ///
+ public enum Type_t
+ {
+ ///
+ /// Undefined element type
+ ///
+ undefined,
+ ///
+ /// Dynamic element type
+ ///
+ dynamic,
+ ///
+ /// boolean element type
+ ///
+ boolean,
+ ///
+ /// bf16 element type
+ ///
+ bf16,
+ ///
+ /// f16 element type
+ ///
+ f16,
+ ///
+ /// f32 element type
+ ///
+ f32,
+ ///
+ /// f64 element type
+ ///
+ f64,
+ ///
+ /// i4 element type
+ ///
+ i4,
+ ///
+ /// i8 element type
+ ///
+ i8,
+ ///
+ /// i16 element type
+ ///
+ i16,
+ ///
+ /// i32 element type
+ ///
+ i32,
+ ///
+ /// i64 element type
+ ///
+ i64,
+ ///
+ /// binary element type
+ ///
+ u1,
+ ///
+ /// u4 element type
+ ///
+ u4,
+ ///
+ /// u8 element type
+ ///
+ u8,
+ ///
+ /// u16 element type
+ ///
+ u16,
+ ///
+ /// u32 element type
+ ///
+ u32,
+ ///
+ /// u64 element type
+ ///
+ u64
+ };
+ ///
+ /// [struct] Type information storage struct.
+ ///
+ struct TypeInfo
+ {
+ ///
+ /// data length.
+ ///
+ public ulong m_bitwidth;
+ ///
+ /// real number flag
+ ///
+ public bool m_is_real;
+ ///
+ /// signed number flag
+ ///
+ public bool m_is_signed;
+ ///
+ /// quantize number flag
+ ///
+ public bool m_is_quantized;
+ ///
+ /// type name full name string
+ ///
+ public string m_cname;
+ ///
+ /// type name abbreviation string
+ ///
+ public string m_type_name;
+ ///
+ /// Structure constructor
+ ///
+ /// data length.
+ /// real number flag
+ /// signed number flag
+ /// quantize number flag
+ /// type name full name string
+ /// type name abbreviation string
+ public TypeInfo(ulong bitwidth, bool is_real, bool is_signed, bool is_quantized, string cname, string type_name)
+ {
+ m_bitwidth = bitwidth;
+ m_is_real = is_real;
+ m_is_signed = is_signed;
+ m_is_quantized = is_quantized;
+ m_cname = cname;
+ m_type_name = type_name;
+ }
+ }
+ ///
+ /// Base class to define element type
+ /// ov_element_c#_api
+ ///
+ public class Type {
+ ///
+ /// data type, defined based on Type_t.
+ ///
+ protected Type_t m_type = Type_t.undefined ;
+ ///
+ /// OvType constructor, by Type_t initialize the Type class
+ ///
+ /// Type_t data
+ public Type(Type_t t) { m_type = t; }
+ ///
+ /// OvType constructor, by ElementType initialize the Type class
+ ///
+ /// ElementType data
+ public Type(ElementType t) { m_type = (Type_t)t; }
+ ///
+ /// OvType copy constructor, by Type initialize the Type class
+ ///
+ /// Type data
+ public Type(Type t) {
+ m_type = t.m_type;
+ }
+ ///
+ /// OvType constructor, initializing element.Type through data type string
+ ///
+ /// data type string
+ public Type(string type) {
+ new Type(type_from_string(type));
+ }
+ ///
+ /// Get data type.
+ ///
+ /// ElementType type
+ public ElementType get_type() {
+ return (ElementType)m_type;
+ }
+ ///
+ /// Get type full name string.
+ ///
+ /// full name string
+ public string c_type_string()
+ {
+ return get_type_info(m_type).m_cname;
+ }
+ ///
+ /// Get data type length.
+ ///
+ /// type length
+ public ulong size()
+ {
+ return (bitwidth() + 7) >> 3;
+ }
+ ///
+ /// Get type number.
+ ///
+ /// type number
+ public ulong hash()
+ {
+ return (ulong)(m_type);
+ }
+ ///
+ /// Get abbreviated name.
+ ///
+ /// abbreviated name
+ public string get_type_name()
+ {
+ return to_string();
+ }
+ ///
+ /// Determine whether it is a real number
+ ///
+ /// true: is real; false: not real
+ public bool is_integral()
+ {
+ return !is_real();
+ }
+ ///
+ /// Convert data type to string
+ ///
+ /// data type string
+ public string to_string()
+ {
+ return get_type_info(m_type).m_type_name;
+ }
+ ///
+ /// Determine whether the current data type is static.
+ ///
+ /// true : is static; false : not static
+ public bool is_static()
+ {
+ return get_type_info(m_type).m_bitwidth != 0;
+ }
+ ///
+ /// Determine whether the current data type is real.
+ ///
+ /// true : is real; false : not real
+ public bool is_real()
+ {
+ return get_type_info(m_type).m_is_real;
+ }
+ ///
+ /// Determine whether the current data type is integral number.
+ ///
+ /// true : is integral number; false : not integral number
+ public bool is_integral_number()
+ {
+ return is_integral() && (m_type != Type_t.boolean);
+ }
+ ///
+ /// Determine whether the current data type is signed.
+ ///
+ /// true : is signed; false : not signed
+ public bool is_signed()
+ {
+ return get_type_info(m_type).m_is_signed;
+ }
+ ///
+ /// Determine whether the current data is of quantum type
+ ///
+ /// true : is quantized; false : not quantized
+ public bool is_quantized()
+ {
+ return get_type_info(m_type).m_is_quantized;
+ }
+ ///
+ /// Obtain the size of the current data type
+ ///
+ /// the size of the current data type
+ public ulong bitwidth()
+ {
+ return get_type_info(m_type).m_bitwidth;
+ }
+
+ ///
+ /// Get the current type of the Type_ Info
+ ///
+ /// Type_t
+ /// TypeInfo data
+ TypeInfo get_type_info(element.Type_t type)
+ {
+ switch (type)
+ {
+ case element.Type_t.undefined:
+ return new TypeInfo(10000, false, false, false, "undefined", "undefined");
+ case element.Type_t.dynamic:
+ return new TypeInfo(0, false, false, false, "dynamic", "dynamic");
+ case element.Type_t.boolean:
+ return new TypeInfo(8, false, true, false, "char", "boolean");
+ case element.Type_t.bf16:
+ return new TypeInfo(16, true, true, false, "bfloat16", "bf16");
+ case element.Type_t.f16:
+ return new TypeInfo(16, true, true, false, "float16", "f16");
+ case element.Type_t.f32:
+ return new TypeInfo(32, true, true, false, "float", "f32");
+ case element.Type_t.f64:
+ return new TypeInfo(64, true, true, false, "double", "f64");
+ case element.Type_t.i4:
+ return new TypeInfo(4, false, true, true, "int4_t", "i4");
+ case element.Type_t.i8:
+ return new TypeInfo(8, false, true, true, "int8_t", "i8");
+ case element.Type_t.i16:
+ return new TypeInfo(16, false, true, false, "int16_t", "i16");
+ case element.Type_t.i32:
+ return new TypeInfo(32, false, true, true, "int32_t", "i32");
+ case element.Type_t.i64:
+ return new TypeInfo(64, false, true, false, "int64_t", "i64");
+ case element.Type_t.u1:
+ return new TypeInfo(1, false, false, false, "uint1_t", "u1");
+ case element.Type_t.u4:
+ return new TypeInfo(4, false, false, false, "uint4_t", "u4");
+ case element.Type_t.u8:
+ return new TypeInfo(8, false, false, true, "uint8_t", "u8");
+ case element.Type_t.u16:
+ return new TypeInfo(16, false, false, false, "uint16_t", "u16");
+ case element.Type_t.u32:
+ return new TypeInfo(32, false, false, false, "uint32_t", "u32");
+ case element.Type_t.u64:
+ return new TypeInfo(64, false, false, false, "uint64_t", "u64");
+ default:
+ return new TypeInfo(100000, false, false, false, "default", "default");
+ }
+ }
+ ///
+ /// Convert type string to Type class
+ ///
+ /// type string
+ /// Type class
+ Type type_from_string(string type)
+ {
+ if (type == "f16" || type == "FP16")
+ {
+ return new Type(Type_t.f16);
+ }
+ else if (type == "f32" || type == "FP32")
+ {
+ return new Type(Type_t.f32);
+ }
+ else if (type == "bf16" || type == "BF16")
+ {
+ return new Type(Type_t.bf16);
+ }
+ else if (type == "f64" || type == "FP64")
+ {
+ return new Type(Type_t.f64);
+ }
+ else if (type == "i4" || type == "I4")
+ {
+ return new Type(Type_t.i4);
+ }
+ else if (type == "i8" || type == "I8")
+ {
+ return new Type(Type_t.i8);
+ }
+ else if (type == "i16" || type == "I16")
+ {
+ return new Type(Type_t.i16);
+ }
+ else if (type == "i32" || type == "I32")
+ {
+ return new Type(Type_t.i32);
+ }
+ else if (type == "i64" || type == "I64")
+ {
+ return new Type(Type_t.i64);
+ }
+ else if (type == "u1" || type == "U1" || type == "BIN" || type == "bin")
+ {
+ return new Type(Type_t.u1);
+ }
+ else if (type == "u4" || type == "U4")
+ {
+ return new Type(Type_t.u4);
+ }
+ else if (type == "u8" || type == "U8")
+ {
+ return new Type(Type_t.u8);
+ }
+ else if (type == "u16" || type == "U16")
+ {
+ return new Type(Type_t.u16);
+ }
+ else if (type == "u32" || type == "U32")
+ {
+ return new Type(Type_t.u32);
+ }
+ else if (type == "u64" || type == "U64")
+ {
+ return new Type(Type_t.u64);
+ }
+ else if (type == "boolean" || type == "BOOL")
+ {
+ return new Type(Type_t.boolean);
+ }
+ else if (type == "undefined" || type == "UNSPECIFIED")
+ {
+ return new Type(Type_t.undefined);
+ }
+ else if (type == "dynamic")
+ {
+ return new Type(Type_t.dynamic);
+ }
+ else
+ {
+ return new Type(Type_t.undefined);
+ }
+ }
+ };
+
+
+ }
+}
diff --git a/modules/csharp_api/csharp/common/property.cs b/modules/csharp_api/csharp/common/property.cs
new file mode 100644
index 000000000..cf6ddd25a
--- /dev/null
+++ b/modules/csharp_api/csharp/common/property.cs
@@ -0,0 +1,163 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
    /// <summary>
    /// A header for advanced hardware-specific properties for OpenVINO runtime devices.
    /// To use in set_property, compile_model, import_model, get_property methods.
    /// </summary>
    public enum PropertyKey
    {
        // Read-only property keys
        /// <summary>
        /// Read-only property to get a string list of supported read-only properties.
        /// </summary>
        SUPPORTED_PROPERTIES,
        /// <summary>
        /// Read-only property to get a list of available device IDs.
        /// </summary>
        AVAILABLE_DEVICES,
        /// <summary>
        /// Read-only property (uint32_t string) to get an unsigned integer value of the optimal
        /// number of compiled model infer requests.
        /// </summary>
        OPTIMAL_NUMBER_OF_INFER_REQUESTS,
        /// <summary>
        /// Read-only property; presumably provides a hint for a range of async infer requests
        /// (mirrors ov::range_for_async_infer_requests) — TODO confirm against the C API docs.
        /// </summary>
        RANGE_FOR_ASYNC_INFER_REQUESTS,
        /// <summary>
        /// Read-only property (string(unsigned int, unsigned int)) to provide information about a range for
        /// streams on platforms where streams are supported.
        /// </summary>
        RANGE_FOR_STREAMS,
        /// <summary>
        /// Read-only property to get a string value representing a full device name.
        /// </summary>
        FULL_DEVICE_NAME,
        /// <summary>
        /// Read-only property to get a string list of capabilities options per device.
        /// </summary>
        OPTIMIZATION_CAPABILITIES,
        /// <summary>
        /// Read-only property to get the name of a model.
        /// </summary>
        NETWORK_NAME,
        /// <summary>
        /// Read-only property (uint32_t string) to query information about the optimal batch size for the given device
        /// and the network.
        /// </summary>
        OPTIMAL_BATCH_SIZE,
        /// <summary>
        /// Read-only property to get the maximum batch size which does not cause performance degradation due
        /// to memory swap impact.
        /// </summary>
        MAX_BATCH_SIZE,

        // Read-write property keys
        /// <summary>
        /// Read-write property (string) to set/get the directory which will be used to store any data cached
        /// by plugins.
        /// </summary>
        CACHE_DIR,
        /// <summary>
        /// Read-write property (uint32_t string) to set/get the number of executor logical partitions.
        /// </summary>
        NUM_STREAMS,
        /// <summary>
        /// Read-write property to set/get the name for setting the CPU affinity per thread option.
        /// </summary>
        AFFINITY,
        /// <summary>
        /// Read-write property (int32_t string) to set/get the maximum number of threads that can be used
        /// for inference tasks.
        /// </summary>
        INFERENCE_NUM_THREADS,
        /// <summary>
        /// Read-write property; it is a high-level OpenVINO Performance Hint.
        /// </summary>
        PERFORMANCE_HINT,
        /// <summary>
        /// Read-write property; it is a high-level OpenVINO hint for using CPU pinning to bind CPU threads to processors
        /// during inference.
        /// </summary>
        ENABLE_CPU_PINNING,
        /// <summary>
        /// Read-write property; it is a high-level OpenVINO hint for the type of CPU core used during inference.
        /// </summary>
        SCHEDULING_CORE_TYPE,
        /// <summary>
        /// Read-write property; it is a high-level OpenVINO hint for using hyper-threading processors during CPU inference.
        /// </summary>
        ENABLE_HYPER_THREADING,
        /// <summary>
        /// Read-write property to set the hint for a device to use the specified precision for inference.
        /// </summary>
        INFERENCE_PRECISION_HINT,
        /// <summary>
        /// (Optional) Read-write property (uint32_t string) that backs the Performance Hints by giving
        /// additional information on how many inference requests the application will be
        /// keeping in flight; usually this value comes from the actual use-case (e.g.
        /// number of video cameras, or other sources of inputs).
        /// </summary>
        PERFORMANCE_HINT_NUM_REQUESTS,
        /// <summary>
        /// Read-write property; high-level OpenVINO model priority hint.
        /// </summary>
        MODEL_PRIORITY,
        /// <summary>
        /// Read-write property for setting the desired log level.
        /// </summary>
        LOG_LEVEL,
        /// <summary>
        /// Read-write property (string) for setting the performance counters option.
        /// </summary>
        PERF_COUNT,
        /// <summary>
        /// Read-write property (std::pair(std::string, Any)); device Priorities config option,
        /// with comma-separated devices listed in the desired priority.
        /// </summary>
        MULTI_DEVICE_PRIORITIES,
        /// <summary>
        /// Read-write property (string) for the high-level OpenVINO Execution hint.
        /// Unlike low-level properties that are individual (per-device), the hints are something that every device accepts
        /// and turns into device-specific settings.
        /// The execution mode hint controls preferred optimization targets (performance or accuracy) for a given model.
        /// </summary>
        EXECUTION_MODE_HINT,
        /// <summary>
        /// Read-write property to set whether to force-terminate TBB on ov::Core destruction.
        /// </summary>
        FORCE_TBB_TERMINATE,
        /// <summary>
        /// Read-write property to configure `mmap()` use for model read.
        /// </summary>
        ENABLE_MMAP,
        /// <summary>
        /// Read-write property; presumably the timeout used by automatic batching to collect a batch
        /// (mirrors ov::auto_batch_timeout) — TODO confirm against the C API docs.
        /// </summary>
        AUTO_BATCH_TIMEOUT,
    }
+
+ public static partial class Ov
+ {
+ ///
+ /// Get the read-write property(string) to set/get the directory which will be used to store any data cached by plugins.
+ ///
+ ///
+ /// The read-write property(string) to set/get the directory which will be used to store any data cached by plugins.
+ ///
+ /// The pair data.
+ public static KeyValuePair cache_dir(string dir)
+ {
+ return new KeyValuePair(PropertyKey.CACHE_DIR.ToString(), dir);
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/common/version.cs b/modules/csharp_api/csharp/common/version.cs
new file mode 100644
index 000000000..9414fee71
--- /dev/null
+++ b/modules/csharp_api/csharp/common/version.cs
@@ -0,0 +1,77 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// [struct] Represents version information that describes plugins and the OpenVINO library
+ ///
+ /// ov_runtime_c#_api
+ public struct Version
+ {
+ ///
+ /// A null terminated string with build number
+ ///
+ public string buildNumber;
+ ///
+ /// A null terminated description string
+ ///
+ public string description;
+ ///
+ /// Constructs a Version.
+ ///
+ ///
+ ///
+ public Version(string buildNumber, string description) {
+ this.buildNumber = buildNumber;
+ this.description = description;
+ }
+
+ ///
+ /// Convert Version to output string
+ ///
+ /// Output string
+ public string to_string()
+ {
+ string str = "";
+ str += description;
+ str += "\r\n Version : ";
+ str += buildNumber.Substring(0, buildNumber.IndexOf("-"));
+ str += "\r\n Build : ";
+ str += buildNumber;
+ return str;
+ }
+ }
    /// <summary>
    /// [struct] Represents version information that describes a device and the ov runtime library.
    /// </summary>
    public struct CoreVersion
    {
        /// <summary>
        /// A device name.
        /// </summary>
        public string device_name;
        /// <summary>
        /// The OpenVINO version of the plugin serving that device.
        /// </summary>
        public Version version;
    }
    /// <summary>
    /// [struct] Represents version information that describes all devices and the ov runtime library.
    /// Mirrors the native list struct filled in by ov_core_get_versions_by_device_name.
    /// </summary>
    public struct CoreVersionList
    {
        /// <summary>
        /// An array of device versions (native pointer to CoreVersion entries).
        /// </summary>
        public IntPtr core_version;
        /// <summary>
        /// A number of versions in the array.
        /// </summary>
        public ulong size;
    }
+}
diff --git a/modules/csharp_api/csharp/core/compiled_model.cs b/modules/csharp_api/csharp/core/compiled_model.cs
new file mode 100644
index 000000000..34cdf52c1
--- /dev/null
+++ b/modules/csharp_api/csharp/core/compiled_model.cs
@@ -0,0 +1,367 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// This class represents a compiled model.
+ ///
+ /// ov_runtime_c#_api
+ ///
+ /// A model is compiled by a specific device by applying multiple optimization
+ /// transformations, then mapping to compute kernels.
+ ///
+ public class CompiledModel : IDisposable
+ {
+ ///
+ /// [private]CompiledModel class pointer.
+ ///
+ private IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [private]CompiledModel class pointer.
+ ///
+ public IntPtr Ptr
+ {
+ get { return m_ptr; }
+ set { m_ptr = value; }
+ }
+ ///
+ /// Default Constructor
+ ///
+ public CompiledModel()
+ {
+ }
+
+ ///
+ /// Constructs CompiledModel from the initialized ptr.
+ ///
+ ///
+ public CompiledModel(IntPtr ptr)
+ {
+ this.m_ptr = ptr;
+ }
+ ///
+ /// CompiledModel()'s destructor
+ ///
+ ~CompiledModel()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_core_free(m_ptr);
+
+ m_ptr = IntPtr.Zero;
+ }
+ ///
+ /// Creates an inference request object used to infer the compiled model.
+ /// The created request has allocated input and output tensors (which can be changed later).
+ ///
+ /// InferRequest object
+ public InferRequest create_infer_request()
+ {
+ IntPtr infer_request_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_create_infer_request(m_ptr, ref infer_request_ptr));
+ return new InferRequest(infer_request_ptr);
+ }
+
+ ///
+ /// Get a const single input port of compiled_model, which only support single input compiled_model.
+ ///
+ /// The input port of compiled_model.
+ public Node get_input()
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_input(m_ptr, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+
+ ///
+ /// Get a const input port of compiled_model by name.
+ ///
+ /// input tensor name (string).
+ /// The input port of compiled_model.
+ public Node get_input(string tensor_name)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_input_by_name(m_ptr, ref c_tensor_name[0], ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+
+ ///
+ /// Get a const input port of compiled_model by port index.
+ ///
+ /// input tensor index.
+ /// The input port of compiled_model.
+ public Node get_input(ulong index)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_input_by_index(m_ptr, index, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+
+ ///
+ /// Get a const single output port of compiled_model, which only support single output model.
+ ///
+ /// The output port of compiled_model.
+ public Node get_output()
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(NativeMethods.ov_compiled_model_output(m_ptr, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get a const output port of compiled_model by name.
+ ///
+ /// output tensor name (string).
+ /// The output port of compiled_model.
+ public Node get_output(string tensor_name)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_output_by_name(m_ptr, ref c_tensor_name[0], ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get a const output port of compiled_model by port index.
+ ///
+ /// input tensor index.
+ /// The output port of compiled_model.
+ public Node get_output(ulong index)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_output_by_index(m_ptr, index, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get the input size of compiled_model.
+ ///
+ /// The input size of compiled_model.
+ public ulong get_inputs_size()
+ {
+ ulong input_size = 0;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_inputs_size(m_ptr, ref input_size));
+ return input_size;
+ }
+ ///
+ /// Get the output size of compiled_model.
+ ///
+ /// The output size.
+ public ulong get_outputs_size()
+ {
+ ulong output_size = 0;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_outputs_size(m_ptr, ref output_size));
+ return output_size;
+ }
+
+ ///
+ /// Gets a single input of a compiled model.
+ ///
+ ///
+ /// The input is represented as an output of the ov::op::v0::Parameter operation.
+ /// The input contains information about input tensor such as tensor shape, names, and element type.
+ ///
+ /// Compiled model input.
+ /// If a model has more than one input, this method throws ov::Exception.
+ public Input input()
+ {
+ Node node = get_input();
+ return new Input(node, 0);
+ }
+ ///
+ /// Gets input of a compiled model identified by @p index.
+ ///
+ /// The input contains information about input tensor such as tensor shape, names, and element type.
+ /// Index of input.
+ /// Compiled model input.
+ /// The method throws ov::Exception if input with the specified index @p i is not found.
+ public Input input(ulong index)
+ {
+ Node node = get_input(index);
+ return new Input(node, index);
+ }
+ ///
+ /// Gets input of a compiled model identified by @p tensor_name.
+ ///
+ /// The input contains information about input tensor such as tensor shape, names, and element type.
+ /// Output tensor name.
+ /// Compiled model input.
+ /// The method throws ov::Exception if input with the specified tensor name @p tensor_name is not found.
+ public Input input(string tensor_name)
+ {
+ Node node = get_input(tensor_name);
+ return new Input(node, 0);
+ }
+
+ ///
+ /// Gets a single output of a compiled model.
+ ///
+ ///
+ /// The output is represented as an output from the ov::op::v0::Result operation.
+ /// The output contains information about output tensor such as tensor shape, names, and element type.
+ ///
+ /// Compiled model output.
+ /// If a model has more than one output, this method throws ov::Exception.
+ public Output output()
+ {
+ Node node = get_output();
+ return new Output(node, 0);
+ }
+ ///
+ /// Gets output of a compiled model identified by @p index.
+ ///
+ /// The output contains information about output tensor such as tensor shape, names, and element type.
+ /// Index of output.
+ /// Compiled model output.
+ /// The method throws ov::Exception if output with the specified index @p index is not found.
+ public Output output(ulong index)
+ {
+ Node node = get_output(index);
+ return new Output(node, index);
+ }
+ ///
+ /// Gets output of a compiled model identified by @p tensor_name.
+ ///
+ /// The output contains information about output tensor such as tensor shape, names, and element type.
+ /// Output tensor name.
+ /// Compiled model output.
+ /// The method throws ov::Exception if output with the specified tensor name @p tensor_name is not found.
+ public Output output(string tensor_name)
+ {
+ Node node = get_output(tensor_name);
+ return new Output(node, 0);
+ }
+
+ ///
+ /// Gets all inputs of a compiled model.
+ ///
+ ///
+ /// Inputs are represented as a vector of outputs of the ov::op::v0::Parameter operations.
+ /// They contain information about input tensors such as tensor shape, names, and element type.
+ ///
+ /// List of model inputs.
+ public List inputs()
+ {
+ ulong input_size = get_inputs_size();
+ List inputs = new List ();
+ for (ulong index = 0; index < input_size; ++index)
+ {
+ inputs.Add(input(index));
+ }
+ return inputs;
+ }
+
+ ///
+ /// Get all outputs of a compiled model.
+ ///
+ ///
+ /// Outputs are represented as a vector of output from the ov::op::v0::Result operations.
+ /// Outputs contain information about output tensors such as tensor shape, names, and element type.
+ ///
+ /// List of model outputs.
+ public List outputs()
+ {
+ ulong output_size = get_outputs_size();
+ List outputs = new List();
+ for (ulong index = 0; index < output_size; ++index)
+ {
+ outputs.Add(output(index));
+ }
+ return outputs;
+ }
+ ///
+ /// Gets runtime model information from a device.
+ ///
+ ///
+ /// This object represents an internal device-specific model that is optimized for a particular
+ /// accelerator. It contains device-specific nodes, runtime information and can be used only
+ /// to understand how the source model is optimized and which kernels, element types, and layouts
+ /// are selected for optimal inference.
+ ///
+ ///
+ public Model get_runtime_model()
+ {
+ IntPtr model_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_get_runtime_model(m_ptr, ref model_ptr));
+ return new Model(model_ptr);
+ }
+
+ ///
+ /// Exports the current compiled model to an output model_path.
+ /// The exported model can also be imported via the ov::Core::import_model method.
+ ///
+ /// Output path to store the model to.
+ public void export_model(string model_path)
+ {
+ sbyte[] c_model_path = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(model_path));
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_export_model(m_ptr, ref c_model_path[0]));
+ }
+
+ ///
+ /// Sets properties for the current compiled model.
+ ///
+ /// Map of pairs: (property name, property value).
+ public void set_property(KeyValuePair properties)
+ {
+ IntPtr property_key = Marshal.StringToHGlobalAnsi(properties.Key);
+ IntPtr property_value = Marshal.StringToHGlobalAnsi(properties.Value);
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_set_property(m_ptr, property_key, property_value));
+ }
+ ///
+ /// Gets properties for current compiled model
+ ///
+ ///
+ /// The method is responsible for extracting information that affects compiled model inference.
+ /// The list of supported configuration values can be extracted via CompiledModel::get_property
+ /// with the ov::supported_properties key, but some of these keys cannot be changed dynamically,
+ /// for example, ov::device::id cannot be changed if a compiled model has already been compiled
+ /// for a particular device.
+ ///
+ /// Property key, can be found in openvino/runtime/properties.hpp.
+ /// Property value.
+ public string get_property(string property_key)
+ {
+ sbyte[] c_property_key = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(property_key));
+ IntPtr property_value_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_get_property(m_ptr, ref c_property_key[0],
+ ref property_value_ptr));
+ return Marshal.PtrToStringAnsi(property_value_ptr);
+ }
+
+ ///
+ /// Returns pointer to device-specific shared context on a remote accelerator device that was used
+ /// to create this CompiledModel.
+ ///
+ /// A context.
+ public RemoteContext get_context() {
+ IntPtr context_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_compiled_model_get_context(m_ptr, ref context_ptr));
+ return new RemoteContext(context_ptr);
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/core.cs b/modules/csharp_api/csharp/core/core.cs
new file mode 100644
index 000000000..280c0ad63
--- /dev/null
+++ b/modules/csharp_api/csharp/core/core.cs
@@ -0,0 +1,500 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// This class represents an OpenVINO runtime Core entity.
+ /// ov_runtime_c#_api
+ ///
+ /// User applications can create several Core class instances, but in this case the underlying plugins
+ /// are created multiple times and not shared between several Core instances.The recommended way is to have
+ /// a single Core instance per application.
+ ///
+ public class Core : IDisposable
+ {
+ ///
+ /// [private]Core class pointer.
+ ///
+ private IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [public]Core class pointer.
+ ///
+ public IntPtr Ptr
+ {
+ get { return m_ptr; }
+ set { m_ptr = value; }
+ }
+
+ ///
+ /// Represent all available devices.
+ ///
+ struct ov_available_devices_t
+ {
+ ///
+ /// devices' name
+ ///
+ public IntPtr devices;
+ ///
+ /// devices' number
+ ///
+ public ulong size;
+ }
+
+ ///
+ /// Constructs an OpenVINO Core instance with devices and their plugins description.
+ /// There are two ways how to configure device plugins:
+ /// 1. (default) Use XML configuration file in case of dynamic libraries build;
+ /// 2. Use strictly defined configuration in case of static libraries build.
+ ///
+ ///
+ /// Path to the .xml file with plugins to load from. If the XML configuration file is not
+ /// specified, default OpenVINO Runtime plugins are loaded from:
+ /// 1. (dynamic build) default `plugins.xml` file located in the same folder as OpenVINO runtime shared library;
+ /// 2. (static build) statically defined configuration.In this case path to the.xml file is ignored.
+ ///
+ public Core(string xml_config_file = null)
+ {
+ if (!String.IsNullOrEmpty(xml_config_file))
+ {
+ HandleException.handler(
+ NativeMethods.ov_core_create_with_config(xml_config_file, ref m_ptr));
+ }
+ else
+ {
+ HandleException.handler(
+ NativeMethods.ov_core_create(ref m_ptr));
+ }
+ }
+ ///
+ /// Core's destructor
+ ///
+ ~Core() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_core_free(m_ptr);
+
+ m_ptr = IntPtr.Zero;
+ }
+ ///
+ /// Returns device plugins version information.
+ ///
+ /// Device name to identify a plugin.
+ /// A vector of versions.
+ ///
+ /// Device name can be complex and identify multiple devices at once like `HETERO:CPU,GPU`;
+ /// in this case, std::map contains multiple entries, each per device.
+ ///
+ public KeyValuePair get_versions(string device_name)
+ {
+ if (string.IsNullOrEmpty(device_name))
+ {
+ throw new ArgumentNullException(nameof(device_name));
+ }
+ int l = Marshal.SizeOf(typeof(CoreVersionList));
+ IntPtr ptr_core_version_s = Marshal.AllocHGlobal(l);
+ sbyte[] c_device_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+ HandleException.handler(
+ NativeMethods.ov_core_get_versions_by_device_name(m_ptr, ref c_device_name[0], ptr_core_version_s));
+ var temp1 = Marshal.PtrToStructure(ptr_core_version_s, typeof(CoreVersionList));
+ CoreVersionList core_version_s = (CoreVersionList)temp1;
+ var temp2 = Marshal.PtrToStructure(core_version_s.core_version, typeof(CoreVersion));
+ CoreVersion core_version = (CoreVersion)temp2;
+ KeyValuePair value = new KeyValuePair(core_version.device_name, core_version.version);
+ NativeMethods.ov_core_versions_free(ptr_core_version_s);
+ return value;
+ }
+
+
+ ///
+ /// Reads models from IR / ONNX / PDPD / TF / TFLite file formats.
+ ///
+ /// Path to a model.
+ /// Path to a data file.
+ /// A model.
+ ///
+ ///
+ /// For IR format (*.bin):
+ /// if `bin_path` is empty, will try to read a bin file with the same name as xml and
+ /// if the bin file with the same name is not found, will load IR without weights.
+ /// For the following file formats the `bin_path` parameter is not used:
+ ///
+ /// ONNX format (*.onnx)
+ /// PDPD(*.pdmodel)
+ /// TF(*.pb)
+ /// TFLite(*.tflite)
+ ///
+ public Model read_model(string model_path, string bin_path = "")
+ {
+ if (string.IsNullOrEmpty(model_path))
+ {
+ throw new ArgumentNullException(nameof(model_path));
+ }
+ IntPtr model_ptr = new IntPtr();
+ sbyte[] c_model_path = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(model_path));
+
+ if (bin_path == "")
+ {
+ sbyte c_bin_path = new sbyte();
+ HandleException.handler(
+ NativeMethods.ov_core_read_model(m_ptr, ref c_model_path[0], ref c_bin_path, ref model_ptr));
+ }
+ else
+ {
+ sbyte[] c_bin_path = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(bin_path));
+ HandleException.handler(
+ NativeMethods.ov_core_read_model(m_ptr, ref c_model_path[0], ref c_bin_path[0], ref model_ptr));
+ }
+
+ return new Model(model_ptr);
+ }
+
+ ///
+ /// Reads models from IR / ONNX / PDPD / TF / TFLite formats.
+ ///
+ /// String with a model in IR / ONNX / PDPD / TF / TFLite format.
+ /// Shared pointer to a constant tensor with weights.
+ ///
+ /// Created model object shares the weights with the @p weights object.
+ /// Thus, do not create @p weights on temporary data that can be freed later, since the model constant data will point to an invalid memory.
+ ///
+ /// A model.
+ public Model read_model(string model_path, Tensor weights)
+ {
+ if (string.IsNullOrEmpty(model_path))
+ {
+ throw new ArgumentNullException(nameof(model_path));
+ }
+ if (weights == null)
+ {
+ throw new ArgumentNullException(nameof(weights));
+ }
+ FileStream fs = new FileStream(model_path, FileMode.Open, FileAccess.Read);
+ long len = fs.Seek(0, SeekOrigin.End);
+ fs.Seek(0, SeekOrigin.Begin);
+ byte[] data = new byte[len + 1];
+ fs.Read(data, 0, (int)len);
+ fs.Close();
+ IntPtr model_ptr = new IntPtr();
+ HandleException.handler(
+ NativeMethods.ov_core_read_model_from_memory(m_ptr, ref data[0], weights.Ptr, ref model_ptr));
+ return new Model(model_ptr);
+ }
+ ///
+ /// Reads models from IR / ONNX / PDPD / TF / TFLite formats.
+ ///
+ /// String with a model in IR / ONNX / PDPD / TF / TFLite format,
+ /// You can obtain input content through the Ov.content_from_file() method.
+ /// Shared pointer to a constant tensor with weights.
+ ///
+ public Model read_model(byte[] model_str, Tensor weights)
+ {
+ IntPtr model_ptr = new IntPtr();
+ HandleException.handler(
+ NativeMethods.ov_core_read_model_from_memory(m_ptr, ref model_str[0], weights.Ptr, ref model_ptr));
+ return new Model(model_ptr);
+ }
+
+
+ ///
+ /// Creates a compiled model from a source model object.
+ ///
+ /// Model object acquired from Core::read_model.
+ /// Optional map of pairs: (property name, property value) relevant only for this load operation.
+ /// A compiled model.
+ ///
+ /// Users can create as many compiled models as they need and use
+ /// them simultaneously (up to the limitation of the hardware resources).
+ ///
+ public CompiledModel compile_model(Model model, Dictionary properties = null)
+ {
+ return compile_model(model, "AUTO", properties);
+ }
+
+ ///
+ /// Creates and loads a compiled model from a source model to the default OpenVINO device selected by the AUTO
+ ///
+ /// Model object acquired from Core::read_model.
+ /// Name of a device to load a model to.
+ /// Optional map of pairs: (property name, property value) relevant only for this load operation.
+ /// A compiled model.
+ ///
+ /// Users can create as many compiled models as they need and use
+ /// them simultaneously (up to the limitation of the hardware resources).
+ ///
+ public CompiledModel compile_model(Model model, string device_name, Dictionary properties=null)
+ {
+ if (model == null)
+ {
+ throw new ArgumentNullException(nameof(model));
+ }
+ if (string.IsNullOrEmpty(device_name))
+ {
+ throw new ArgumentNullException(nameof(device_name));
+ }
+ IntPtr compiled_model_ptr = new IntPtr();
+ sbyte[] c_device = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+ if (properties == null)
+ {
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model(m_ptr, model.m_ptr, ref c_device[0], 0, ref compiled_model_ptr));
+ }
+ else if (properties.Count==1)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model(m_ptr, model.m_ptr, ref c_device[0], 2, ref compiled_model_ptr,
+ inputs[0], inputs[1]));
+ }
+ else if (properties.Count == 2)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model(m_ptr, model.m_ptr, ref c_device[0], 4, ref compiled_model_ptr,
+ inputs[0], inputs[1], inputs[2], inputs[3]));
+ }
+ else if (properties.Count == 3)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model(m_ptr, model.m_ptr, ref c_device[0], 6, ref compiled_model_ptr,
+ inputs[0], inputs[1], inputs[2], inputs[3], inputs[4], inputs[5]));
+ }
+ else
+ {
+ throw new Exception("Only supports parameter quantities of 0, 1, 2, and 3.");
+ }
+ return new CompiledModel(compiled_model_ptr);
+ }
+
+ ///
+ /// Reads and loads a compiled model from the IR/ONNX/PDPD file to the default OpenVINO device selected by the AUTO plugin.
+ ///
+ /// Path to a model.
+ /// Optional map of pairs: (property name, property value) relevant only for this load operation.
+ ///
+ /// This can be more efficient than using the Core::read_model + Core::compile_model(model_in_memory_object) flow,
+ /// especially for cases when caching is enabled and a cached model is availab
+ ///
+ /// A compiled model.
+ public CompiledModel compile_model(string model_path, Dictionary properties = null)
+ {
+ return compile_model(model_path, "AUTO", properties);
+ }
+ ///
+ /// Reads a model and creates a compiled model from the IR/ONNX/PDPD file.
+ ///
+ /// Path to a model.
+ /// Name of a device to load a model to.
+ /// Optional map of pairs: (property name, property value) relevant only for this load operation.
+ ///
+ /// This can be more efficient than using the Core::read_model + Core::compile_model(model_in_memory_object) flow,
+ /// especially for cases when caching is enabled and a cached model is availab
+ ///
+ /// A compiled model.
+ public CompiledModel compile_model(string model_path, string device_name, Dictionary properties = null)
+ {
+ if (string.IsNullOrEmpty(model_path))
+ {
+ throw new ArgumentNullException(nameof(model_path));
+ }
+ if (string.IsNullOrEmpty(device_name))
+ {
+ throw new ArgumentNullException(nameof(device_name));
+ }
+ IntPtr compiled_model_ptr = new IntPtr();
+ sbyte[] c_model = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(model_path));
+ sbyte[] c_device = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+ if (properties == null)
+ {
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model_from_file(m_ptr, ref c_model[0], ref c_device[0], 0, ref compiled_model_ptr));
+ }
+ else if (properties.Count == 1)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model_from_file(m_ptr, ref c_model[0], ref c_device[0], 2, ref compiled_model_ptr,
+ inputs[0], inputs[1]));
+ }
+ else if (properties.Count == 2)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model_from_file(m_ptr, ref c_model[0], ref c_device[0], 4, ref compiled_model_ptr,
+ inputs[0], inputs[1], inputs[2], inputs[3]));
+ }
+ else if (properties.Count == 3)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_compile_model_from_file(m_ptr, ref c_model[0], ref c_device[0], 6, ref compiled_model_ptr,
+ inputs[0], inputs[1], inputs[2], inputs[3], inputs[4], inputs[5]));
+ }
+ else
+ {
+ throw new Exception("Only supports parameter quantities of 0, 1, 2, and 3.");
+ }
+ return new CompiledModel(compiled_model_ptr);
+ }
+ ///
+ /// Sets properties for a device, acceptable keys can be found in PropertyKey.
+ ///
+ /// Name of a device to load a model to.
+ ///
+ /// The read-write property(string) to set/get the directory which will be used to store any data cached by plugins.
+ ///
+ public void set_property(string device_name, KeyValuePair properties)
+ {
+ sbyte[] c_device = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+ IntPtr key = Marshal.StringToHGlobalAnsi(properties.Key);
+ IntPtr value = Marshal.StringToHGlobalAnsi(properties.Value);
+ HandleException.handler(
+ NativeMethods.ov_core_set_property(m_ptr, ref c_device[0], key, value));
+ }
+
+ public void set_property(string device_name, Dictionary properties)
+ {
+ sbyte[] c_device = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+
+ if (properties.Count == 1)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_set_property(m_ptr, ref c_device[0], inputs[0], inputs[1]));
+ }
+ else if (properties.Count == 2)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_set_property(m_ptr, ref c_device[0], inputs[0], inputs[1], inputs[2], inputs[3]));
+ }
+ else if (properties.Count == 3)
+ {
+ List inputs = new List();
+ foreach (var item in properties)
+ {
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Key));
+ inputs.Add(Marshal.StringToHGlobalAnsi(item.Value));
+ }
+ HandleException.handler(
+ NativeMethods.ov_core_set_property(m_ptr, ref c_device[0], inputs[0], inputs[1],
+ inputs[2], inputs[3], inputs[4], inputs[5]));
+ }
+ else
+ {
+ throw new Exception("Only supports parameter quantities of 1, 2, and 3.");
+ }
+
+ }
+
+ ///
+ /// Gets properties related to device behaviour.
+ /// The method extracts information that can be set via the set_property method.
+ ///
+ /// Name of a device to load a model to.
+ /// A header for advanced hardware specific properties for OpenVINO runtime devices.
+ /// Properties related to device behaviour.
+ public string get_property(string device_name, PropertyKey key)
+ {
+ IntPtr value = IntPtr.Zero;
+ sbyte[] c_device = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+ sbyte[] c_key = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(key.ToString()));
+ HandleException.handler(
+ NativeMethods.ov_core_get_property(m_ptr, ref c_device[0], ref c_key[0], ref value));
+ return Marshal.PtrToStringAnsi(value);
+ }
+ ///
+ /// Returns devices available for inference.
+ /// Core objects go over all registered plugins and ask about available devices.
+ ///
+ /// A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, GNA }.
+ ///
+ /// If there is more than one device of a specific type, they are enumerated with the .# suffix.
+ /// Such enumerated device can later be used as a device name in all Core methods like Core::compile_model,
+ /// Core::query_model, Core::set_property and so on.
+ ///
+ public List get_available_devices()
+ {
+ int l = Marshal.SizeOf(typeof(ov_available_devices_t));
+ IntPtr devices_ptr = Marshal.AllocHGlobal(l);
+ HandleException.handler(
+ NativeMethods.ov_core_get_available_devices(m_ptr, devices_ptr));
+
+ var temp1 = Marshal.PtrToStructure(devices_ptr, typeof(ov_available_devices_t));
+
+ ov_available_devices_t devices_s = (ov_available_devices_t)temp1;
+ IntPtr[] devices_ptrs = new IntPtr[devices_s.size];
+ Marshal.Copy(devices_s.devices, devices_ptrs, 0, (int)devices_s.size);
+ List devices = new List();
+ for (int i = 0; i < (int)devices_s.size; ++i)
+ {
+ devices.Add(Marshal.PtrToStringAnsi(devices_ptrs[i]));
+ }
+ NativeMethods.ov_available_devices_free(devices_ptr);
+ return devices;
+ }
+
+ public CompiledModel import_model(string model_path, string device_name = "AUTO")
+ {
+ IntPtr value = IntPtr.Zero;
+ byte[] data = Ov.content_from_file(model_path);
+ sbyte[] c_device = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(device_name));
+ HandleException.handler(
+ NativeMethods.ov_core_import_model(m_ptr, ref data[0], (ulong)data.Length, ref c_device[0], ref value));
+ return new CompiledModel(value);
+ }
+ }
+}
+
diff --git a/modules/csharp_api/csharp/core/dimension.cs b/modules/csharp_api/csharp/core/dimension.cs
new file mode 100644
index 000000000..7d7f4e56f
--- /dev/null
+++ b/modules/csharp_api/csharp/core/dimension.cs
@@ -0,0 +1,84 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using static OpenVinoSharp.Ov;
+using ov_dimension = OpenVinoSharp.Ov.ov_dimension;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// Class representing a dimension, which may be dynamic (undetermined until runtime),
+ /// in a shape or shape-like object.
+ ///
+ /// Static dimensions may be implicitly converted from value_type.
+ /// A dynamic dimension is constructed with Dimension() or Dimension::dynamic().
+ public class Dimension
+ {
+ ///
+ /// The ov_dimension struct.
+ ///
+ ov_dimension m_dimension;
+ ///
+ /// Construct a static dimension.
+ ///
+ /// Value of the dimension.
+ public Dimension(long dimension)
+ {
+ m_dimension.min = dimension;
+ m_dimension.max = dimension;
+ }
+ ///
+ /// Construct a dynamic dimension with ov_dimension struct.
+ ///
+ /// The ov_dimension struct.
+ public Dimension(ov_dimension dimension)
+ {
+ m_dimension = dimension;
+ }
+ ///
+ /// Construct a dynamic dimension with bounded range
+ ///
+ /// The lower inclusive limit for the dimension
+ /// The upper inclusive limit for the dimension
+ public Dimension(long min_dimension, long max_dimension)
+ {
+ m_dimension.min = min_dimension;
+ m_dimension.max = max_dimension;
+ }
+ ///
+ /// Get ov_dimension struct.
+ ///
+ /// Return ov_dimension struct.
+ public ov_dimension get_dimension()
+ {
+ return m_dimension;
+ }
+ ///
+ /// Get max.
+ ///
+ /// Dimension max.
+ public long get_max()
+ {
+ return m_dimension.max;
+ }
+
+ ///
+ /// Get min.
+ ///
+ /// Dimension min.
+ public long get_min()
+ {
+ return m_dimension.min;
+ }
+ ///
+ /// Check whether this dimension is dynamic.
+ ///
+ /// Boolean, true is dynamic and false is static.
+ public bool is_dynamic()
+ {
+ return (m_dimension.min == 0 && m_dimension.max == -1) ? true : false;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/infer_request.cs b/modules/csharp_api/csharp/core/infer_request.cs
new file mode 100644
index 000000000..66058a3d4
--- /dev/null
+++ b/modules/csharp_api/csharp/core/infer_request.cs
@@ -0,0 +1,455 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Numerics;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+using static OpenVinoSharp.Ov;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// This is a class of infer request that can be run in asynchronous or synchronous manners.
+ ///
+ /// ov_runtime_c#_api
+ public class InferRequest : IDisposable
+ {
+ ///
+ /// [private]InferRequest class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]InferRequest class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Constructs InferRequest from the initialized IntPtr.
+ ///
+ ///
+ public InferRequest(IntPtr ptr)
+ {
+ this.m_ptr = ptr;
+ }
+ ///
+ /// InferRequest's destructor
+ ///
+ ~InferRequest()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_infer_request_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+ ///
+ /// Sets an input/output tensor to infer on.
+ ///
+ /// Name of the input or output tensor.
+ /// Reference to the tensor. The element_type and shape of the tensor must match
+ /// the model's input/output element_type and size.
+ public void set_tensor(string tensor_name,Tensor tensor)
+ {
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor(
+ m_ptr, ref c_tensor_name[0], tensor.Ptr));
+ }
+ ///
+ /// Sets an input/output tensor to infer.
+ ///
+ /// Node of the input or output tensor.
+ /// Reference to a tensor. The element_type and shape of a tensor must match
+ /// the model's input/output element_type and size.
+ public void set_tensor(Node node, Tensor tensor)
+ {
+ if (node.node_type == Node.NodeType.e_const)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor_by_const_port(
+ m_ptr, node.Ptr, tensor.Ptr));
+ }
+ else {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor_by_port(
+ m_ptr, node.Ptr, tensor.Ptr));
+ }
+ }
+ ///
+ /// Sets an input/output tensor to infer.
+ ///
+ ///
+ /// Port of the input or output tensor. Use the following methods to get the ports:
+ /// - Model.input()
+ /// - Model.const_input()
+ /// - Model.inputs()
+ /// - Model.const_inputs()
+ /// - Model.output()
+ /// - Model.const_output()
+ /// - Model.outputs()
+ /// - Model.const_outputs()
+ /// - CompiledModel.input()
+ /// - CompiledModel.const_input()
+ /// - CompiledModel.inputs()
+ /// - CompiledModel.const_inputs()
+ /// - CompiledModel.output()
+ /// - CompiledModel.const_output()
+ /// - CompiledModel.outputs()
+ /// - CompiledModel.const_outputs()
+ ///
+ /// Reference to a tensor. The element_type and shape of a tensor must match
+ /// the model's input/output element_type and size.
+ public void set_tensor(Input port, Tensor tensor)
+ {
+ if (port.get_node().node_type == Node.NodeType.e_const)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor_by_const_port(
+ m_ptr, port.get_node().Ptr, tensor.Ptr));
+ }
+ else
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor_by_port(
+ m_ptr, port.get_node().Ptr, tensor.Ptr));
+ }
+ }
+ ///
+ /// Sets an input/output tensor to infer.
+ ///
+ ///
+ /// Port of the input or output tensor. Use the following methods to get the ports:
+ /// - Model.input()
+ /// - Model.const_input()
+ /// - Model.inputs()
+ /// - Model.const_inputs()
+ /// - Model.output()
+ /// - Model.const_output()
+ /// - Model.outputs()
+ /// - Model.const_outputs()
+ /// - CompiledModel.input()
+ /// - CompiledModel.const_input()
+ /// - CompiledModel.inputs()
+ /// - CompiledModel.const_inputs()
+ /// - CompiledModel.output()
+ /// - CompiledModel.const_output()
+ /// - CompiledModel.outputs()
+ /// - CompiledModel.const_outputs()
+ ///
+ /// Reference to a tensor. The element_type and shape of a tensor must match
+ /// the model's input/output element_type and size.
+ public void set_tensor(Output port, Tensor tensor)
+ {
+ if (port.get_node().node_type == Node.NodeType.e_const)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor_by_const_port(
+ m_ptr, port.get_node().Ptr, tensor.Ptr));
+ }
+ else
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_tensor_by_port(
+ m_ptr, port.get_node().Ptr, tensor.Ptr));
+ }
+ }
+
+ ///
+ /// Sets an input tensor to infer.
+ ///
+ /// Index of the input tensor. If @p idx is greater than the number of model inputs,
+ /// an exception is thrown.
+ /// Reference to the tensor. The element_type and shape of the tensor must match
+ /// the model's input/output element_type and size.
+ public void set_input_tensor(ulong index, Tensor tensor)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_input_tensor_by_index(
+ m_ptr, index, tensor.Ptr));
+ }
+
+ ///
+ /// Sets an input tensor to infer models with single input.
+ ///
+ /// If model has several inputs, an exception is thrown.
+ /// Reference to the input tensor.
+ public void set_input_tensor(Tensor tensor)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_input_tensor(
+ m_ptr, tensor.Ptr));
+ }
+ ///
+ /// Sets an output tensor to infer.
+ /// Index of the input preserved across Model, CompiledModel, and InferRequest.
+ ///
+ /// Index of the output tensor.
+ /// Reference to the output tensor. The type of the tensor must match the model
+ /// output element type and shape.
+ public void set_output_tensor(ulong index, Tensor tensor)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_output_tensor_by_index(
+ m_ptr, index, tensor.Ptr));
+ }
+ ///
+ /// Sets an output tensor to infer models with single output.
+ ///
+ /// If model has several outputs, an exception is thrown.
+ /// Reference to the output tensor.
+ public void set_output_tensor(Tensor tensor)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_set_output_tensor(
+ m_ptr, tensor.Ptr));
+ }
+
+
+
+ ///
+ /// Gets an input/output tensor for inference by tensor name.
+ ///
+ /// Name of a tensor to get.
+ /// The tensor with name @p tensor_name. If the tensor is not found, an exception is thrown.
+ public Tensor get_tensor(string tensor_name)
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_tensor(m_ptr, ref c_tensor_name[0], ref tensor_ptr));
+ return new Tensor(tensor_ptr);
+ }
+
+ ///
+ /// Gets an input/output tensor for inference by node.
+ ///
+ /// If the tensor with the specified @n node is not found, an exception is thrown.
+ /// Node of the tensor to get.
+ /// Tensor for the node @n node.
+ public Tensor get_tensor(Node node)
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+ ExceptionStatus status;
+ if (node.node_type == Node.NodeType.e_const)
+ {
+ status = NativeMethods.ov_infer_request_get_tensor_by_const_port(
+ m_ptr, node.Ptr, ref tensor_ptr);
+ }
+ else
+ {
+ status = NativeMethods.ov_infer_request_get_tensor_by_port(
+ m_ptr, node.Ptr, ref tensor_ptr);
+ }
+
+ if (status != 0)
+ {
+ System.Diagnostics.Debug.WriteLine("set_tensor get_tensor error : " + status.ToString());
+ }
+ return new Tensor(tensor_ptr);
+ }
+
+ ///
+ /// Gets an input/output tensor for inference.
+ ///
+ /// If the tensor with the specified @p port is not found, an exception is thrown.
+ /// Port of the tensor to get.
+ /// Tensor for the port @p port.
+ public Tensor get_tensor(Output port)
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+ if (port.get_node().node_type == Node.NodeType.e_const)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_tensor_by_const_port(
+ m_ptr, port.get_node().Ptr, ref tensor_ptr));
+ }
+ else
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_tensor_by_port(
+ m_ptr, port.get_node().Ptr, ref tensor_ptr));
+ }
+ return new Tensor(tensor_ptr);
+ }
+ ///
+ /// Gets an input/output tensor for inference.
+ ///
+ /// If the tensor with the specified @p port is not found, an exception is thrown.
+ /// Port of the tensor to get.
+ /// Tensor for the port @p port.
+ public Tensor get_tensor(Input port)
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+ if (port.get_node().node_type == Node.NodeType.e_const)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_tensor_by_const_port(
+ m_ptr, port.get_node().Ptr, ref tensor_ptr));
+ }
+ else
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_tensor_by_port(
+ m_ptr, port.get_node().Ptr, ref tensor_ptr));
+ }
+ return new Tensor(tensor_ptr);
+ }
+
+ ///
+ /// Gets an input tensor for inference.
+ ///
+ /// Index of the tensor to get.
+ /// Tensor with the input index @p idx. If the tensor with the specified @p idx is not found,
+ /// an exception is thrown.
+ public Tensor get_input_tensor(ulong index)
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_input_tensor_by_index(
+ m_ptr, index, ref tensor_ptr));
+ return new Tensor(tensor_ptr);
+ }
+
+ ///
+ /// Gets an input tensor for inference.
+ ///
+ /// The input tensor for the model. If model has several inputs, an exception is thrown.
+ public Tensor get_input_tensor()
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_input_tensor(
+ m_ptr, ref tensor_ptr));
+ return new Tensor(tensor_ptr);
+ }
+
+ ///
+ /// Gets an output tensor for inference.
+ ///
+ /// Index of the tensor to get.
+ /// Tensor with the output index @p idx. If the tensor with the specified @p idx is not found,
+ /// an exception is thrown.
+ public Tensor get_output_tensor(ulong index)
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_output_tensor_by_index(
+ m_ptr, index, ref tensor_ptr));
+ return new Tensor(tensor_ptr);
+ }
+
+ ///
+ /// Gets an output tensor for inference.
+ ///
+ /// Output tensor for the model. If model has several outputs, an exception is thrown.
+ public Tensor get_output_tensor()
+ {
+ IntPtr tensor_ptr = IntPtr.Zero;
+
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_output_tensor(m_ptr, ref tensor_ptr));
+ return new Tensor(tensor_ptr);
+ }
+ ///
+ /// Infers specified input(s) in synchronous mode.
+ ///
+ ///
+ /// It blocks all methods of InferRequest while request is ongoing (running or waiting in a queue).
+ /// Calling any method leads to throwing the ov::Busy exception.
+ ///
+ public void infer()
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_infer(m_ptr));
+ }
+
+ ///
+ /// Cancels inference request.
+ ///
+ public void cancel()
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_cancel(m_ptr));
+ }
+ ///
+ /// Starts inference of specified input(s) in asynchronous mode.
+ ///
+ ///
+ /// It returns immediately. Inference starts also immediately.
+ /// Calling any method while the request is in a running state leads to throwing the ov::Busy exception.
+ ///
+ public void start_async()
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_start_async(m_ptr));
+ }
+ ///
+ /// Waits for the result to become available. Blocks until the result becomes available.
+ ///
+ public void wait()
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_wait(m_ptr));
+ }
+
+ ///
+ /// Waits for the result to become available. Blocks until the specified timeout has elapsed or the result
+ /// becomes available, whichever comes first.
+ ///
+ /// Maximum duration, in milliseconds, to block for.
+ /// True if inference request is ready and false, otherwise.
+ public bool wait_for(long timeout)
+ {
+ HandleException.handler(
+ NativeMethods.ov_infer_request_wait_for(m_ptr, timeout));
+ return true;
+ }
+
+ ///
+ /// Queries performance measures per layer to identify the most time consuming operation.
+ ///
+ /// Not all plugins provide meaningful data.
+ /// List of profiling information for operations in a model.
+ public List get_profiling_info()
+ {
+ int l = Marshal.SizeOf(typeof(ov_profiling_info_list));
+ IntPtr ptr = Marshal.AllocHGlobal(l);
+ ov_profiling_info_list profiling_info_list = new ov_profiling_info_list();
+
+ profiling_info_list.size = 0;
+ profiling_info_list.profiling_infos = IntPtr.Zero;
+
+ Marshal.StructureToPtr(profiling_info_list, ptr, false);
+
+ HandleException.handler(
+ NativeMethods.ov_infer_request_get_profiling_info(m_ptr, ptr));
+
+ var tempp = Marshal.PtrToStructure(ptr, typeof(ov_profiling_info_list));
+ profiling_info_list = (ov_profiling_info_list)tempp;
+ l = Marshal.SizeOf(typeof(Ov.ProfilingInfo));
+
+ List profiling_infos = new List();
+ for (int i = 0; i < (int)profiling_info_list.size; ++i)
+ {
+ var tempt = Marshal.PtrToStructure(profiling_info_list.profiling_infos, typeof(Ov.ProfilingInfo));
+ Ov.ProfilingInfo profiling_info = (Ov.ProfilingInfo)tempt;
+ profiling_infos.Add(profiling_info);
+ }
+ HandleException.handler(
+ NativeMethods.ov_profiling_info_list_free(ptr));
+ return profiling_infos;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/layout.cs b/modules/csharp_api/csharp/core/layout.cs
new file mode 100644
index 000000000..78166b66f
--- /dev/null
+++ b/modules/csharp_api/csharp/core/layout.cs
@@ -0,0 +1,96 @@
+using System;
+using System.Collections.Generic;
+using System.Drawing.Imaging;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// ov::Layout represents the text information of tensor's dimensions/axes. E.g. layout `NCHW` means that 4D
+ /// tensor `{-1, 3, 480, 640}` will have:
+ /// - 0: `N = -1`: batch dimension is dynamic
+ /// - 1: `C = 3`: number of channels is '3'
+ /// - 2: `H = 480`: image height is 480
+ /// - 3: `W = 640`: image width is 640
+ ///
+ ///
+ /// `ov::Layout` can be specified for:
+ /// - Preprocessing purposes. E.g.
+ /// - To apply normalization (means/scales) it is usually required to set 'C' dimension in a layout.
+ /// - To resize the image to specified width/height it is needed to set 'H' and 'W' dimensions in a layout
+ /// - To transpose image - source and target layout can be set (see
+ /// `ov::preprocess::PreProcessSteps::convert_layout`)
+ /// - To set/get model's batch (see `ov::get_batch`/`ov::set_batch') it is required in general to specify 'N' dimension
+ /// in layout for appropriate inputs
+ ///
+ public class Layout : IDisposable
+ {
+ ///
+ /// [private]Layout class pointer.
+ ///
+ private IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [public]Layout class pointer.
+ ///
+ public IntPtr Ptr
+ {
+ get { return m_ptr; }
+ set { m_ptr = value; }
+ }
+
+ ///
+ /// Constructs a Layout with static or dynamic layout information based on string representation.
+ ///
+ ///
+ /// The string used to construct Layout from.
+ /// The string representation can be in the following form:
+ /// - can define order and meaning for dimensions "NCHW"
+ /// - partial layout specialization:
+ /// - "NC?" defines 3 dimensional layout, first two NC, 3rd one is not defined
+ /// - "N...C" defines layout with dynamic rank where 1st dimension is N, last one is C
+ /// - "NC..." defines layout with dynamic rank where first two are NC, others are not
+ /// defined
+ /// - only order of dimensions "adbc" (0312)
+ /// - Advanced syntax can be used for multi-character names like "[N,C,H,W,...,CustomName]"
+ ///
+ public Layout(string layout_desc)
+ {
+ sbyte[] c_layout_desc = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(layout_desc));
+ HandleException.handler(
+ NativeMethods.ov_layout_create(ref c_layout_desc[0], ref m_ptr));
+
+ }
+
+ ///
+ /// Default destructor
+ ///
+ ~Layout()
+ {
+ Dispose();
+ }
+
+ ///
+ /// Release unmanaged resources.
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_layout_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// String representation of Layout.
+ ///
+ /// String representation of Layout.
+ public string to_string()
+ {
+ return NativeMethods.ov_layout_to_string(m_ptr);
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/model.cs b/modules/csharp_api/csharp/core/model.cs
new file mode 100644
index 000000000..cd540bf74
--- /dev/null
+++ b/modules/csharp_api/csharp/core/model.cs
@@ -0,0 +1,502 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+using System.Xml.Linq;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// A user-defined model
+ ///
+ public class Model : IDisposable
+ {
+ ///
+ /// [private]Model class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [public]Model class pointer.
+ ///
+ public IntPtr Ptr
+ {
+ get { return m_ptr; }
+ set { m_ptr = value; }
+ }
+ ///
+ /// Default Constructor
+ ///
+ public Model()
+ {
+ }
+ ///
+ /// Constructs Model from the initialized ptr.
+ ///
+ /// Model pointer.
+ public Model(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ throw new OVException(ExceptionStatus.GENERAL_ERROR, "The ptr is null!");
+ }
+ Ptr = ptr;
+ }
+ ///
+ /// Model's destructor
+ ///
+ ~Model()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_core_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Gets the friendly name for a model.
+ ///
+ /// The friendly name for a model.
+ public string get_friendly_name()
+ {
+ IntPtr s_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_get_friendly_name(m_ptr, ref s_ptr));
+ string ss = Marshal.PtrToStringAnsi(s_ptr);
+ return ss;
+ }
+ ///
+ /// Get single input port of model, which only supports single input models.
+ ///
+ /// The input port of model.
+ public Node get_input()
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_input(m_ptr, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_nomal);
+ }
+
+ ///
+ /// Get an input port of model by name.
+ ///
+ /// input tensor name (string).
+ /// The input port of model.
+ public Node get_input(string tensor_name)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_model_input_by_name(m_ptr, ref c_tensor_name[0], ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_nomal);
+ }
+
+ ///
+ /// Get an input port of model by port index.
+ ///
+ /// input tensor index.
+ /// The input port of model.
+ public Node get_input(ulong index)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_input_by_index(m_ptr, index, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_nomal);
+ }
+
+ ///
+ /// Get a single output port of model, which only supports single output models.
+ ///
+ /// The output port of model.
+ public Node get_output()
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_output(m_ptr, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_nomal);
+ }
+ ///
+ /// Get an output port of model by name.
+ ///
+ /// output tensor name (string).
+ /// The output port of model.
+ public Node get_output(string tensor_name)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_model_output_by_name(m_ptr, ref c_tensor_name[0], ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_nomal);
+ }
+ ///
+ /// Get an output port of model by port index.
+ ///
+ /// input tensor index.
+ /// The output port of model.
+ public Node get_output(ulong index)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_output_by_index(m_ptr, index, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_nomal);
+ }
+ ///
+ /// Get a const single input port of model, which only supports single input models.
+ ///
+ /// The const input port of model.
+ public Node get_const_input()
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_const_input(m_ptr, ref port_ptr));
+ return new Node(port_ptr,Node.NodeType.e_const);
+ }
+ ///
+ /// Get a const input port of model by name.
+ ///
+ /// input tensor name (string).
+ /// The const input port of model.
+ public Node get_const_input(string tensor_name)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_model_const_input_by_name(m_ptr, ref c_tensor_name[0], ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get a const input port of model by port index.
+ ///
+ /// input tensor index.
+ /// The const input port of model.
+ public Node get_const_input(ulong index)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_const_input_by_index(m_ptr, index, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get a single const output port of model, which only supports single output models.
+ ///
+ /// The const output port of model.
+ public Node get_const_output()
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_const_output(m_ptr, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get a const output port of model by name.
+ ///
+ /// output tensor name (string).
+ /// The const output port of model.
+ public Node get_const_output(string tensor_name)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_model_const_output_by_name(m_ptr, ref c_tensor_name[0], ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+ ///
+ /// Get a const output port of model by port index.
+ ///
+ /// output tensor index.
+ /// The const output port of model.
+ public Node get_const_output(ulong index)
+ {
+ IntPtr port_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_model_const_output_by_index(m_ptr, index, ref port_ptr));
+ return new Node(port_ptr, Node.NodeType.e_const);
+ }
+
+ ///
+ /// Get single input of model, which only supports single input models.
+ ///
+ /// The input of model.
+ public Input input()
+ {
+ Node node = get_input();
+ return new Input(node, 0);
+ }
+ ///
+ /// Get an input of model by port index.
+ ///
+ /// input tensor index.
+ /// The input of model.
+ public Input input(ulong index)
+ {
+ Node node = get_input(index);
+ return new Input(node, index);
+ }
+ ///
+ /// Get an input of model by name.
+ ///
+ /// input tensor name (string).
+ /// The input of model.
+ public Input input(string tensor_name)
+ {
+ Node node = get_input(tensor_name);
+ return new Input(node, 0);
+ }
+
+ ///
+ /// Get single const input of model, which only supports single input models.
+ ///
+ /// The const input of model.
+ public Input const_input()
+ {
+ Node node = get_const_input();
+ return new Input(node, 0);
+ }
+ ///
+ /// Get an const input of model by port index.
+ ///
+ /// input tensor index.
+ /// The const input of model.
+ public Input const_input(ulong index)
+ {
+ Node node = get_const_input(index);
+ return new Input(node, index);
+ }
+ ///
+ /// Get an const input of model by name.
+ ///
+ /// input tensor name (string).
+ /// The const input of model.
+ public Input const_input(string tensor_name)
+ {
+ Node node = get_const_input(tensor_name);
+ return new Input(node, 0);
+ }
+
+
+
+ ///
+ /// Get single output of model, which only supports single output models.
+ ///
+ /// The input of model.
+ public Output output()
+ {
+ Node node = get_output();
+ return new Output(node, 0);
+ }
+ ///
+ /// Get an output of model by port index.
+ ///
+ /// output tensor index.
+ /// The output of model.
+ public Output output(ulong index)
+ {
+ Node node = get_output(index);
+ return new Output(node, index);
+ }
+ ///
+ /// Get an output of model by name.
+ ///
+ /// output tensor name (string).
+ /// The output of model.
+ public Output output(string tensor_name)
+ {
+ Node node = get_output(tensor_name);
+ return new Output(node, 0);
+ }
+
+ ///
+ /// Get single const output of model, which only supports single output models.
+ ///
+ /// The const output of model.
+ public Output const_output()
+ {
+ Node node = get_const_output();
+ return new Output(node, 0);
+ }
+ ///
+ /// Get an const output of model by port index.
+ ///
+ /// output tensor index.
+ /// The const output of model.
+ public Output const_output(ulong index)
+ {
+ Node node = get_const_output(index);
+ return new Output(node, index);
+ }
+ ///
+ /// Get an const output of model by name.
+ ///
+ /// output tensor name (string).
+ /// The const output of model.
+ public Output const_output(string tensor_name)
+ {
+ Node node = get_const_output(tensor_name);
+ return new Output(node, 0);
+ }
+ ///
+ /// Get the input size of model.
+ ///
+ /// The input size.
+ public ulong get_inputs_size()
+ {
+ ulong input_size = 0;
+ HandleException.handler(
+ NativeMethods.ov_model_inputs_size(m_ptr, ref input_size));
+ return input_size;
+ }
+ ///
+ /// Get the output size of model.
+ ///
+ /// The output size.
+ public ulong get_outputs_size()
+ {
+ ulong output_size = 0;
+ HandleException.handler(
+ NativeMethods.ov_model_outputs_size(m_ptr, ref output_size));
+ return output_size;
+ }
+
+ ///
+ /// Get all input of model.
+ ///
+ /// All input of model.
+ public List inputs()
+ {
+ ulong input_size = get_inputs_size();
+ List inputs = new List ();
+ for (ulong index = 0; index < input_size; ++index)
+ {
+ inputs.Add(input(index));
+ }
+ return inputs;
+ }
+ ///
+ /// Get all output of model
+ ///
+ /// All output of model
+ public List outputs()
+ {
+ ulong output_size = get_outputs_size();
+ List outputs = new List();
+ for (ulong index = 0; index < output_size; ++index)
+ {
+ outputs.Add(output(index));
+ }
+ return outputs;
+ }
+
+ ///
+ /// Get all const input of model.
+ ///
+ /// All input of model.
+ public List const_inputs()
+ {
+ ulong input_size = get_inputs_size();
+ List inputs = new List ();
+ for (ulong index = 0; index < input_size; ++index)
+ {
+ inputs.Add(const_input(index));
+ }
+ return inputs;
+ }
+
+ ///
+ /// Get all const output of model
+ ///
+ /// All output of model
+ public List const_outputs()
+ {
+ ulong output_size = get_outputs_size();
+ List outputs = new List();
+ for (ulong index = 0; index < output_size; ++index)
+ {
+ outputs.Add(const_output(index));
+ }
+ return outputs;
+ }
+ ///
+ /// Checks whether any op defined in the model has a dynamic shape.
+ ///
+ /// true if any of the ops defined in the model has a dynamic shape.
+ public bool is_dynamic()
+ {
+ return NativeMethods.ov_model_is_dynamic(m_ptr);
+ }
+
+
+ ///
+ /// Do reshape in model with partial shape for a specified name.
+ ///
+ /// The list of input tensor names and PartialShape.
+ public void reshape(Dictionary partial_shapes)
+ {
+ foreach (var partial_shape in partial_shapes)
+ {
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(partial_shape.Key));
+ PartialShape shape = partial_shape.Value;
+ HandleException.handler(
+ NativeMethods.ov_model_reshape_input_by_name(m_ptr, ref c_tensor_name[0], shape.get_partial_shape()));
+ }
+ }
+ ///
+ /// Do reshape in model for one node(port 0).
+ ///
+ /// A PartialShape.
+ public void reshape(PartialShape partial_shape)
+ {
+ HandleException.handler(
+ NativeMethods.ov_model_reshape_single_input(m_ptr, partial_shape.get_partial_shape()));
+ }
+ ///
+ /// Do reshape in model with a list of (port id, partial shape).
+ ///
+ /// The list of input port id and PartialShape.
+ public void reshape(Dictionary partial_shapes)
+ {
+ ulong[] indexs = new ulong[partial_shapes.Count];
+ Ov.ov_partial_shape[] shapes = new Ov.ov_partial_shape[partial_shapes.Count];
+ int i = 0;
+ foreach (var partial_shape in partial_shapes)
+ {
+ indexs[i] = partial_shape.Key;
+ shapes[i] = partial_shape.Value.get_partial_shape();
+ }
+ HandleException.handler(NativeMethods.ov_model_reshape_by_port_indexes(m_ptr, ref indexs[0],
+ ref shapes[0], (ulong)partial_shapes.Count));
+ }
+ ///
+ /// Do reshape in model with a list of (ov_output_port_t, partial shape).
+ ///
+ /// The list of input node and PartialShape.
+ public void reshape(Dictionary partial_shapes)
+ {
+ IntPtr[] nodes_ptr = new IntPtr[partial_shapes.Count];
+ Ov.ov_partial_shape[] shapes = new Ov.ov_partial_shape[partial_shapes.Count];
+ int i = 0;
+ foreach (var partial_shape in partial_shapes)
+ {
+ nodes_ptr[i] = partial_shape.Key.Ptr;
+ shapes[i] = partial_shape.Value.get_partial_shape();
+ }
+ HandleException.handler(NativeMethods.ov_model_reshape_by_ports(m_ptr, ref nodes_ptr[0],
+ ref shapes[0], (ulong)partial_shapes.Count));
+ }
+
+ }
+
+
+}
+
+
diff --git a/modules/csharp_api/csharp/core/node.cs b/modules/csharp_api/csharp/core/node.cs
new file mode 100644
index 000000000..25f7e1faf
--- /dev/null
+++ b/modules/csharp_api/csharp/core/node.cs
@@ -0,0 +1,147 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// Nodes are the backbone of the graph of Value dataflow. Every node has
+ /// zero or more nodes as arguments and one value, which is either a tensor
+ /// or a (possibly empty) tuple of values.
+ ///
+ public class Node : IDisposable
+ {
+ ///
+ /// The node type.
+ ///
+ public enum NodeType
+ {
+ ///
+ /// Const type.
+ ///
+ e_const = 0,
+ ///
+ /// Nomal type.
+ ///
+ e_nomal = 1
+ };
+ ///
+ /// [private]Node class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]Node class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Specify the format type of the node.
+ ///
+ public NodeType node_type { get; set; }
+
+ ///
+ /// Default Constructor.
+ ///
+ /// The pointer of node.
+ /// The type of node.
+ public Node(IntPtr ptr, NodeType type)
+ {
+ Ptr = ptr;
+ this.node_type = type;
+ }
+ ///
+ /// Default deconstruction.
+ ///
+ ~Node()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources.
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ if (node_type == NodeType.e_const)
+ {
+ NativeMethods.ov_output_const_port_free(m_ptr);
+ }
+ else
+ {
+ NativeMethods.ov_output_port_free(m_ptr);
+ }
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Get the shape.
+ ///
+ /// Returns the shape.
+ public Shape get_shape()
+ {
+ int l = Marshal.SizeOf(typeof(Ov.ov_shape));
+ IntPtr shape_ptr = Marshal.AllocHGlobal(l);
+ if (node_type == NodeType.e_const)
+ {
+ HandleException.handler(
+ NativeMethods.ov_const_port_get_shape(m_ptr, shape_ptr));
+ }
+ else
+ {
+ HandleException.handler(
+ NativeMethods.ov_port_get_shape(m_ptr, shape_ptr));
+ }
+
+ return new Shape(shape_ptr);
+ }
+
+ ///
+ /// Get the partial shape.
+ ///
+ /// Returns the partial shape.
+ public PartialShape get_partial_shape()
+ {
+ Ov.ov_partial_shape shape = new Ov.ov_partial_shape();
+ HandleException.handler(
+ NativeMethods.ov_port_get_partial_shape(m_ptr, ref shape));
+ return new PartialShape(shape);
+ }
+
+ ///
+ /// Get the unique name of the node.
+ ///
+ /// A const reference to the node's unique name.
+ public string get_name()
+ {
+ ExceptionStatus status;
+ IntPtr s_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_port_get_any_name(m_ptr, ref s_ptr));
+ string ss = Marshal.PtrToStringAnsi(s_ptr);
+ return ss;
+ }
+ ///
+ /// Checks that there is exactly one output and returns its element type.
+ ///
+ ///
+ /// TODO: deprecate in favor of node->get_output_element_type(0) with a suitable check in
+ /// the calling code, or updates to the calling code if it is making an invalid assumption
+ /// of only one output.
+ ///
+ /// Data type.
+ public OvType get_element_type()
+ {
+ uint data_type = 0;
+ HandleException.handler(
+ NativeMethods.ov_port_get_element_type(m_ptr, ref data_type));
+ return new OvType((ElementType)data_type);
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/node_input.cs b/modules/csharp_api/csharp/core/node_input.cs
new file mode 100644
index 000000000..1700ea470
--- /dev/null
+++ b/modules/csharp_api/csharp/core/node_input.cs
@@ -0,0 +1,76 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// A handle for one of a node's inputs.
+ ///
+ public class Input : IDisposable
+ {
+ ///
+ /// The input node.
+ ///
+ private Node m_node;
+ ///
+ /// The input node port index.
+ ///
+ private ulong m_index = 0;
+ ///
+ /// Constructs a Output.
+ ///
+ /// The node for the input handle.
+ /// The index of the input.
+ public Input(Node node, ulong index)
+ {
+ m_node = node;
+ m_index = index;
+ }
+ ///
+ /// Default deconstruction.
+ ///
+ ~Input() {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources.
+ ///
+ public void Dispose()
+ {
+ m_node.Dispose();
+ }
+ ///
+ /// Get the node referred to by this input handle.
+ ///
+ /// The ouput node
+ public Node get_node() { return m_node; }
+ ///
+ /// The index of the input referred to by this input handle.
+ ///
+ /// The index of the input.
+ public ulong get_index() { return m_index; }
+ ///
+ /// The element type of the input referred to by this input handle.
+ ///
+ /// The element type of the input.
+ public OvType get_element_type() { return m_node.get_element_type(); }
+ ///
+ /// The shape of the input referred to by this input handle.
+ ///
+ /// The shape of the input .
+ public Shape get_shape() { return m_node.get_shape(); }
+ ///
+ /// Any tensor names associated with this input
+ ///
+ /// tensor names
+ public string get_any_name() { return m_node.get_name(); }
+ ///
+ /// The partial shape of the input referred to by this input handle.
+ ///
+ /// The partial shape of the input
+ public PartialShape get_partial_shape() { return m_node.get_partial_shape(); }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/node_output.cs b/modules/csharp_api/csharp/core/node_output.cs
new file mode 100644
index 000000000..2db547190
--- /dev/null
+++ b/modules/csharp_api/csharp/core/node_output.cs
@@ -0,0 +1,77 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// A handle for one of a node's outputs.
+ ///
+ public class Output : IDisposable
+ {
+ ///
+ /// The output node.
+ ///
+ private Node m_node;
+ ///
+ /// The output node port index.
+ ///
+ private ulong m_index = 0;
+ ///
+ /// Constructs a Output.
+ ///
+ /// The node for the output handle.
+ /// The index of the output.
+ public Output(Node node, ulong index=0)
+ {
+ m_node = node;
+ m_index = index;
+ }
+ ///
+ /// Default deconstruction.
+ ///
+ ~Output()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources.
+ ///
+ public void Dispose()
+ {
+ m_node.Dispose();
+ }
+ ///
+ /// Get the node referred to by this output handle.
+ ///
+ /// The ouput node
+ public Node get_node() { return m_node; }
+ ///
+ /// The index of the output referred to by this output handle.
+ ///
+ /// The index of the output.
+ public ulong get_index() { return m_index; }
+ ///
+ /// The element type of the output referred to by this output handle.
+ ///
+ /// The element type of the output.
+ public OvType get_element_type() { return m_node.get_element_type(); }
+ ///
+ /// The shape of the output referred to by this output handle.
+ ///
+ /// The shape of the output .
+ public Shape get_shape(){ return m_node.get_shape(); }
+ ///
+ /// Any tensor names associated with this output
+ ///
+ /// tensor names
+ public string get_any_name() { return m_node.get_name(); }
+ ///
+ /// The partial shape of the output referred to by this output handle.
+ ///
+ /// The partial shape of the output
+ public PartialShape get_partial_shape() { return m_node.get_partial_shape(); }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/partial_shape.cs b/modules/csharp_api/csharp/core/partial_shape.cs
new file mode 100644
index 000000000..1fe0accd4
--- /dev/null
+++ b/modules/csharp_api/csharp/core/partial_shape.cs
@@ -0,0 +1,230 @@
+using System;
+using System.Collections.Generic;
+using System.Drawing.Drawing2D;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+using System.Xml.Linq;
+using ov_partial_shape = OpenVinoSharp.Ov.ov_partial_shape;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// Class representing a shape that may be partially or totally dynamic.
+ ///
+ ///
+ /// Dynamic rank. (Informal notation: `?`)
+ /// Static rank, but dynamic dimensions on some or all axes.
+ /// (Informal notation examples: `{1,2,?,4}`, `{?,?,?}`)
+ /// Static rank, and static dimensions on all axes.
+ /// (Informal notation examples: `{1,2,3,4}`, `{6}`, `{}`)
+ ///
+ public class PartialShape
+ {
+ ///
+ /// PartialShape rank.
+ ///
+ private Dimension rank;
+
+ ///
+ /// PartialShape dimensions.
+ ///
+ private Dimension[] dimensions;
+ ///
+ /// Constructing partial shape by ov_partial_shape.
+ ///
+ /// ov_partial_shape struct.
+ public PartialShape(Ov.ov_partial_shape shape) {
+ partial_shape_convert(shape);
+ }
+ ///
+ /// Constructing partial shape by dimensions.
+ ///
+ /// The partial shape dimensions array.
+ public PartialShape(Dimension[] dimensions)
+ {
+ this.dimensions = dimensions;
+ rank = new Dimension(dimensions.Length, dimensions.Length);
+ }
+ ///
+ /// Constructing partial shape by dimensions.
+ ///
+ /// The partial shape dimensions list.
+ public PartialShape(List dimensions) : this(dimensions.ToArray())
+ {
+ }
+
+ ///
+ /// Constructing dynamic partial shape by dimensions.
+ ///
+ /// The partial shape rank.
+ /// The partial shape dimensions array.
+ public PartialShape(Dimension rank, Dimension[] dimensions)
+ {
+ this.dimensions = dimensions;
+ this.rank = rank;
+ }
+
+ ///
+ /// Constructing dynamic partial shape by dimensions.
+ ///
+ /// The partial shape rank.
+ /// The partial shape dimensions list.
+ public PartialShape(Dimension rank, List dimensions) : this(rank, dimensions.ToArray())
+ {
+ }
+ ///
+ /// Constructing static partial shape by dimensions.
+ ///
+ /// The partial shape rank.
+ /// The partial shape dimensions array.
+ public PartialShape(long rank, long[] dimensions)
+ {
+ this.rank = new Dimension(rank);
+ this.dimensions = new Dimension[dimensions.Length];
+ for (int i = 0; i < dimensions.Length; ++i)
+ {
+ this.dimensions[i] = new Dimension(dimensions[i]);
+ }
+ }
+ ///
+ /// Constructing static partial shape by dimensions.
+ ///
+ /// The partial shape rank.
+ /// The partial shape dimensions list.
+ public PartialShape(long rank, List dimensions) : this(rank, dimensions.ToArray())
+ {
+ }
+
+ ///
+ /// Constructing static partial shape by shape.
+ ///
+ /// The shape
+ public PartialShape(Shape shape)
+ {
+ Ov.ov_partial_shape partial_shape = new ov_partial_shape();
+ HandleException.handler(
+ NativeMethods.ov_shape_to_partial_shape(shape.shape, ref partial_shape));
+ partial_shape_convert(partial_shape);
+ }
+
+ ///
+ /// Default deconstruction.
+ ///
+ ~PartialShape()
+ {
+ }
+
+ ///
+ /// Convert partial shape to PartialShape class.
+ ///
+ /// ov_partial_shape struct
+ private void partial_shape_convert(Ov.ov_partial_shape shape)
+ {
+ rank = new Dimension(shape.rank);
+ long[] data = new long[rank.get_max() * 2];
+ dimensions = new Dimension[rank.get_max()];
+ Marshal.Copy(shape.dims, data, 0, (int)rank.get_max() * 2);
+ for (int i = 0; i < rank.get_max(); ++i)
+ {
+ dimensions[i] = new Dimension(data[2 * i], data[2 * i + 1]);
+ }
+ }
+ ///
+ /// Get ov_partial_shape
+ ///
+ /// return ov_partial_shape.
+ public ov_partial_shape get_partial_shape()
+ {
+ Ov.ov_partial_shape shape_arr = new Ov.ov_partial_shape();
+ shape_arr.rank = rank.get_dimension();
+ List ov_dims = new List();
+ for (int i = 0; i < shape_arr.rank.max; ++i)
+ {
+ ov_dims.Add(dimensions[i].get_dimension());
+ }
+ Ov.ov_dimension[] ds = ov_dims.ToArray();
+ shape_arr.dims = Marshal.UnsafeAddrOfPinnedArrayElement(ds, 0);
+ return shape_arr;
+ }
+ ///
+ /// Get rank.
+ ///
+ ///
+ public Dimension get_rank()
+ {
+ return rank;
+ }
+ ///
+ /// Get dimensions.
+ ///
+ /// Dimension[
+ public Dimension[] get_dimensions() {
+ return dimensions;
+ }
+
+ ///
+ /// Convert partial shape without dynamic data to a static shape.
+ ///
+ /// The shape.
+ public Shape to_shape()
+ {
+ int l = Marshal.SizeOf(typeof(Ov.ov_shape));
+ IntPtr shape_ptr = Marshal.AllocHGlobal(l);
+ HandleException.handler(
+ NativeMethods.ov_partial_shape_to_shape(get_partial_shape(), shape_ptr));
+ return new Shape(shape_ptr);
+ }
+
+ ///
+ /// Check if this shape is static.
+ ///
+ /// A shape is considered static if it has static rank, and all dimensions of the shape
+ /// are static.
+ /// `true` if this shape is static, else `false`.
+ public bool is_static() {
+ return !is_dynamic();
+ }
+
+ ///
+ /// Check if this shape is dynamic.
+ ///
+ /// A shape is considered static if it has static rank, and all dimensions of the shape
+ /// are static.
+ /// `false` if this shape is static, else `true`.
+ public bool is_dynamic() {
+ return rank.is_dynamic();
+ }
+
+ ///
+ /// Get partial shape string.
+ ///
+ ///
+ public string to_string()
+ {
+ string s = "Shape : {";
+ if (rank.is_dynamic())
+ {
+ s += "?";
+ }
+ else
+ {
+ for (int i = 0; i < rank.get_max(); ++i)
+ {
+ if (dimensions[i].is_dynamic())
+ {
+ s += "?,";
+ }
+ else
+ {
+ s += dimensions[i].get_dimension().max.ToString() + ",";
+ }
+ }
+ }
+ s = s.Substring(0, s.Length - 1);
+ s += "}";
+ return s;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/remote_context.cs b/modules/csharp_api/csharp/core/remote_context.cs
new file mode 100644
index 000000000..8fea322aa
--- /dev/null
+++ b/modules/csharp_api/csharp/core/remote_context.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+namespace OpenVinoSharp
+{
+ ///
+ /// This class represents an abstraction for remote (non-CPU) accelerator device-specific inference context.
+ /// Such context represents a scope on the device within which compiled models and remote memory tensors can exist,
+ /// function, and exchange data.
+ ///
+ public class RemoteContext
+ {
+ ///
+ /// [private]RemoteContext class pointer.
+ ///
+ private IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [public]RemoteContext class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+ ///
+ /// Default Constructor
+ ///
+ /// RemoteContext pointer.
+ public RemoteContext(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ System.Diagnostics.Debug.WriteLine("RemoteContext init error : ptr is null!");
+ return;
+ }
+ Ptr = ptr;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/shape.cs b/modules/csharp_api/csharp/core/shape.cs
new file mode 100644
index 000000000..bee3b3c73
--- /dev/null
+++ b/modules/csharp_api/csharp/core/shape.cs
@@ -0,0 +1,157 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Security.Cryptography.X509Certificates;
+using System.Text;
+using System.Threading.Tasks;
+
+using ov_shape = OpenVinoSharp.Ov.ov_shape;
+
+namespace OpenVinoSharp
+{
+
+ ///
+ /// Shape for a tensor.
+ ///
+ /// ov_runtime_c#_api
+ public class Shape : List, IDisposable
+ {
+ ///
+ /// [struct] The shape ov_shape
+ ///
+ public ov_shape shape;
+ ///
+ /// [private]Shape class pointer.
+ ///
+ private IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [public]Shape class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+ ///
+ /// Constructs Shape from the initialized IntPtr.
+ ///
+ /// Initialized IntPtr
+ public Shape(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ System.Diagnostics.Debug.WriteLine("Shape init error : ptr is null!");
+ return;
+ }
+ this.m_ptr = ptr;
+ var temp = Marshal.PtrToStructure(ptr, typeof(ov_shape));
+ shape = (ov_shape)temp;
+ long[] dims = shape.get_dims();
+ for (int i = 0; i < shape.rank; ++i)
+ {
+ this.Add(dims[i]);
+ }
+ }
+ ///
+ /// Constructs Shape from the list.
+ ///
+ /// Initialized list
+ public Shape(List axis_lengths)
+ {
+ for (int i = 0; i < axis_lengths.Count; ++i)
+ {
+ this.Add(axis_lengths[i]);
+ }
+ int l = Marshal.SizeOf(typeof(ov_shape));
+ m_ptr = Marshal.AllocHGlobal(l);
+ HandleException.handler(
+ NativeMethods.ov_shape_create((long)this.Count, ref axis_lengths.ToArray()[0], m_ptr));
+ var temp = Marshal.PtrToStructure(m_ptr, typeof(ov_shape));
+ shape = (ov_shape)temp;
+ }
+ ///
+ /// Constructs Shape from the initialized array.
+ ///
+ /// Initialized array
+ public Shape(long[] axis_lengths)
+ {
+ for (int i = 0; i < axis_lengths.Length; ++i)
+ {
+ this.Add(axis_lengths[i]);
+ }
+ int l = Marshal.SizeOf(typeof(ov_shape));
+ m_ptr = Marshal.AllocHGlobal(l);
+ HandleException.handler(
+ NativeMethods.ov_shape_create((long)this.Count, ref axis_lengths[0], m_ptr));
+ var temp = Marshal.PtrToStructure(m_ptr, typeof(ov_shape));
+ shape = (ov_shape)temp;
+ }
+ ///
+ /// Constructs Shape from the initialized array.
+ ///
+ /// Any length parameter
+ public Shape(params int[] data)
+ {
+ long[] axis_lengths = new long[data.Length];
+ for (int i = 0; i < data.Length; ++i)
+ {
+ this.Add(data[i]);
+ axis_lengths[i] = data[i];
+ }
+ int l = Marshal.SizeOf(typeof(ov_shape));
+ m_ptr = Marshal.AllocHGlobal(l);
+ HandleException.handler(
+ NativeMethods.ov_shape_create((long)this.Count, ref axis_lengths[0], m_ptr));
+ var temp = Marshal.PtrToStructure(m_ptr, typeof(ov_shape));
+ shape = (ov_shape)temp;
+ }
+ ///
+ /// Shape's destructor
+ ///
+ ~Shape()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_shape_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+ ///
+ /// Convert shape to string.
+ ///
+ /// shape string
+ public string to_string()
+ {
+ if (this.Count < 1)
+ {
+ return "NULL";
+ }
+ string s = "Shape : {";
+ foreach(var i in this)
+ {
+ s += i.ToString() + ", ";
+ }
+ s = s.Substring(0, s.Length - 2);
+ s += "}";
+ return s;
+ }
+ ///
+ /// Obtain the product of all shape parameters
+ ///
+ /// The product of all shape parameters
+ public long data_size()
+ {
+ long d = 1;
+ foreach (var i in this)
+ {
+ d *= i;
+ }
+ return d;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/core/tensor.cs b/modules/csharp_api/csharp/core/tensor.cs
new file mode 100644
index 000000000..586abd929
--- /dev/null
+++ b/modules/csharp_api/csharp/core/tensor.cs
@@ -0,0 +1,380 @@
+using OpenVinoSharp.element;
+using OpenVinoSharp.preprocess;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Security.Cryptography;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// Tensor API holding host memory.
+ /// It can throw exceptions safely for the application, where it is properly handled.
+ ///
+ /// ov_runtime_c#_api
+ public class Tensor : IDisposable
+ {
+ ///
+ /// [private]Tensor class pointer.
+ ///
+ private IntPtr m_ptr = IntPtr.Zero;
+ ///
+ /// [public]Tensor class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default Constructor
+ ///
+ public Tensor() { }
+ ///
+ /// Constructs Tensor from the initialized pointer.
+ ///
+ /// Tensor pointer.
+ public Tensor(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ System.Diagnostics.Debug.WriteLine("Tensor init error : ptr is null!");
+ return;
+ }
+ this.m_ptr = ptr;
+ }
+
+ ///
+ /// Constructs Tensor using element type ,shape and image data.
+ ///
+ /// Tensor element type
+ /// Tensor shape
+ /// Image data
+ public Tensor(element.Type type, Shape shape, byte[] mat)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)type.get_type(), shape.shape, Marshal.UnsafeAddrOfPinnedArrayElement(mat, 0), ref m_ptr));
+ }
+ public Tensor(OvType type, Shape shape, byte[] mat)
+ :this(new element.Type(type.get_type()), shape, mat)
+ {
+ }
+ ///
+ /// Constructs Tensor using element type ,shape and input data.
+ ///
+ /// Tensor shape
+ /// Input data
+ public Tensor(Shape shape, float[] mat)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)ElementType.F32, shape.shape, Marshal.UnsafeAddrOfPinnedArrayElement(mat, 0), ref m_ptr));
+ }
+ ///
+ /// Constructs Tensor using element type ,shape and input data.
+ ///
+ /// Tensor shape
+ /// Input data
+ public Tensor(Shape shape, double[] mat)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)ElementType.F64, shape.shape, Marshal.UnsafeAddrOfPinnedArrayElement(mat, 0), ref m_ptr));
+ }
+ ///
+ /// Constructs Tensor using element type ,shape and input data.
+ ///
+ /// Tensor shape
+ /// Input data
+ public Tensor(Shape shape, int[] mat)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)ElementType.I32, shape.shape, Marshal.UnsafeAddrOfPinnedArrayElement(mat, 0), ref m_ptr));
+ }
+ ///
+ /// Constructs Tensor using element type ,shape and input data.
+ ///
+ /// Tensor shape
+ /// Input data
+ public Tensor(Shape shape, short[] mat)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)ElementType.I16, shape.shape, Marshal.UnsafeAddrOfPinnedArrayElement(mat, 0), ref m_ptr));
+ }
+ ///
+ /// Constructs Tensor using element type ,shape and input data.
+ ///
+ /// Tensor shape
+ /// Input data
+ public Tensor(Shape shape, long[] mat)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)ElementType.I64, shape.shape, Marshal.UnsafeAddrOfPinnedArrayElement(mat, 0), ref m_ptr));
+ }
+ ///
+ /// Constructs Tensor using element type and shape. Wraps allocated host memory.
+ ///
+ /// Does not perform memory allocation internally.
+ /// Tensor element type
+ /// Tensor shape
+ /// Pointer to pre-allocated host memory
+ public Tensor(element.Type type, Shape shape, IntPtr host_ptr)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)type.get_type(), shape.shape, host_ptr, ref m_ptr));
+ }
+ public Tensor(OvType type, Shape shape, IntPtr host_ptr)
+ :this(new element.Type(type.get_type()), shape, host_ptr)
+ {
+ }
+ ///
+ /// Constructs Tensor using element type and shape. Allocate internal host storage using default allocator
+ ///
+ /// Tensor element type
+ /// Tensor shape
+ public Tensor(element.Type type, Shape shape)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create
+ ((uint)type.get_type(), shape.shape, ref m_ptr));
+ }
+ public Tensor(OvType type, Shape shape)
+ : this(new element.Type(type.get_type()), shape)
+ {
+ }
+ ///
+ /// Default copy constructor
+ ///
+ /// other Tensor object
+ public Tensor(Tensor tensor)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_create_from_host_ptr
+ ((uint)tensor.get_element_type().get_type(), tensor.get_shape().shape, tensor.data(), ref m_ptr));
+ }
+
+ ///
+ /// Tensor's destructor
+ ///
+ ~Tensor()
+ {
+ Dispose();
+ }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_tensor_free(m_ptr);
+
+ m_ptr = IntPtr.Zero;
+ }
+ ///
+ /// Set new shape for tensor, deallocate/allocate if new total size is bigger than previous one.
+ ///
+ /// Memory allocation may happen
+ /// A new shape
+ public void set_shape(Shape shape)
+ {
+ HandleException.handler(
+ NativeMethods.ov_tensor_set_shape(m_ptr, shape.shape));
+ }
+
+ ///
+ /// Get tensor shape
+ ///
+ /// A tensor shape
+ public Shape get_shape()
+ {
+ int l = Marshal.SizeOf(typeof(Ov.ov_shape));
+ IntPtr shape_ptr = Marshal.AllocHGlobal(l);
+ HandleException.handler(
+ NativeMethods.ov_tensor_get_shape(m_ptr, shape_ptr));
+
+ return new Shape(shape_ptr);
+ }
+ ///
+ /// Get tensor element type
+ ///
+ /// A tensor element type
+ public OvType get_element_type()
+ {
+ uint type = 100;
+ HandleException.handler(
+ NativeMethods.ov_tensor_get_element_type(m_ptr, out type));
+ OvType t = new OvType((ElementType)type);
+ return t;
+ }
+
+ ///
+ /// Returns the total number of elements (a product of all the dims or 1 for scalar).
+ ///
+ /// The total number of elements.
+ public ulong get_size()
+ {
+ ulong size = 0;
+ HandleException.handler(
+ NativeMethods.ov_tensor_get_size(m_ptr, ref size));
+ return size;
+ }
+
+ ///
+ /// Returns the size of the current Tensor in bytes.
+ ///
+ /// Tensor's size in bytes
+ public ulong get_byte_size()
+ {
+ ulong size = 0;
+ HandleException.handler(
+ NativeMethods.ov_tensor_get_byte_size(m_ptr, ref size));
+ return size;
+ }
+
+ ///
+ /// Copy tensor, destination tensor should have the same element type and shape
+ ///
+ /// Data type.
+ /// destination tensor
+ public void copy_to(Tensor dst)
+ {
+ ulong length = this.get_size();
+ T[] data = this.get_data((int)length);
+ dst.set_data(data);
+ }
+
+ ///
+ /// Provides an access to the underlaying host memory.
+ ///
+ /// A host pointer to tensor memory.
+ public IntPtr data()
+ {
+ IntPtr data_ptr = new IntPtr();
+ HandleException.handler(
+ NativeMethods.ov_tensor_data(m_ptr, ref data_ptr));
+ return data_ptr;
+ }
+
+ ///
+ /// Load the specified type of data into the underlying host memory.
+ ///
+ /// data type
+ /// Data to be loaded.
+ public void set_data(T[] input_data)
+ {
+ IntPtr data_ptr = new IntPtr();
+ HandleException.handler(
+ NativeMethods.ov_tensor_data(m_ptr, ref data_ptr));
+ int length = input_data.Length;
+
+ string t = typeof(T).ToString();
+ if (t == "System.Byte")
+ {
+ byte[] data = (byte[])Convert.ChangeType(input_data, typeof(byte[]));
+ Marshal.Copy(data, 0, data_ptr, length);
+ }
+ else if (t == "System.Int32")
+ {
+ int[] data = (int[])Convert.ChangeType(input_data, typeof(int[]));
+ Marshal.Copy(data, 0, data_ptr, length);
+ }
+ else if (t == "System.Int64")
+ {
+ long[] data = (long[])Convert.ChangeType(input_data, typeof(long[]));
+ Marshal.Copy(data, 0, data_ptr, length);
+ }
+ else if (t == "System.Int16")
+ {
+ short[] data = (short[])Convert.ChangeType(input_data, typeof(short[]));
+ Marshal.Copy(data, 0, data_ptr, length);
+ }
+ else if (t == "System.Single")
+ {
+ float[] data = (float[])Convert.ChangeType(input_data, typeof(float[]));
+ Marshal.Copy(data, 0, data_ptr, length);
+ }
+ else if (t == "System.Double")
+ {
+ double[] data = (double[])Convert.ChangeType(input_data, typeof(double[]));
+ Marshal.Copy(data, 0, data_ptr, length);
+ }
+ else
+ {
+ Console.WriteLine("Data format error, not supported. Only double, flaot, int, long, shaort and byte data formats are supported");
+ }
+ }
+
+ ///
+ /// Read data of the specified type from the underlying host memory.
+ ///
+ /// Type of data read.
+ /// The length of the read data.
+ /// Read data.
+ public T[] get_data(int length)
+ {
+ IntPtr data_ptr = new IntPtr();
+ HandleException.handler(
+ NativeMethods.ov_tensor_data(m_ptr, ref data_ptr));
+ string t = typeof(T).ToString();
+ T[] result = new T[length];
+
+ if (t == "System.Byte")
+ {
+ byte[] data = new byte[length];
+ Marshal.Copy(data_ptr, data, 0, length);
+ result = (T[])Convert.ChangeType(data, typeof(T[]));
+ return result;
+ }
+ else if (t == "System.Int32")
+ {
+ int[] data = new int[length];
+ Marshal.Copy(data_ptr, data, 0, length);
+ result = (T[])Convert.ChangeType(data, typeof(T[]));
+ return result;
+ }
+ else if (t == "System.Int64")
+ {
+ long[] data = new long[length];
+ Marshal.Copy(data_ptr, data, 0, length);
+ result = (T[])Convert.ChangeType(data, typeof(T[]));
+ return result;
+ }
+ else if (t == "System.Int16")
+ {
+ short[] data = new short[length];
+ Marshal.Copy(data_ptr, data, 0, length);
+ result = (T[])Convert.ChangeType(data, typeof(T[]));
+ return result;
+ }
+ else if (t == "System.Single")
+ {
+ float[] data = new float[length];
+ Marshal.Copy(data_ptr, data, 0, length);
+ result = (T[])Convert.ChangeType(data, typeof(T[]));
+ return result;
+ }
+ else if (t == "System.Double")
+ {
+ double[] data = new double[length];
+ Marshal.Copy(data_ptr, data, 0, length);
+ result = (T[])Convert.ChangeType(data, typeof(T[]));
+ return result;
+ }
+ else
+ {
+ Console.WriteLine("Data format error, not supported. Only double, flaot, int, long, shaort and byte data formats are supported");
+ return result;
+ }
+
+ }
+
+ }
+}
diff --git a/modules/csharp_api/csharp/exception/exception.cs b/modules/csharp_api/csharp/exception/exception.cs
new file mode 100644
index 000000000..83e081b1b
--- /dev/null
+++ b/modules/csharp_api/csharp/exception/exception.cs
@@ -0,0 +1,45 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.Serialization;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// The default exception to be thrown by OpenVINO
+ ///
+ [Serializable]
+ // ReSharper disable once InconsistentNaming
+ internal class OVException : Exception
+ {
+ ///
+ /// The numeric code for error status
+ ///
+ public ExceptionStatus status { get; set; }
+
+
+ ///
+ /// A description of the error
+ ///
+ public string err_msg { get; set; }
+
+
+ ///
+ /// Constructor
+ ///
+ /// The numeric code for error status
+ /// The source file name where error is encountered
+ /// A description of the error
+ /// The source file name where error is encountered
+ /// The line number in the source where error is encountered
+ public OVException(ExceptionStatus status, string err_msg)
+ : base(err_msg)
+ {
+ this.status = status;
+ this.err_msg = err_msg;
+ }
+
+ }
+}
diff --git a/modules/csharp_api/csharp/exception/handle_exception.cs b/modules/csharp_api/csharp/exception/handle_exception.cs
new file mode 100644
index 000000000..10805f825
--- /dev/null
+++ b/modules/csharp_api/csharp/exception/handle_exception.cs
@@ -0,0 +1,253 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// OpenVINO C API Return value anomaly detection handle
+ ///
+ static class HandleException
+ {
+ ///
+ /// Check if there are any abnormalities in the return value, and if so, return the
+ /// corresponding exceptions according to the abnormal value
+ ///
+ ///
+ public static void handler(ExceptionStatus status) {
+ if (ExceptionStatus.OK == status)
+ {
+ return;
+ }
+ else if (ExceptionStatus.GENERAL_ERROR == status)
+ {
+ general_error();
+ }
+ else if (ExceptionStatus.NOT_IMPLEMENTED == status)
+ {
+ not_implemented();
+ }
+ else if (ExceptionStatus.NETWORK_NOT_LOADED == status)
+ {
+ network_not_loaded();
+ }
+ else if (ExceptionStatus.PARAMETER_MISMATCH == status)
+ {
+ parameter_mismatch();
+ }
+ else if (ExceptionStatus.NOT_FOUND == status)
+ {
+ not_found();
+ }
+ else if (ExceptionStatus.OUT_OF_BOUNDS == status)
+ {
+ out_of_bounds();
+ }
+ else if (ExceptionStatus.UNEXPECTED == status)
+ {
+ unexpection();
+ }
+ else if (ExceptionStatus.REQUEST_BUSY == status)
+ {
+ request_busy();
+ }
+ else if (ExceptionStatus.RESULT_NOT_READY == status)
+ {
+ result_not_ready();
+ }
+ else if (ExceptionStatus.NOT_ALLOCATED == status)
+ {
+ not_allocated();
+ }
+ else if (ExceptionStatus.INFER_NOT_STARTED == status)
+ {
+ infer_not_started();
+ }
+ else if (ExceptionStatus.NETWORK_NOT_READ == status)
+ {
+ netword_not_read();
+ }
+ else if (ExceptionStatus.INFER_CANCELLED == status)
+ {
+ infer_cancelled();
+ }
+ else if (ExceptionStatus.INVALID_C_PARAM == status)
+ {
+ invalid_c_param();
+ }
+ else if (ExceptionStatus.UNKNOWN_C_ERROR == status)
+ {
+ unknown_c_error();
+ }
+ else if (ExceptionStatus.NOT_IMPLEMENT_C_METHOD == status)
+ {
+ not_implement_c_method();
+ }
+ else if (ExceptionStatus.UNKNOW_EXCEPTION == status)
+ {
+ unknown_exception();
+ }
+ else if (ExceptionStatus.PTR_NULL == status)
+ {
+ ptr_null_exception();
+ }
+
+ }
+ ///
+ /// Throw GENERAL_ERROR OpenVINOException.
+ ///
+ /// general error!
+ private static void general_error() {
+ throw new OVException(ExceptionStatus.GENERAL_ERROR, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw NOT_IMPLEMENTED OpenVINOException.
+ ///
+ /// not implemented!
+ private static void not_implemented()
+ {
+ throw new OVException(ExceptionStatus.NOT_IMPLEMENTED, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+
+ ///
+ /// Throw NETWORK_NOT_LOADED OpenVINOException.
+ ///
+ /// network not loaded!
+ private static void network_not_loaded()
+ {
+ throw new OVException(ExceptionStatus.NETWORK_NOT_LOADED, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+
+
+ ///
+ /// Throw PARAMETER_MISMATCH OpenVINOException.
+ ///
+ /// parameter mismatch!
+ private static void parameter_mismatch()
+ {
+ throw new OVException(ExceptionStatus.PARAMETER_MISMATCH, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+
+ ///
+ /// Throw NOT_FOUND OpenVINOException.
+ ///
+ /// not found!
+ private static void not_found()
+ {
+ throw new OVException(ExceptionStatus.NOT_FOUND, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+
+ ///
+ /// Throw OUT_OF_BOUNDS OpenVINOException.
+ ///
+ /// out of bounds!
+ private static void out_of_bounds()
+ {
+ throw new OVException(ExceptionStatus.OUT_OF_BOUNDS, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+
+
+ ///
+ /// Throw UNEXPECTED OpenVINOException.
+ ///
+ /// unexpected!
+ private static void unexpection()
+ {
+ throw new OVException(ExceptionStatus.UNEXPECTED, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+
+
+
+ ///
+ /// Throw REQUEST_BUSY OpenVINOException.
+ ///
+ /// request busy!
+ private static void request_busy()
+ {
+ throw new OVException(ExceptionStatus.REQUEST_BUSY, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw RESULT_NOT_READY OpenVINOException.
+ ///
+ /// result not ready!
+ private static void result_not_ready()
+ {
+ throw new OVException(ExceptionStatus.RESULT_NOT_READY, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw OpenVINOException.
+ ///
+ /// not allocated!
+ private static void not_allocated()
+ {
+ throw new OVException(ExceptionStatus.NOT_ALLOCATED, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw INFER_NOT_STARTED OpenVINOException.
+ ///
+ /// infer not started!
+ private static void infer_not_started()
+ {
+ throw new OVException(ExceptionStatus.INFER_NOT_STARTED, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw NETWORK_NOT_READ OpenVINOException.
+ ///
+ /// network not read!
+ private static void netword_not_read()
+ {
+ throw new OVException(ExceptionStatus.NETWORK_NOT_READ, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw INFER_CANCELLED OpenVINOException.
+ ///
+ /// infer cancelled!
+ private static void infer_cancelled()
+ {
+ throw new OVException(ExceptionStatus.INFER_CANCELLED, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw INVALID_C_PARAM OpenVINOException.
+ ///
+ /// invalid c param!
+ private static void invalid_c_param()
+ {
+ throw new OVException(ExceptionStatus.INVALID_C_PARAM, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw UNKNOWN_C_ERROR OpenVINOException.
+ ///
+ /// unknown c error!
+ private static void unknown_c_error()
+ {
+ throw new OVException(ExceptionStatus.UNKNOWN_C_ERROR, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw NOT_IMPLEMENT_C_METHOD OpenVINOException.
+ ///
+ /// not implement c method!
+ private static void not_implement_c_method()
+ {
+ throw new OVException(ExceptionStatus.NOT_IMPLEMENT_C_METHOD, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw UNKNOW_EXCEPTION OpenVINOException.
+ ///
+ /// unknown exception!
+ private static void unknown_exception()
+ {
+ throw new OVException(ExceptionStatus.UNKNOW_EXCEPTION, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ ///
+ /// Throw PTR_NULL OpenVINOException.
+ ///
+ /// ptr null! (was wrongly thrown as UNKNOW_EXCEPTION, hiding the real status from callers)
+ private static void ptr_null_exception()
+ {
+ throw new OVException(ExceptionStatus.PTR_NULL, Marshal.PtrToStringAnsi(NativeMethods.ov_get_last_err_msg()));
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_base.cs b/modules/csharp_api/csharp/native_methods/ov_base.cs
new file mode 100644
index 000000000..b53dc3a86
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_base.cs
@@ -0,0 +1,16 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ ///
+ /// Introducing C API.
+ ///
+ public partial class NativeMethods
+ {
+ private const string dll_extern = "openvino_c";
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_common.cs b/modules/csharp_api/csharp/native_methods/ov_common.cs
new file mode 100644
index 000000000..0a0ff31e7
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_common.cs
@@ -0,0 +1,38 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Print the error info.
+ ///
+ /// a status code.
+ /// error info.
+ [DllImport(dll_extern, EntryPoint = "ov_get_error_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static string ov_get_error_info(int status);
+
+ ///
+ /// free char
+ ///
+ /// The pointer to the char to free.
+ [DllImport(dll_extern, EntryPoint = "ov_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_free(ref char content);
+
+
+ ///
+ /// Get the last error msg.
+ ///
+ /// The last error msg.
+ [DllImport(dll_extern, EntryPoint = "ov_get_last_err_msg",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static IntPtr ov_get_last_err_msg();
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_compiled_model.cs b/modules/csharp_api/csharp/native_methods/ov_compiled_model.cs
new file mode 100644
index 000000000..d10278119
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_compiled_model.cs
@@ -0,0 +1,202 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Get the input size of ov_compiled_model_t.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// the compiled_model's input size.
+ ///
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_inputs_size",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_inputs_size(
+ IntPtr compiled_model, ref ulong size);
+
+ ///
+ /// Get the single const input port of ov_compiled_model_t, which only support single input model.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_input",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_input(
+ IntPtr compiled_model, ref IntPtr input_port);
+
+ ///
+ /// Get a const input port of ov_compiled_model_t by port index.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// input index.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_input_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_input_by_index(
+ IntPtr compiled_model,
+ ulong index,
+ ref IntPtr input_port);
+
+ ///
+ /// Get a const input port of ov_compiled_model_t by name.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// input tensor name (char *).
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_input_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_input_by_name(
+ IntPtr compiled_model,
+ ref sbyte name,
+ ref IntPtr input_port);
+
+ ///
+ /// Get the output size of ov_compiled_model_t.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// the compiled_model's output size.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_outputs_size",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_outputs_size(
+ IntPtr compiled_model,
+ ref ulong size);
+
+ ///
+ /// Get the single const output port of ov_compiled_model_t, which only support single output model.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_output",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_output(
+ IntPtr compiled_model, ref IntPtr output_port);
+
+
+ ///
+ /// Get a const output port of ov_compiled_model_t by port index.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// input index.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_output_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_output_by_index(
+ IntPtr compiled_model,
+ ulong index,
+ ref IntPtr output_port);
+
+
+ ///
+ /// Get a const output port of ov_compiled_model_t by name.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// tensor name (char *).
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_output_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_output_by_name(
+ IntPtr compiled_model,
+ ref sbyte name,
+ ref IntPtr output_port);
+
+ ///
+ /// Gets runtime model information from a device.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// A pointer to the ov_model_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_get_runtime_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_get_runtime_model(
+ IntPtr compiled_model,
+ ref IntPtr model);
+
+ ///
+ /// Creates an inference request object used to infer the compiled model.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// A pointer to the ov_infer_request_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_create_infer_request",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_create_infer_request(
+ IntPtr compiled_model,
+ ref IntPtr infer_request);
+
+ ///
+ /// Sets properties for a device, acceptable keys can be found in ov_property_key_xxx.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// The property key string.
+ /// The property value string.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_set_property",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_set_property(
+ IntPtr compiled_model,
+ IntPtr property_key,
+ IntPtr property_value);
+
+ ///
+ /// Gets properties for current compiled model.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// Property key.
+ /// A pointer to property value.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_get_property",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_get_property(
+ IntPtr compiled_model,
+ ref sbyte property_key,
+ ref IntPtr property_value);
+
+ ///
+ /// Exports the current compiled model to an output stream `std::ostream`.
+ /// The exported model can also be imported via the ov::Core::import_model method.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// Path to the file.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_export_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_export_model(
+ IntPtr compiled_model,
+ ref sbyte export_model_path);
+
+ ///
+ /// Release the memory allocated by ov_compiled_model_t.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_compiled_model_free(IntPtr compiled_model);
+
+ ///
+ /// Returns pointer to device-specific shared context on a remote accelerator
+ /// device that was used to create this CompiledModel.
+ ///
+ /// A pointer to the ov_compiled_model_t.
+ /// Return context.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_compiled_model_get_context",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_compiled_model_get_context(
+ IntPtr compiled_model,
+ ref IntPtr context);
+
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_core.cs b/modules/csharp_api/csharp/native_methods/ov_core.cs
new file mode 100644
index 000000000..145c42bb7
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_core.cs
@@ -0,0 +1,408 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+
+ ///
+ /// Get version of OpenVINO.
+ ///
+ /// a pointer to the version
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_get_openvino_version",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_get_openvino_version(
+ IntPtr version);
+
+ ///
+ /// Release the memory allocated by ov_version_t.
+ ///
+ /// A pointer to the ov_version_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_version_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_version_free(
+ IntPtr version);
+
+ ///
+ /// Constructs OpenVINO Core instance by default.
+ /// See RegisterPlugins for more details.
+ ///
+ /// A pointer to the newly created ov_core_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_create",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_create(
+ ref IntPtr core);
+
+ ///
+ /// Constructs OpenVINO Core instance using XML configuration file with devices description.
+ /// See RegisterPlugins for more details.
+ ///
+ /// A path to .xml file with devices to load from.
+ /// If XML configuration file is not specified, then default plugin.xml file will be used.
+ /// A pointer to the newly created ov_core_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_create_with_config",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_create_with_config(
+ string xml_config_file,
+ ref IntPtr core);
+
+ ///
+ /// Release the memory allocated by ov_core_t.
+ ///
+ /// A pointer to the ov_core_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_core_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_core_free(
+ IntPtr core);
+
+ ///
+ /// Reads models from IR / ONNX / PDPD / TF / TFLite formats.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Path to a model.
+ /// Path to a data file.
+ /// A pointer to the newly created model.
+ /// Status code of the operation: OK(0) for success.
+ ///
+ ///
+ /// For IR format (*.bin):
+ /// if `bin_path` is empty, will try to read a bin file with the same name as xml and
+ /// if the bin file with the same name is not found, will load IR without weights.
+ /// For the following file formats the `bin_path` parameter is not used:
+ ///
+ /// ONNX format (*.onnx)
+ /// PDPD(*.pdmodel)
+ /// TF(*.pb)
+ /// TFLite(*.tflite)
+ ///
+ [DllImport(dll_extern, EntryPoint = "ov_core_read_model_unicode",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_read_model_unicode(
+ IntPtr core,
+ string model_path,
+ string bin_path,
+ ref IntPtr model);
+
+ ///
+ /// Reads models from IR / ONNX / PDPD / TF / TFLite formats.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Path to a model.
+ /// Path to a data file.
+ /// A pointer to the newly created model.
+ /// Status code of the operation: OK(0) for success.
+ ///
+ ///
+ /// For IR format (*.bin):
+ /// if `bin_path` is empty, will try to read a bin file with the same name as xml and
+ /// if the bin file with the same name is not found, will load IR without weights.
+ /// For the following file formats the `bin_path` parameter is not used:
+ ///
+ /// ONNX format (*.onnx)
+ /// PDPD(*.pdmodel)
+ /// TF(*.pb)
+ /// TFLite(*.tflite)
+ ///
+ [DllImport(dll_extern, EntryPoint = "ov_core_read_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_read_model(
+ IntPtr core,
+ ref sbyte model_path,
+ ref sbyte bin_path,
+ ref IntPtr model);
+
+ [DllImport(dll_extern, EntryPoint = "ov_core_read_model_from_memory_buffer",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_read_model_from_memory_buffer(IntPtr core,
+ ref byte model_path,
+ ulong str_size,
+ IntPtr weights,
+ ref IntPtr model);
+
+ ///
+ /// Reads models from IR / ONNX / PDPD / TF / TFLite formats.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Path to a model.
+ /// Shared pointer to a constant tensor with weights.
+ /// A pointer to the newly created model.
+ ///
+ /// Reading ONNX / PDPD / TF / TFLite models does not support loading weights
+ /// from the @p weights tensors.
+ ///
+ /// Created model object shares the weights with the @p weights object.
+ /// Thus, do not create @p weights on temporary data that can be freed later,
+ /// since the model constant data will point to an invalid memory.
+ ///
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_read_model_from_memory",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_read_model_from_memory(IntPtr core,
+ ref byte model_path,
+ IntPtr weights,
+ ref IntPtr model);
+
+ ///
+ /// Creates a compiled model from a source model object. Users can create
+ /// as many compiled models as they need and use them simultaneously
+ /// (up to the limitation of the hardware resources).
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Model object acquired from Core::read_model.
+ /// Name of a device to load a model to.
+ /// How many properties args will be passed,
+ /// each property contains 2 args: key and value.
+ /// A pointer to the newly created compiled_model.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model(
+ IntPtr core,
+ IntPtr model,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model);
+
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model(
+ IntPtr core,
+ IntPtr model,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model,
+ IntPtr varg1, IntPtr varg2);
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model(
+ IntPtr core,
+ IntPtr model,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model,
+ IntPtr varg1, IntPtr varg2,
+ IntPtr varg3, IntPtr varg4);
+
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model(
+ IntPtr core,
+ IntPtr model,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model,
+ IntPtr varg1, IntPtr varg2,
+ IntPtr varg3, IntPtr varg4,
+ IntPtr varg5, IntPtr varg6);
+
+ ///
+ /// Reads a model and creates a compiled model from the IR/ONNX/PDPD file.
+ /// This can be more efficient than using the ov_core_read_model_from_XXX + ov_core_compile_model flow,
+ /// especially for cases when caching is enabled and a cached model is available.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Path to a model.
+ /// Name of a device to load a model to.
+ /// How many properties args will be passed,
+ /// each property contains 2 args: key and value.
+ /// A pointer to the newly created compiled_model.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model_from_file",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model_from_file(
+ IntPtr core,
+ ref sbyte model_path,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model);
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model_from_file",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model_from_file(
+ IntPtr core,
+ ref sbyte model_path,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model,
+ IntPtr varg1, IntPtr varg2);
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model_from_file",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model_from_file(
+ IntPtr core,
+ ref sbyte model_path,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model,
+ IntPtr varg1, IntPtr varg2,
+ IntPtr varg3, IntPtr varg4);
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model_from_file",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model_from_file(
+ IntPtr core,
+ ref sbyte model_path,
+ ref sbyte device_name,
+ ulong property_args_size,
+ ref IntPtr compiled_model,
+ IntPtr varg1, IntPtr varg2,
+ IntPtr varg3, IntPtr varg4,
+ IntPtr varg5, IntPtr varg6);
+
+
+ ///
+ /// Sets properties for a device, acceptable keys can be found in ov_property_key_xxx.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Name of a device.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_set_property",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public static extern ExceptionStatus ov_core_set_property(IntPtr core,
+ ref sbyte device_name, IntPtr varg1, IntPtr varg2, IntPtr varg3, IntPtr varg4);
+ [DllImport(dll_extern, EntryPoint = "ov_core_set_property",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public static extern ExceptionStatus ov_core_set_property(IntPtr core,
+ ref sbyte device_name, IntPtr varg1, IntPtr varg2);
+ [DllImport(dll_extern, EntryPoint = "ov_core_set_property",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public static extern ExceptionStatus ov_core_set_property(IntPtr core,
+ ref sbyte device_name, IntPtr varg1, IntPtr varg2, IntPtr varg3, IntPtr varg4, IntPtr varg5, IntPtr varg6);
+ ///
+ /// Gets properties related to device behaviour.
+ /// The method extracts information that can be set via the set_property method.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// Name of a device to get a property value.
+ /// Property key.
+ /// A pointer to property value with string format.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_get_property",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_get_property(
+ IntPtr core,
+ ref sbyte device_name,
+ ref sbyte property_key,
+ ref IntPtr property_value);
+
+ ///
+ /// Returns devices available for inference.
+ ///
+ /// A pointer to the ie_core_t instance.
+ /// A pointer to the ov_available_devices_t instance.
+ /// Core objects go over all registered plugins and ask about available devices.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_get_available_devices",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_get_available_devices(
+ IntPtr core,
+ IntPtr devices);
+
+ ///
+ /// Releases memory occupied by ov_available_devices_t
+ ///
+ /// A pointer to the ov_available_devices_t instance.
+ [DllImport(dll_extern, EntryPoint = "ov_available_devices_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_available_devices_free(IntPtr devices);
+
+ ///
+ /// Imports a compiled model from the previously exported one.
+ ///
+ /// A pointer to the ov_core_t instance.
+ /// A pointer to content of the exported model.
+ /// Number of bytes in the exported network.
+ /// Name of a device to import a compiled model for.
+ /// A pointer to the newly created compiled_model.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_import_model",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_import_model(
+ IntPtr core,
+ ref byte content,
+ ulong content_size,
+ ref sbyte device_name,
+ ref IntPtr compiled_model);
+
+
+ ///
+ /// Returns device plugins version information.
+ /// Device name can be complex and identify multiple devices at once like `HETERO:CPU,GPU`;
+ /// in this case, std::map contains multiple entries, each per device.
+ ///
+ /// A pointer to the ov_core_t instance.
+ /// Device name to identify a plugin.
+ /// A pointer to versions corresponding to device_name.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_get_versions_by_device_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_get_versions_by_device_name(
+ IntPtr core,
+ ref sbyte device_name,
+ IntPtr versions);
+
+
+ ///
+ /// Releases memory occupied by ov_core_version_list_t.
+ ///
+ /// A pointer to the ie_core_versions to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_core_versions_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_core_versions_free(
+ IntPtr versions);
+
+ ///
+ /// Creates a new remote shared context object on the specified accelerator device
+ /// using specified plugin-specific low-level device API parameters (device handle, pointer, context, etc.).
+ ///
+ /// A pointer to the ov_core_t instance.
+ /// Device name to identify a plugin.
+ /// How many property args will be for this remote context creation.
+ /// A pointer to the newly created remote context.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_create_context",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_create_context(
+ IntPtr core,
+ ref sbyte device_name,
+ ulong context_args_size,
+ ref IntPtr context);
+
+
+ ///
+ /// Creates a compiled model from a source model within a specified remote context.
+ ///
+ /// A pointer to the ov_core_t instance.
+ /// Model object acquired from ov_core_read_model.
+ /// A pointer to the newly created remote context.
+ /// How many args will be for this compiled model.
+ /// A pointer to the newly created compiled_model.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_compile_model_with_context",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_compile_model_with_context(
+ IntPtr core,
+ IntPtr model,
+ IntPtr context,
+ ulong property_args_size,
+ ref IntPtr compiled_model);
+
+ ///
+ /// Gets a pointer to default (plugin-supplied) shared context object for the specified accelerator device.
+ ///
+ /// A pointer to the ov_core_t instance.
+ /// Name of a device to get a default shared context from.
+ /// A pointer to the referenced remote context.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_core_get_default_context",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_core_get_default_context(IntPtr core, ref sbyte device_name, ref IntPtr context);
+
+ }
+
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_dimension.cs b/modules/csharp_api/csharp/native_methods/ov_dimension.cs
new file mode 100644
index 000000000..99fa541a1
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_dimension.cs
@@ -0,0 +1,27 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+using ov_dimension = OpenVinoSharp.Ov.ov_dimension;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+
+
+
+ ///
+ /// Check this dimension whether is dynamic
+ ///
+ /// The dimension pointer that will be checked.
+ /// Boolean, true is dynamic and false is static.
+ [DllImport(dll_extern, EntryPoint = "ov_dimension_is_dynamic",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static bool ov_dimension_is_dynamic(ov_dimension dim);
+
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_infer_request.cs b/modules/csharp_api/csharp/native_methods/ov_infer_request.cs
new file mode 100644
index 000000000..1c125c68c
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_infer_request.cs
@@ -0,0 +1,276 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Runtime.InteropServices.ComTypes;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Set an input/output tensor to infer on by the name of tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Name of the input or output tensor.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_tensor",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_tensor(
+ IntPtr infer_request,
+ ref sbyte tensor_name,
+ IntPtr tensor);
+
+ ///
+ /// Set an input/output tensor to infer request for the port.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Port of the input or output tensor, which can be got by calling ov_model_t/ov_compiled_model_t interface.
+ ///
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_tensor_by_port",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_tensor_by_port(
+ IntPtr infer_request,
+ IntPtr port,
+ IntPtr tensor);
+ ///
+ /// Set an input/output tensor to infer request for the port.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Const port of the input or output tensor, which can be got by call interface from ov_model_t/ov_compiled_model_t.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_tensor_by_const_port",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_tensor_by_const_port(
+ IntPtr infer_request,
+ IntPtr port,
+ IntPtr tensor);
+ ///
+ /// Set an input tensor to infer on by the index of tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Index of the input port. If @p idx is greater than the number of model inputs, an error will return.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_input_tensor_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_input_tensor_by_index(
+ IntPtr infer_request,
+ ulong idx,
+ IntPtr tensor);
+ ///
+ /// Set an input tensor for the model with single input to infer on.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_input_tensor",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_input_tensor(
+ IntPtr infer_request,
+ IntPtr tensor);
+ ///
+ /// Set an output tensor to infer by the index of output tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Index of the output tensor.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_output_tensor_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_output_tensor_by_index(
+ IntPtr infer_request,
+ ulong idx,
+ IntPtr tensor);
+ ///
+ /// Set an output tensor to infer models with single output.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_output_tensor",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_output_tensor(
+ IntPtr infer_request,
+ IntPtr tensor);
+ ///
+ /// Get an input/output tensor by the name of tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Name of the input or output tensor to get.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_tensor",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_tensor(
+ IntPtr infer_request,
+ ref sbyte tensor_name,
+ ref IntPtr tensor);
+ ///
+ /// Get an input/output tensor by const port.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Port of the tensor to get. @p port is not found, an error will return.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_tensor_by_const_port",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_tensor_by_const_port(
+ IntPtr infer_request,
+ IntPtr port,
+ ref IntPtr tensor);
+ ///
+ /// Get an input/output tensor by port.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Port of the tensor to get. @p port is not found, an error will return.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_tensor_by_port",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_tensor_by_port(
+ IntPtr infer_request,
+ IntPtr port,
+ ref IntPtr tensor);
+ ///
+ /// Get an input tensor by the index of input tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Index of the tensor to get. If the tensor with the specified @p idx is not found, an error will return.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_input_tensor_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_input_tensor_by_index(
+ IntPtr infer_request,
+ ulong idx,
+ ref IntPtr tensor);
+ ///
+ /// Get an input tensor from the model with only one input tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_input_tensor",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_input_tensor(
+ IntPtr infer_request,
+ ref IntPtr tensor);
+ ///
+ /// Get an output tensor by the index of output tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Index of the tensor to get. If the tensor with the specified @p idx is not found, an error will return.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_output_tensor_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_output_tensor_by_index(
+ IntPtr infer_request,
+ ulong idx,
+ ref IntPtr tensor);
+ ///
+ /// Get an output tensor from the model with only one output tensor.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Reference to the tensor.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_output_tensor",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_output_tensor(
+ IntPtr infer_request,
+ ref IntPtr tensor);
+ ///
+ /// Infer specified input(s) in synchronous mode.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_infer",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_infer(
+ IntPtr infer_request);
+
+ ///
+ /// Cancel inference request.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_cancel",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_cancel(IntPtr infer_request);
+
+
+ ///
+ /// Start inference of specified input(s) in asynchronous mode.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_start_async",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_start_async(IntPtr infer_request);
+
+ ///
+ /// Wait for the result to become available.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_wait",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_wait(IntPtr infer_request);
+
+ ///
+ /// Waits for the result to become available. Blocks until the specified timeout has elapsed or the result becomes available,
+ /// whichever comes first.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Maximum duration, in milliseconds, to block for.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_wait_for",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_wait_for(IntPtr infer_request, long timeout);
+
+ ///
+ /// Set callback function, which will be called when inference is done.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// A function to be called.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_set_callback",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_set_callback(IntPtr infer_request, IntPtr callback);
+
+ ///
+ /// Release the memory allocated by ov_infer_request_t.
+ ///
+ /// A pointer to the ov_infer_request_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_infer_request_free(IntPtr infer_request);
+
+ ///
+ /// Query performance measures per layer to identify the most time consuming operation.
+ ///
+ /// A pointer to the ov_infer_request_t.
+ /// Vector of profiling information for operations in a model.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_infer_request_get_profiling_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_infer_request_get_profiling_info(IntPtr infer_request, IntPtr profiling_infos);
+
+ ///
+ /// Release the memory allocated by ov_profiling_info_list_t.
+ ///
+ /// A pointer to the ov_profiling_info_list_t to free memory.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_profiling_info_list_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_profiling_info_list_free(IntPtr profiling_infos);
+
+
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_layout.cs b/modules/csharp_api/csharp/native_methods/ov_layout.cs
new file mode 100644
index 000000000..f3feeed28
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_layout.cs
@@ -0,0 +1,41 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Create a layout object.
+ ///
+ /// The description of layout.
+ /// The layout input pointer.
+ /// a status code, return OK if successful
+ [DllImport(dll_extern, EntryPoint = "ov_layout_create",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_layout_create(
+ ref sbyte layout_desc,
+ ref IntPtr layout);
+
+ ///
+ /// Free layout object.
+ ///
+ /// The pointer of layout.
+ [DllImport(dll_extern, EntryPoint = "ov_layout_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_layout_free(IntPtr layout);
+
+ ///
+ /// Convert layout object to a readable string.
+ ///
+ /// layout will be converted.
+ /// string that describes the layout content.
+ [DllImport(dll_extern, EntryPoint = "ov_layout_to_string",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static string ov_layout_to_string(IntPtr layout);
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_model.cs b/modules/csharp_api/csharp/native_methods/ov_model.cs
new file mode 100644
index 000000000..164bac365
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_model.cs
@@ -0,0 +1,304 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+
+ ///
+ /// Release the memory allocated by ov_model_t.
+ ///
+ /// A pointer to the ov_model_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_model_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_model_free(
+ IntPtr model);
+
+ ///
+ /// Get a const single input port of ov_model_t, which only support single input model.
+ ///
+ /// A pointer to the ov_model_t.
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_const_input",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_const_input(
+ IntPtr model,
+ ref IntPtr input_port);
+
+ ///
+ /// Get a const input port of ov_model_t by name.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor name (char *).
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_const_input_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_const_input_by_name(
+ IntPtr model,
+ ref sbyte tensor_name,
+ ref IntPtr input_port);
+
+ ///
+ /// Get a const input port of ov_model_t by port index.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor index.
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_const_input_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_const_input_by_index(
+ IntPtr model,
+ ulong index,
+ ref IntPtr input_port);
+
+ ///
+ /// Get single input port of ov_model_t, which only support single input model.
+ ///
+ /// A pointer to the ov_model_t.
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_input",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_input(
+ IntPtr model,
+ ref IntPtr input_port);
+
+ ///
+ /// Get an input port of ov_model_t by name.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor name (char *).
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_input_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_input_by_name(
+ IntPtr model,
+ ref sbyte tensor_name,
+ ref IntPtr input_port);
+
+ ///
+ /// Get an input port of ov_model_t by port index.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor index.
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_input_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_input_by_index(
+ IntPtr model,
+ ulong index,
+ ref IntPtr input_port);
+
+
+ ///
+ /// Get a single const output port of ov_model_t, which only supports single output model.
+ ///
+ /// A pointer to the ov_model_t.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_const_output",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_const_output(
+ IntPtr model,
+ ref IntPtr output_port);
+
+ ///
+ /// Get a const output port of ov_model_t by port index.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor index.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_const_output_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_const_output_by_index(
+ IntPtr model,
+ ulong index,
+ ref IntPtr output_port);
+
+ ///
+ /// Get a const output port of ov_model_t by name.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor name (char *).
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_const_output_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_const_output_by_name(
+ IntPtr model,
+ ref sbyte tensor_name,
+ ref IntPtr output_port);
+
+
+ ///
+ /// Get a single output port of ov_model_t, which only supports single output model.
+ ///
+ /// A pointer to the ov_model_t.
+ /// A pointer to the ov_output_const_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_output",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_output(
+ IntPtr model,
+ ref IntPtr output_port);
+
+ ///
+ /// Get an output port of ov_model_t by port index.
+ ///
+ /// A pointer to the ov_model_t.
+ /// input tensor index.
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_output_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_output_by_index(
+ IntPtr model,
+ ulong index,
+ ref IntPtr output_port);
+
+ ///
+ /// Get an output port of ov_model_t by name.
+ ///
+ /// A pointer to the ov_model_t.
+ /// output tensor name (char *).
+ /// A pointer to the ov_output_port_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_output_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_output_by_name(
+ IntPtr model,
+ ref sbyte tensor_name,
+ ref IntPtr output_port);
+
+ ///
+ /// Get the input size of ov_model_t.
+ ///
+ /// A pointer to the ov_model_t.
+ /// the model's input size.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_inputs_size",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_inputs_size(IntPtr model, ref ulong input_size);
+
+ ///
+ /// Get the output size of ov_model_t.
+ ///
+ /// A pointer to the ov_model_t.
+ /// the model's output size.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_outputs_size",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_outputs_size(
+ IntPtr model,
+ ref ulong output_size);
+
+ ///
+ /// Returns true if any of the ops defined in the model is dynamic shape.
+ ///
+ /// A pointer to the ov_model_t.
+ /// true if model contains dynamic shapes
+ [DllImport(dll_extern, EntryPoint = "ov_model_is_dynamic",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static bool ov_model_is_dynamic(
+ IntPtr model);
+
+ ///
+ /// Do reshape in model with a list of (name, partial shape).
+ ///
+ /// A pointer to the ov_model_t.
+ /// The list of input tensor names.
+ /// A PartialShape list.
+ /// The item count in the list.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_reshape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_reshape(
+ IntPtr model,
+ IntPtr[] tensor_names,
+ ref Ov.ov_partial_shape partial_shapes,
+ ulong size);
+
+
+ ///
+ /// Do reshape in model with partial shape for a specified name.
+ ///
+ /// A pointer to the ov_model_t.
+ /// The tensor name of input tensor.
+ /// A PartialShape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_reshape_input_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_reshape_input_by_name(
+ IntPtr model,
+ ref sbyte tensor_name,
+ Ov.ov_partial_shape partial_shape);
+
+ ///
+ /// Do reshape in model for one node(port 0).
+ ///
+ /// A pointer to the ov_model_t.
+ /// A PartialShape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_reshape_single_input",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_reshape_single_input(
+ IntPtr model,
+ Ov.ov_partial_shape partial_shape);
+
+ ///
+ /// Do reshape in model with a list of (port id, partial shape).
+ ///
+ /// A pointer to the ov_model_t.
+ /// The array of port indexes.
+ /// A PartialShape list.
+ /// The item count in the list.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_reshape_by_port_indexes",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_reshape_by_port_indexes(
+ IntPtr model,
+ ref ulong port_indexes,
+ ref Ov.ov_partial_shape partial_shapes,
+ ulong size);
+
+ ///
+ /// Do reshape in model with a list of (ov_output_port_t, partial shape).
+ ///
+ /// A pointer to the ov_model_t.
+ /// The ov_output_port_t list.
+ /// A PartialShape list.
+ /// The item count in the list.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_reshape_by_ports",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_reshape_by_ports(
+ IntPtr model,
+ ref IntPtr output_ports,
+ ref Ov.ov_partial_shape partial_shapes,
+ ulong size);
+
+ ///
+ /// Gets the friendly name for a model.
+ ///
+ /// A pointer to the ov_model_t.
+ /// the model's friendly name.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_model_get_friendly_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_model_get_friendly_name(
+ IntPtr model,
+ ref IntPtr friendly_name);
+
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_node.cs b/modules/csharp_api/csharp/native_methods/ov_node.cs
new file mode 100644
index 000000000..640905bac
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_node.cs
@@ -0,0 +1,89 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Get the shape of port object.
+ ///
+ /// A pointer to ov_output_const_port_t.
+ /// tensor shape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_const_port_get_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_const_port_get_shape(
+ IntPtr port,
+ IntPtr tensor_shape);
+
+ ///
+ /// Get the shape of port object.
+ ///
+ /// A pointer to ov_output_port_t.
+ /// tensor shape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_port_get_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_port_get_shape(
+ IntPtr port,
+ IntPtr tensor_shape);
+ ///
+ /// Get the tensor name of port.
+ ///
+ /// A pointer to the ov_output_const_port_t.
+ /// A pointer to the tensor name.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_port_get_any_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_port_get_any_name(
+ IntPtr port,
+ ref IntPtr tensor_name);
+
+ ///
+ /// Get the partial shape of port.
+ ///
+ /// A pointer to the ov_output_const_port_t.
+ /// Partial shape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_port_get_partial_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_port_get_partial_shape(
+ IntPtr port,
+ ref Ov.ov_partial_shape partial_shape);
+
+ ///
+ /// Get the tensor type of port.
+ ///
+ /// A pointer to the ov_output_const_port_t.
+ /// tensor type.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_port_get_element_type",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_port_get_element_type(
+ IntPtr port,
+ ref uint tensor_type);
+
+ ///
+ /// free port object
+ ///
+ /// The pointer to the instance of the ov_output_port_t to free.
+ [DllImport(dll_extern, EntryPoint = "ov_output_port_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_output_port_free(
+ IntPtr port);
+
+ ///
+ /// free const port
+ ///
+ /// The pointer to the instance of the ov_output_const_port_t to free.
+ [DllImport(dll_extern, EntryPoint = "ov_output_const_port_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_output_const_port_free(
+ IntPtr port);
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_partial_shape.cs b/modules/csharp_api/csharp/native_methods/ov_partial_shape.cs
new file mode 100644
index 000000000..af6cb7a05
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_partial_shape.cs
@@ -0,0 +1,117 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+
+ public partial class NativeMethods
+ {
+ ///
+ /// Initialize a partial shape with static rank and dynamic dimension.
+ ///
+ /// support static rank.
+ /// support dynamic and static dimension.
+ /// The pointer of partial shape
+ ///
+ /// Static rank, but dynamic dimensions on some or all axes.
+ /// Examples: `{1,2,?,4}` or `{?,?,?}` or `{1,2,-1,4}`
+ /// Static rank, and static dimensions on all axes.
+ /// Examples: `{ 1,2,3,4}` or `{6}` or `{}`
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_create",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_partial_shape_create(
+ long rank,
+ ref Ov.ov_dimension dims,
+ out Ov.ov_partial_shape partial_shape_obj);
+
+ ///
+ /// Initialize a partial shape with dynamic rank and dynamic dimension.
+ ///
+ /// support dynamic and static rank.
+ /// support dynamic and static dimension.
+ /// The pointer of partial shape
+ ///
+ /// Dynamic rank:
+ /// Example: `?`
+ /// Static rank, but dynamic dimensions on some or all axes.
+ /// Examples: `{1,2,?,4}` or `{?,?,?}` or `{1,2,-1,4}`
+ /// Static rank, and static dimensions on all axes.
+ /// Examples: `{ 1,2,3,4}` or `{6}` or `{}`
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_create_dynamic",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_partial_shape_create_dynamic(
+ Ov.ov_dimension rank,
+ ref Ov.ov_dimension dims,
+ out Ov.ov_partial_shape partial_shape_obj);
+
+ ///
+ /// Initialize a partial shape with static rank and static dimension.
+ ///
+ /// support dynamic and static rank.
+ /// support dynamic and static dimension.
+ /// The pointer of partial shape
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_create_static",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_partial_shape_create_static(
+ long rank,
+ ref long dims,
+ out Ov.ov_partial_shape partial_shape_obj);
+
+ ///
+ /// Release internal memory allocated in partial shape.
+ ///
+ /// The object's internal memory will be released.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_partial_shape_free(ref Ov.ov_partial_shape partial_shape);
+
+ ///
+ /// Convert partial shape without dynamic data to a static shape.
+ ///
+ /// The partial_shape pointer.
+ /// The shape pointer.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_to_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_partial_shape_to_shape(
+ Ov.ov_partial_shape partial_shape,
+ IntPtr shape);
+
+ ///
+ /// Convert shape to partial shape.
+ ///
+ /// The shape.
+ /// The partial_shape pointer.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_shape_to_partial_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_shape_to_partial_shape(
+ Ov.ov_shape shape,
+ ref Ov.ov_partial_shape partial_shape);
+
+ ///
+ /// Check this partial_shape whether is dynamic
+ ///
+ /// The partial_shape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_is_dynamic",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static bool ov_partial_shape_is_dynamic(Ov.ov_partial_shape partial_shape);
+
+ ///
+ /// Helper function, convert a partial shape to readable string.
+ ///
+ /// The partial_shape pointer.
+ /// A string that represents partial_shape's content.
+ [DllImport(dll_extern, EntryPoint = "ov_partial_shape_to_string",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static string ov_partial_shape_to_string(Ov.ov_partial_shape partial_shape);
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_prepostprocess.cs b/modules/csharp_api/csharp/native_methods/ov_prepostprocess.cs
new file mode 100644
index 000000000..a44ace04a
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_prepostprocess.cs
@@ -0,0 +1,483 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Create a ov_preprocess_prepostprocessor_t instance.
+ ///
+ /// A pointer to the ov_model_t.
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_create",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_create(
+ IntPtr model,
+ ref IntPtr preprocess);
+
+ ///
+ /// Release the memory allocated by ov_preprocess_prepostprocessor_t.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_preprocess_prepostprocessor_free(
+ IntPtr preprocess);
+
+ ///
+ /// Get the input info of ov_preprocess_prepostprocessor_t instance.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// A pointer to the ov_preprocess_input_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_get_input_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_get_input_info(
+ IntPtr preprocess,
+ ref IntPtr preprocess_input_info);
+
+ ///
+ /// Get the input info of ov_preprocess_prepostprocessor_t instance by tensor name.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// The name of input.
+ /// A pointer to the ov_preprocess_input_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_get_input_info_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_get_input_info_by_name(
+ IntPtr preprocess,
+ ref sbyte tensor_name,
+ ref IntPtr preprocess_input_info);
+
+ ///
+ /// Get the input info of ov_preprocess_prepostprocessor_t instance by tensor order.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// The order of input.
+ /// A pointer to the ov_preprocess_input_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_get_input_info_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_get_input_info_by_index(
+ IntPtr preprocess,
+ ulong tensor_index,
+ ref IntPtr preprocess_input_info);
+
+ ///
+ /// Release the memory allocated by ov_preprocess_input_info_t.
+ ///
+ /// A pointer to the ov_preprocess_input_info_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_info_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_preprocess_input_info_free(
+ IntPtr preprocess_input_info);
+
+ ///
+ /// Get a ov_preprocess_input_tensor_info_t.
+ ///
+ /// A pointer to the ov_preprocess_input_info_t.
+ /// A pointer to ov_preprocess_input_tensor_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_info_get_tensor_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_info_get_tensor_info(
+ IntPtr preprocess_input_info,
+ ref IntPtr preprocess_input_tensor_info);
+
+ ///
+ /// Release the memory allocated by ov_preprocess_input_tensor_info_t.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_preprocess_input_tensor_info_free(
+ IntPtr preprocess_input_tensor_info);
+
+ ///
+ /// Get a ov_preprocess_preprocess_steps_t.
+ ///
+ /// A pointer to the ov_preprocess_input_info_t.
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_info_get_preprocess_steps",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_info_get_preprocess_steps(
+ IntPtr preprocess_input_info,
+ ref IntPtr preprocess_input_steps);
+
+
+ ///
+ /// Release the memory allocated by ov_preprocess_preprocess_steps_t.
+ ///
+ /// A pointer to the ov_preprocess_preprocess_steps_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_preprocess_preprocess_steps_free(
+ IntPtr preprocess_input_process_steps);
+
+
+ ///
+ /// Add resize operation to model's dimensions.
+ ///
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// A ov_preprocess_resizeAlgorithm instance
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_resize",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_resize(
+ IntPtr preprocess_input_process_steps,
+ int resize_algorithm);
+
+
+ ///
+ /// Add scale preprocess operation. Divide each element of input by specified value.
+ ///
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// Scaling value.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_scale",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_scale(
+ IntPtr preprocess_input_process_steps,
+ float value);
+
+
+ ///
+ /// Add mean preprocess operation. Subtract specified value from each element of input.
+ ///
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// Value to subtract from each element.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_mean",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_mean(
+ IntPtr preprocess_input_process_steps,
+ float value);
+
+ ///
+ /// Crop input tensor between begin and end coordinates.
+ ///
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// Pointer to begin indexes for input tensor cropping.
+ /// Negative values represent counting elements from the end of input tensor
+ /// The size of begin array.
+ /// Pointer to end indexes for input tensor cropping.
+ /// End indexes are exclusive, which means values including end edge are not included in the output slice.
+ /// Negative values represent counting elements from the end of input tensor
+ /// The size of end array
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_crop",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_crop(
+ IntPtr preprocess_input_process_steps,
+ ref int begin,
+ int begin_size,
+ ref int end,
+ int end_size);
+
+ ///
+ /// Add 'convert layout' operation to specified layout.
+ ///
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// A point to ov_layout_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_convert_layout",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_convert_layout(
+ IntPtr preprocess_input_process_steps,
+ IntPtr layout);
+
+
+ ///
+ /// Reverse channels operation.
+ ///
+ /// A pointer to ov_preprocess_preprocess_steps_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_reverse_channels",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_reverse_channels(
+ IntPtr preprocess_input_process_steps);
+
+ ///
+ /// Set ov_preprocess_input_tensor_info_t precision.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// A point to element_type.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_element_type",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_element_type(
+ IntPtr preprocess_input_tensor_info,
+ uint element_type);
+
+ ///
+ /// Set ov_preprocess_input_tensor_info_t color format.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// The enumerate of colorFormat
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_color_format",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_color_format(
+ IntPtr preprocess_input_tensor_info,
+ uint color_format);
+
+
+ ///
+ /// Set ov_preprocess_input_tensor_info_t color format with subname.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// The enumerate of colorFormat
+ /// The size of sub_names.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_color_format_with_subname",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_color_format_with_subname(
+ IntPtr preprocess_input_tensor_info,
+ uint color_format,
+ ulong sub_names_size,
+ IntPtr k1);
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_color_format_with_subname",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_color_format_with_subname(
+ IntPtr preprocess_input_tensor_info,
+ uint color_format,
+ ulong sub_names_size,
+ IntPtr k1, IntPtr k2);
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_color_format_with_subname",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_color_format_with_subname(
+ IntPtr preprocess_input_tensor_info,
+ uint color_format,
+ ulong sub_names_size,
+ IntPtr k1, IntPtr k2, IntPtr k3);
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_color_format_with_subname",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_color_format_with_subname(
+ IntPtr preprocess_input_tensor_info,
+ uint color_format,
+ ulong sub_names_size,
+ IntPtr k1, IntPtr k2, IntPtr k3, IntPtr k4);
+
+ ///
+ /// Set ov_preprocess_input_tensor_info_t spatial_static_shape.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// The height of input
+ /// The width of input
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_spatial_static_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_spatial_static_shape(
+ IntPtr preprocess_input_tensor_info,
+ ulong input_height,
+ ulong input_width);
+
+
+ ///
+ /// Set ov_preprocess_input_tensor_info_t memory type.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// Memory type. Refer to ov_remote_context.h to get memory type string info.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_memory_type",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_memory_type(
+ IntPtr preprocess_input_tensor_info,
+ ref sbyte mem_type);
+
+
+ ///
+ /// Convert ov_preprocess_preprocess_steps_t element type.
+ ///
+ /// A pointer to the ov_preprocess_preprocess_steps_t.
+ /// preprocess input element type.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_convert_element_type",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_convert_element_type(
+ IntPtr preprocess_input_process_steps,
+ uint element_type);
+
+
+ ///
+ /// Convert ov_preprocess_preprocess_steps_t color.
+ ///
+ /// A pointer to the ov_preprocess_preprocess_steps_t.
+ /// The enumerate of colorFormat.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_preprocess_steps_convert_color",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_preprocess_steps_convert_color(
+ IntPtr preprocess_input_process_steps,
+ uint color_format);
+
+
+ ///
+ /// Helper function to reuse element type and shape from user's created tensor.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// A point to ov_tensor_t
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_from",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_from(
+ IntPtr preprocess_input_tensor_info,
+ IntPtr tensor);
+
+ ///
+ /// Set ov_preprocess_input_tensor_info_t layout.
+ ///
+ /// A pointer to the ov_preprocess_input_tensor_info_t.
+ /// A point to ov_layout_t
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_tensor_info_set_layout",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_tensor_info_set_layout(
+ IntPtr preprocess_input_tensor_info,
+ IntPtr layout);
+
+
+ ///
+ /// Get the output info of ov_preprocess_output_info_t instance.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// A pointer to the ov_preprocess_output_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_get_output_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_get_output_info(
+ IntPtr preprocess,
+ ref IntPtr preprocess_output_info);
+
+
+ ///
+ /// Get the output info of ov_preprocess_output_info_t instance.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// The tensor index.
+ /// A pointer to the ov_preprocess_output_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_get_output_info_by_index",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_get_output_info_by_index(
+ IntPtr preprocess,
+ ulong tensor_index,
+ ref IntPtr preprocess_output_info);
+
+
+ ///
+ /// Get the output info of ov_preprocess_output_info_t instance.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// The name of input.
+ /// A pointer to the ov_preprocess_output_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_get_output_info_by_name",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_get_output_info_by_name(
+ IntPtr preprocess,
+ ref sbyte tensor_name,
+ ref IntPtr preprocess_output_info);
+
+
+ ///
+ /// Release the memory allocated by ov_preprocess_output_info_t.
+ ///
+ /// A pointer to the ov_preprocess_output_info_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_output_info_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_preprocess_output_info_free(IntPtr preprocess_output_info);
+
+
+ ///
+ /// Get a ov_preprocess_output_tensor_info_t.
+ ///
+ /// A pointer to the ov_preprocess_output_info_t.
+ /// A pointer to the ov_preprocess_output_tensor_info_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_output_info_get_tensor_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_output_info_get_tensor_info(
+ IntPtr preprocess_output_info,
+ ref IntPtr preprocess_output_tensor_info);
+
+ ///
+ /// Release the memory allocated by ov_preprocess_output_tensor_info_t.
+ ///
+ /// A pointer to the ov_preprocess_output_tensor_info_t to free memory.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_output_tensor_info_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_preprocess_output_tensor_info_free(
+ IntPtr preprocess_output_tensor_info);
+
+
+ ///
+ /// Set ov_preprocess_output_tensor_info_t precision.
+ ///
+ /// A pointer to the ov_preprocess_output_tensor_info_t.
+ /// A point to element_type
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_output_set_element_type",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_output_set_element_type(
+ IntPtr preprocess_output_tensor_info,
+ uint element_type);
+
+
+ ///
+ /// Get current input model information.
+ ///
+ /// A pointer to the ov_preprocess_input_info_t.
+ /// A pointer to the ov_preprocess_input_model_info_t
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_info_get_model_info",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_info_get_model_info(
+ IntPtr preprocess_input_info,
+ ref IntPtr preprocess_input_model_info);
+
+ ///
+ /// Release the memory allocated by ov_preprocess_input_model_info_t.
+ ///
+ /// A pointer to the ov_preprocess_input_model_info_t to free memory.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_model_info_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_model_info_free(
+ IntPtr preprocess_input_model_info);
+
+ ///
+ /// Set layout for model's input tensor.
+ ///
+ /// A pointer to the ov_preprocess_input_model_info_t
+ /// A point to ov_layout_t
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_input_model_info_set_layout",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_input_model_info_set_layout(
+ IntPtr preprocess_input_model_info,
+ IntPtr layout);
+
+
+ ///
+ /// Adds pre/post-processing operations to function passed in constructor.
+ ///
+ /// A pointer to the ov_preprocess_prepostprocessor_t.
+ /// A pointer to the ov_model_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_preprocess_prepostprocessor_build",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_preprocess_prepostprocessor_build(
+ IntPtr preprocess,
+ ref IntPtr model);
+
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_rank.cs b/modules/csharp_api/csharp/native_methods/ov_rank.cs
new file mode 100644
index 000000000..5354deafc
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_rank.cs
@@ -0,0 +1,24 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+using ov_rank = OpenVinoSharp.Ov.ov_rank;
+namespace OpenVinoSharp
+{
+
+ public partial class NativeMethods
+ {
+
+ ///
+ /// Check whether this rank is dynamic.
+ ///
+ /// The rank pointer that will be checked.
+ /// The return value.
+ [DllImport(dll_extern, EntryPoint = "ov_rank_is_dynamic",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static bool ov_rank_is_dynamic(ov_rank rank);
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_shape.cs b/modules/csharp_api/csharp/native_methods/ov_shape.cs
new file mode 100644
index 000000000..e414a1942
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_shape.cs
@@ -0,0 +1,37 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Initialize a fully shape object, allocate space for its dimensions
+ /// and set its content if dims is not null.
+ ///
+ /// The rank value for this object, it should be more than 0(>0)
+ /// The dimensions data for this shape object, it's size should be equal to rank.
+ /// The input/output shape object pointer.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_shape_create",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_shape_create(
+ long rank,
+ ref long dims,
+ IntPtr shape);
+
+ ///
+ /// Free a shape object's internal memory.
+ ///
+ /// The input shape object pointer.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_shape_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_shape_free(
+ IntPtr shape);
+ }
+}
diff --git a/modules/csharp_api/csharp/native_methods/ov_tensor.cs b/modules/csharp_api/csharp/native_methods/ov_tensor.cs
new file mode 100644
index 000000000..c87450c27
--- /dev/null
+++ b/modules/csharp_api/csharp/native_methods/ov_tensor.cs
@@ -0,0 +1,121 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp
+{
+ public partial class NativeMethods
+ {
+ ///
+ /// Constructs Tensor using element type, shape and a pre-allocated host memory pointer.
+ ///
+ /// Tensor element type.
+ /// Tensor shape.
+ /// Pointer to pre-allocated host memory.
+ /// A point to ov_tensor_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_create_from_host_ptr",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_create_from_host_ptr(
+ uint type,
+ Ov.ov_shape shape,
+ IntPtr host_ptr,
+ ref IntPtr tensor);
+
+ ///
+ /// Constructs Tensor using element type and shape. Allocate internal host storage using default allocator.
+ ///
+ /// Tensor element type
+ /// Tensor shape.
+ /// A point to ov_tensor_t.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_create",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_create(
+ uint type,
+ Ov.ov_shape shape,
+ ref IntPtr tensor);
+ ///
+ /// Set new shape for tensor, deallocate/allocate if new total size is bigger than previous one.
+ ///
+ /// A point to ov_tensor_t..
+ /// Tensor shape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_set_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_set_shape(
+ IntPtr tensor,
+ Ov.ov_shape shape);
+
+ ///
+ /// Get shape for tensor.
+ ///
+ /// A point to ov_tensor_t.
+ /// Tensor shape.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_get_shape",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_get_shape(
+ IntPtr tensor,
+ IntPtr shape);
+
+ ///
+ /// Get type for tensor.
+ ///
+ /// A point to ov_tensor_t.
+ /// Tensor element type.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_get_element_type",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_get_element_type(
+ IntPtr tensor,
+ out uint type);
+
+ ///
+ /// the total number of elements (a product of all the dims or 1 for scalar).
+ ///
+ /// A point to ov_tensor_t.
+ /// number of elements.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_get_size",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_get_size(
+ IntPtr tensor,
+ ref ulong elements_size);
+
+ ///
+ /// the size of the current Tensor in bytes.
+ ///
+ /// A point to ov_tensor_t
+ /// the size of the current Tensor in bytes.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_get_byte_size",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_get_byte_size(
+ IntPtr tensor,
+ ref ulong byte_size);
+
+ ///
+ /// Provides access to the underlying host memory.
+ ///
+ /// A point to ov_tensor_t
+ /// A point to host memory.
+ /// Status code of the operation: OK(0) for success.
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_data",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static ExceptionStatus ov_tensor_data(
+ IntPtr tensor,
+ ref IntPtr data);
+
+ ///
+ /// Free ov_tensor_t.
+ ///
+ /// A point to ov_tensor_t
+ [DllImport(dll_extern, EntryPoint = "ov_tensor_free",
+ CharSet = CharSet.Unicode, CallingConvention = CallingConvention.Cdecl)]
+ public extern static void ov_tensor_free(IntPtr tensor);
+ }
+}
diff --git a/modules/csharp_api/csharp/ov/ov.cs b/modules/csharp_api/csharp/ov/ov.cs
new file mode 100644
index 000000000..b653d0b0e
--- /dev/null
+++ b/modules/csharp_api/csharp/ov/ov.cs
@@ -0,0 +1,50 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Runtime.InteropServices;
+
+namespace OpenVinoSharp
+{ ///
+ /// Global functions under ov namespace
+ ///
+ public static partial class Ov
+ {
+ ///
+ /// Get version of OpenVINO.
+ ///
+ /// Version of OpenVINO
+ public static Version get_openvino_version()
+ {
+ int l = Marshal.SizeOf(typeof(Version));
+ IntPtr ptr = Marshal.AllocHGlobal(l);
+ ExceptionStatus status = NativeMethods.ov_get_openvino_version(ptr);
+ if (status != 0)
+ {
+ System.Diagnostics.Debug.WriteLine("ov get_openvino_version() error!");
+ return new Version();
+ }
+ var temp = Marshal.PtrToStructure(ptr, typeof(Version));
+ Version version = (Version)temp;
+ string build = string.Copy(version.buildNumber);
+ string description = string.Copy(version.description);
+ Version new_version = new Version(build, description);
+ NativeMethods.ov_version_free(ptr);
+ return new_version;
+ }
+
+ public static byte[] content_from_file(string file)
+ {
+ FileStream fs = new FileStream(file, FileMode.Open, FileAccess.Read);
+
+ long len = fs.Seek(0, SeekOrigin.End);
+
+
+ fs.Seek(0, SeekOrigin.Begin);
+
+ byte[] data = new byte[len + 1];
+
+ fs.Read(data, 0, (int)len);
+ return data;
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/csharp/ov/ov_struct.cs b/modules/csharp_api/csharp/ov/ov_struct.cs
new file mode 100644
index 000000000..f6ade49fc
--- /dev/null
+++ b/modules/csharp_api/csharp/ov/ov_struct.cs
@@ -0,0 +1,152 @@
+using System;
+using System.Runtime.InteropServices;
+
+namespace OpenVinoSharp
+{
+ public static partial class Ov
+ {
+ ///
+ /// Represents a static shape.
+ ///
+ public struct ov_shape
+ {
+ ///
+ /// the rank of shape
+ ///
+ public long rank;
+ ///
+ /// the dims of shape
+ ///
+ public IntPtr dims_ptr;
+ ///
+ /// Get the dims of shape
+ ///
+ /// the dims of shape
+ public long[] get_dims()
+ {
+ long[] dims = new long[rank];
+ Marshal.Copy(dims_ptr, dims, 0, (int)rank);
+ return dims;
+ }
+ }
+#pragma warning disable CS1591
+ ///
+ /// It represents a shape that may be partially or totally dynamic.
+ ///
+ ///
+ /// Dynamic rank. (Informal notation: `?`)
+ /// Static rank, but dynamic dimensions on some or all axes.
+ /// (Informal notation examples: `{1,2,?,4}`, `{?,?,?}`)
+ /// Static rank, and static dimensions on all axes.
+ /// (Informal notation examples: `{1,2,3,4}`, `{6}`, `{}`)
+ ///
+ public struct ov_partial_shape
+ {
+
+ ///
+ /// The rank
+ ///
+ public ov_dimension rank;
+ ///
+ /// The dimension
+ ///
+ public IntPtr dims;
+ }
+ ///
+ /// This is a structure interface equal to ov::Rank
+ ///
+ public struct ov_rank
+ {
+ ///
+ /// The lower inclusive limit for the Rank.
+ ///
+ public long min;
+ ///
+ /// The upper inclusive limit for the Rank.
+ ///
+ public long max;
+ };
+
+ ///
+ /// This is a structure interface equal to ov::Dimension
+ ///
+ public struct ov_dimension
+ {
+ ///
+ /// The lower inclusive limit for the dimension.
+ ///
+ public long min;
+ ///
+ /// The upper inclusive limit for the dimension.
+ ///
+ public long max;
+ };
+
+ ///
+ /// Represents basic inference profiling information per operation.
+ ///
+ ///
+ /// If the operation is executed using tiling, the sum time per each tile is indicated as the total execution time.
+ /// Due to parallel execution, the total execution time for all nodes might be greater than the total inference time.
+ ///
+ public struct ProfilingInfo
+ {
+ ///
+ /// Defines the general status of a node.
+ ///
+ public enum Status
+ {
+ ///
+ /// A node is not executed.
+ ///
+ NOT_RUN,
+ ///
+ /// A node is optimized out during graph optimization phase.
+ ///
+ OPTIMIZED_OUT,
+ ///
+ /// A node is executed.
+ ///
+ EXECUTED
+ };
+
+ public Status status;
+ ///
+ /// The absolute time, in microseconds, that the node ran (in total).
+ ///
+ public ulong real_time;
+ ///
+ /// The net host CPU time that the node ran.
+ ///
+ public ulong cpu_time;
+ ///
+ /// Name of a node.
+ ///
+ public string node_name;
+ ///
+ /// Execution type of a unit.
+ ///
+ public string exec_type;
+ ///
+ /// Node type.
+ ///
+ public string node_type;
+ };
+
+ ///
+ /// A list of profiling info data
+ ///
+ public struct ov_profiling_info_list
+ {
+ ///
+ /// The list of ProfilingInfo
+ ///
+ public IntPtr profiling_infos;
+ ///
+ /// The list size
+ ///
+ public ulong size;
+ };
+
+ }
+}
diff --git a/modules/csharp_api/csharp/preprocess/common.cs b/modules/csharp_api/csharp/preprocess/common.cs
new file mode 100644
index 000000000..67cb895bc
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/common.cs
@@ -0,0 +1,73 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// This enum contains enumerations for color format.
+ ///
+ public enum ColorFormat : uint
+ {
+ ///
+ /// Undefined color format
+ ///
+ UNDEFINE = 0U,
+ ///
+ /// Image in NV12 format as single tensor
+ ///
+ NV12_SINGLE_PLANE,
+ ///
+ /// Image in NV12 format represented as separate tensors for Y and UV planes.
+ ///
+ NV12_TWO_PLANES,
+ ///
+ /// Image in I420 (YUV) format as single tensor
+ ///
+ I420_SINGLE_PLANE,
+ ///
+ /// Image in I420 format represented as separate tensors for Y, U and V planes.
+ ///
+ I420_THREE_PLANES,
+ ///
+ /// Image in RGB interleaved format (3 channels)
+ ///
+ RGB,
+ ///
+ /// Image in BGR interleaved format (3 channels)
+ ///
+ BGR,
+ ///
+ /// Image in GRAY format (1 channel)
+ ///
+ GRAY,
+ ///
+ /// Image in RGBX interleaved format (4 channels)
+ ///
+ RGBX,
+ ///
+ /// Image in BGRX interleaved format (4 channels)
+ ///
+ BGRX
+ };
+ ///
+ /// This enum contains codes for all preprocess resize algorithm.
+ ///
+ public enum ResizeAlgorithm
+ {
+ ///
+ /// linear algorithm
+ ///
+ RESIZE_LINEAR,
+ ///
+ /// cubic algorithm
+ ///
+ RESIZE_CUBIC,
+ ///
+ /// nearest algorithm
+ ///
+ RESIZE_NEAREST
+ };
+}
diff --git a/modules/csharp_api/csharp/preprocess/input_info.cs b/modules/csharp_api/csharp/preprocess/input_info.cs
new file mode 100644
index 000000000..0ba19993e
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/input_info.cs
@@ -0,0 +1,93 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// Class holding preprocessing information for one input
+ /// From preprocessing pipeline perspective, each input can be represented as:
+ /// - User's input parameter info (InputInfo::tensor)
+ /// - Preprocessing steps applied to user's input (InputInfo::preprocess)
+ /// - Model's input info, which is a final input's info after preprocessing (InputInfo::model)
+ ///
+ public class InputInfo : IDisposable
+ {
+ ///
+ /// [private]InputInfo class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]InputInfo class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through InputInfo pointer.
+ ///
+ /// InputInfo pointer.
+ public InputInfo(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ throw new OVException(ExceptionStatus.GENERAL_ERROR, "The ptr is null!");
+ }
+ this.m_ptr = ptr;
+ }
+ ///
+ /// Default destructor
+ ///
+ ~InputInfo() { Dispose(); }
+ ///
+ /// Release unmanaged resources.
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_input_info_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Get current input tensor information with ability to change specific data
+ ///
+ /// Reference to current input tensor structure
+ public InputTensorInfo tensor()
+ {
+ IntPtr input_tensor_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_info_get_tensor_info(m_ptr, ref input_tensor_ptr));
+ return new InputTensorInfo(input_tensor_ptr);
+ }
+
+ ///
+ /// Get current input preprocess information with ability to add more preprocessing steps
+ ///
+ /// Reference to current preprocess steps structure.
+ public PreProcessSteps preprocess()
+ {
+ IntPtr preprocess_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_info_get_preprocess_steps(m_ptr, ref preprocess_ptr));
+ return new PreProcessSteps(preprocess_ptr);
+ }
+
+ ///
+ /// Get current input model information with ability to change original model's input data
+ ///
+ /// Reference to current model's input information structure.
+ public InputModelInfo model()
+ {
+ IntPtr model_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_info_get_model_info(m_ptr, ref model_ptr));
+ return new InputModelInfo(model_ptr);
+ }
+ };
+}
diff --git a/modules/csharp_api/csharp/preprocess/input_model_info.cs b/modules/csharp_api/csharp/preprocess/input_model_info.cs
new file mode 100644
index 000000000..d137783e7
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/input_model_info.cs
@@ -0,0 +1,73 @@
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// Information about model's input tensor. If all information is already included to loaded model, this info
+ /// may not be needed. However it can be set to specify additional information about model, like 'layout'.
+ ///
+ ///
+ /// Example of usage of model 'layout':
+ /// Suppose model has input parameter with shape {1, 3, 224, 224} and user needs to resize input image to model's
+ /// dimensions. It can be done like this
+ ///
+ public class InputModelInfo : IDisposable
+ {
+ ///
+ /// [private]InputModelInfo class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]InputModelInfo class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through InputModelInfo pointer.
+ ///
+ /// InputModelInfo pointer.
+ public InputModelInfo(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ HandleException.handler(ExceptionStatus.PTR_NULL);
+ return;
+ }
+ this.m_ptr = ptr;
+ }
+ ///
+ /// Default destructor
+ ///
+ ~InputModelInfo() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_input_model_info_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Set layout for model's input tensor. This version allows chaining for Lvalue objects
+ ///
+ /// Layout for model's input tensor.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner
+ public InputModelInfo set_layout(Layout layout)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_model_info_set_layout(m_ptr, layout.Ptr));
+ return this;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/preprocess/input_tensor_info.cs b/modules/csharp_api/csharp/preprocess/input_tensor_info.cs
new file mode 100644
index 000000000..6246f9aef
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/input_tensor_info.cs
@@ -0,0 +1,174 @@
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// Information about user's input tensor. By default, it will be initialized to same data (type/shape/etc) as
+ /// model's input parameter. User application can override particular parameters (like 'element_type') according to
+ /// application's data and specify appropriate conversions in pre-processing steps
+ ///
+ public class InputTensorInfo : IDisposable
+ {
+ ///
+ /// [private]InputTensorInfo class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]InputTensorInfo class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through InputTensorInfo pointer.
+ ///
+ /// InputTensorInfo pointer.
+ public InputTensorInfo(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ HandleException.handler(ExceptionStatus.PTR_NULL);
+ return;
+ }
+ this.m_ptr = ptr;
+ }
+ ///
+ /// Default destructor
+ ///
+ ~InputTensorInfo() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_input_tensor_info_free(m_ptr);
+
+ m_ptr = IntPtr.Zero;
+ }
+ ///
+ /// Set color format for user's input tensor.
+ ///
+ ///
+ /// In general way, some formats support multi-plane input, e.g. NV12 image can be represented as 2 separate tensors
+ /// (planes): Y plane and UV plane. set_color_format API also allows to set sub_names for such parameters for
+ /// convenient usage of plane parameters. During build stage, new parameters for each plane will be inserted to the
+ /// place of original parameter. This means that all parameters located after will shift their positions accordingly
+ /// (e.g. {param1, param2} will become {param1/Y, param1/UV, param2})
+ ///
+ /// Color format of input image.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public InputTensorInfo set_color_format(ColorFormat format, params string[] properties)
+ {
+ IntPtr[] p = new IntPtr[properties.Length];
+ for (int i = 0; i < properties.Length; ++i)
+ {
+ p[i] = Marshal.StringToHGlobalAnsi(properties[i]);
+ }
+ switch (p.Length)
+ {
+ case 0:
+ HandleException.handler(NativeMethods.ov_preprocess_input_tensor_info_set_color_format(m_ptr, (uint)format));
+ break;
+ case 1:
+ NativeMethods.ov_preprocess_input_tensor_info_set_color_format_with_subname(m_ptr, (uint)format, (ulong)properties.Length, p[0]);
+ break;
+ case 2:
+ NativeMethods.ov_preprocess_input_tensor_info_set_color_format_with_subname(m_ptr, (uint)format, (ulong)properties.Length, p[0], p[1]);
+ break;
+ case 3:
+ NativeMethods.ov_preprocess_input_tensor_info_set_color_format_with_subname(m_ptr, (uint)format, (ulong)properties.Length, p[0], p[1], p[2]);
+ break;
+ case 4:
+ NativeMethods.ov_preprocess_input_tensor_info_set_color_format_with_subname(m_ptr, (uint)format, (ulong)properties.Length, p[0], p[1], p[2], p[3]);
+ break;
+ default:
+ throw new ArgumentOutOfRangeException("Properties count > 4 not supported");
+
+ }
+ return this;
+ }
+
+ ///
+ /// Set element type for user's input tensor
+ ///
+ /// Element type for user's input tensor.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public InputTensorInfo set_element_type(OvType type)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_tensor_info_set_element_type(m_ptr, (uint)type.get_type()));
+ return this;
+ }
+
+ ///
+ /// By default, input image shape is inherited from model input shape. Use this method to specify different
+ /// width and height of user's input image. In case if input image size is not known, use
+ /// `set_spatial_dynamic_shape` method.
+ ///
+ /// Set fixed user's input image height.
+ /// Set fixed user's input image width.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public InputTensorInfo set_spatial_static_shape(ulong input_height, ulong input_width)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_tensor_info_set_spatial_static_shape(m_ptr, input_height, input_width));
+ return this;
+ }
+
+ ///
+ /// Set memory type runtime information for user's input tensor
+ ///
+ /// Memory type. Refer to specific plugin's documentation for exact string format
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public InputTensorInfo set_memory_type(string memory_type)
+ {
+ sbyte[] c_mem_type = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(memory_type));
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_tensor_info_set_memory_type(m_ptr, ref c_mem_type[0]));
+ return this;
+ }
+
+ ///
+ /// Set layout for user's input tensor
+ ///
+ /// Layout for user's input tensor.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public InputTensorInfo set_layout(Layout layout)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_tensor_info_set_layout(m_ptr, layout.Ptr));
+ return this;
+ }
+
+
+ ///
+ /// Helper function to reuse element type and shape from user's created tensor. Use this only in case if
+ /// input tensor is already known and available before. Overwrites previously set element type and shape via
+ /// `set_element_type` and `set_shape`. Tensor's memory type is not reused, so if `runtime_tensor` represents remote
+ /// tensor with particular memory type - you should still specify appropriate memory type manually using
+ /// `set_memory_type`
+ ///
+ ///
+ /// As for `InputTensorInfo::set_shape`, this method shall not be used together with methods
+ /// 'set_spatial_dynamic_shape' and 'set_spatial_static_shape', otherwise ov::AssertFailure exception will be thrown
+ ///
+ /// User's created tensor.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public InputTensorInfo set_from(Tensor runtime_tensor)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_input_tensor_info_set_from(m_ptr, runtime_tensor.Ptr));
+ return this;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/preprocess/output_info.cs b/modules/csharp_api/csharp/preprocess/output_info.cs
new file mode 100644
index 000000000..c9bb4b1d3
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/output_info.cs
@@ -0,0 +1,70 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// Class holding postprocessing information for one output
+ /// From postprocessing pipeline perspective, each output can be represented as:
+ /// - Model's output info, (OutputInfo::model)
+ /// - Postprocessing steps applied to user's input (OutputInfo::postprocess)
+ /// - User's desired output parameter information, which is a final one after preprocessing (OutputInfo::tensor)
+ ///
+ public class OutputInfo : IDisposable
+ {
+ ///
+ /// [private]OutputInfo class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]OutputInfo class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through OutputInfo pointer.
+ ///
+ /// OutputInfo pointer.
+ public OutputInfo(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ HandleException.handler(ExceptionStatus.PTR_NULL);
+ return;
+ }
+ this.m_ptr = ptr;
+ }
+ ///
+ /// Default destructor
+ ///
+ ~OutputInfo() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_output_info_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Get current output tensor information with ability to change specific data
+ ///
+ /// Reference to current output tensor structure
+ public OutputTensorInfo tensor()
+ {
+ IntPtr output_tensor_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_output_info_get_tensor_info(m_ptr, ref output_tensor_ptr));
+ return new OutputTensorInfo(output_tensor_ptr);
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/preprocess/output_tensor_info.cs b/modules/csharp_api/csharp/preprocess/output_tensor_info.cs
new file mode 100644
index 000000000..c49a5e10f
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/output_tensor_info.cs
@@ -0,0 +1,68 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// Information about user's desired output tensor. By default, it will be initialized to same data
+ /// (type/shape/etc) as model's output parameter. User application can override particular parameters (like
+ /// 'element_type') according to application's data and specify appropriate conversions in post-processing steps
+ ///
+ public class OutputTensorInfo : IDisposable
+ {
+ ///
+ /// [private]OutputTensorInfo class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]OutputTensorInfo class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through OutputTensorInfo pointer.
+ ///
+ /// OutputTensorInfo pointer.
+ public OutputTensorInfo(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ HandleException.handler(ExceptionStatus.PTR_NULL);
+ return;
+ }
+ this.m_ptr = ptr;
+ }
+ ///
+ /// Default destructor
+ ///
+ ~OutputTensorInfo() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_output_tensor_info_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Set element type for user's desired output tensor.
+ ///
+ /// Element type for user's output tensor.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public OutputTensorInfo set_element_type(OvType type)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_output_set_element_type(m_ptr, (uint)type.get_type()));
+ return this;
+ }
+ }
+}
diff --git a/modules/csharp_api/csharp/preprocess/prepost_processor.cs b/modules/csharp_api/csharp/preprocess/prepost_processor.cs
new file mode 100644
index 000000000..30c1ef2dc
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/prepost_processor.cs
@@ -0,0 +1,153 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Reflection;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+
+ ///
+ /// Main class for adding pre- and post- processing steps to existing ov::Model
+ ///
+ ///
+ /// This is a helper class for writing easy pre- and post- processing operations on ov::Model object assuming that
+ /// any preprocess operation takes one input and produces one output.
+ ///
+ /// For advanced preprocessing scenarios, like combining several functions with multiple inputs/outputs into one,
+ /// client's code can use transformation passes over ov::Model
+ ///
+ public class PrePostProcessor : IDisposable
+ {
+ ///
+ /// [private]PrePostProcessor class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]PrePostProcessor class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through Model.
+ ///
+ /// model.
+ public PrePostProcessor(Model model)
+ {
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_create(model.Ptr, ref m_ptr));
+ }
+ ///
+ /// Default destructor
+ ///
+ ~PrePostProcessor() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose() {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_prepostprocessor_free(m_ptr);
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Gets input pre-processing data structure. Should be used only if model/function has only one input
+ /// Using returned structure application's code is able to set user's tensor data (e.g layout), preprocess steps,
+ /// target model's data
+ ///
+ /// Reference to model's input information structure
+ public InputInfo input()
+ {
+ IntPtr input_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_get_input_info(m_ptr, ref input_ptr));
+ return new InputInfo(input_ptr);
+ }
+
+ ///
+ /// Gets input pre-processing data structure for input identified by its tensor name
+ ///
+ /// Tensor name of specific input. Throws if tensor name is not associated with any input in a model
+ /// Reference to model's input information structure
+ public InputInfo input(string tensor_name)
+ {
+ IntPtr input_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_get_input_info_by_name(m_ptr, ref c_tensor_name[0], ref input_ptr));
+ return new InputInfo(input_ptr);
+ }
+ ///
+ /// Gets input pre-processing data structure for input identified by its order in a model
+ ///
+ /// Input index of specific input. Throws if input index is out of range for associated function.
+ /// Reference to model's input information structure
+ public InputInfo input(ulong tensor_index)
+ {
+ IntPtr input_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_get_input_info_by_index(m_ptr, tensor_index, ref input_ptr));
+ return new InputInfo(input_ptr);
+ }
+
+ ///
+ /// Gets output post-processing data structure. Should be used only if model/function has only one output
+ /// Using returned structure application's code is able to set model's output data, post-process steps, user's
+ /// tensor data (e.g layout)
+ ///
+ /// Reference to model's output information structure
+ public OutputInfo output()
+ {
+ IntPtr input_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_get_output_info(m_ptr, ref input_ptr));
+ return new OutputInfo(input_ptr);
+ }
+
+ ///
+ /// Gets output post-processing data structure for output identified by its tensor name
+ ///
+ /// Tensor name of specific output. Throws if tensor name is not associated with any output in a model
+ /// Reference to model's output information structure
+ public OutputInfo output(string tensor_name)
+ {
+ IntPtr input_ptr = IntPtr.Zero;
+ sbyte[] c_tensor_name = (sbyte[])((Array)System.Text.Encoding.Default.GetBytes(tensor_name));
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_get_output_info_by_name(m_ptr, ref c_tensor_name[0], ref input_ptr));
+ return new OutputInfo(input_ptr);
+ }
+
+ ///
+ /// Gets output post-processing data structure for output identified by its order in a model
+ ///
+ /// Output index of specific output. Throws if output index is out of range for associated function
+ /// Reference to model's output information structure
+ public OutputInfo output(ulong tensor_index)
+ {
+ IntPtr input_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_get_output_info_by_index(m_ptr, tensor_index, ref input_ptr));
+ return new OutputInfo(input_ptr);
+ }
+
+ ///
+ /// Adds pre/post-processing operations to function passed in constructor
+ ///
+ /// Function with added pre/post-processing operations
+ public Model build()
+ {
+ IntPtr model_ptr = IntPtr.Zero;
+ HandleException.handler(
+ NativeMethods.ov_preprocess_prepostprocessor_build(m_ptr, ref model_ptr));
+ return new Model(model_ptr);
+ }
+ }
+
+
+}
diff --git a/modules/csharp_api/csharp/preprocess/preprocess_steps.cs b/modules/csharp_api/csharp/preprocess/preprocess_steps.cs
new file mode 100644
index 000000000..e820ee0e1
--- /dev/null
+++ b/modules/csharp_api/csharp/preprocess/preprocess_steps.cs
@@ -0,0 +1,206 @@
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess
+{
+ ///
+ /// Preprocessing steps. Each step typically intends adding of some operation to input parameter
+ /// User application can specify sequence of preprocessing steps in a builder-like manner
+ ///
+ public class PreProcessSteps : IDisposable
+ {
+ ///
+ /// [private]PreProcessSteps class pointer.
+ ///
+ public IntPtr m_ptr = IntPtr.Zero;
+
+ ///
+ /// [public]PreProcessSteps class pointer.
+ ///
+ public IntPtr Ptr { get { return m_ptr; } set { m_ptr = value; } }
+
+ ///
+ /// Default construction through PreProcessSteps pointer.
+ ///
+ /// PreProcessSteps pointer.
+ public PreProcessSteps(IntPtr ptr)
+ {
+ if (ptr == IntPtr.Zero)
+ {
+ HandleException.handler(ExceptionStatus.PTR_NULL);
+ return;
+ }
+ this.m_ptr = ptr;
+ }
+ ///
+ /// Default destructor
+ ///
+ ~PreProcessSteps() { Dispose(); }
+ ///
+ /// Release unmanaged resources
+ ///
+ public void Dispose()
+ {
+ if (m_ptr == IntPtr.Zero)
+ {
+ return;
+ }
+ NativeMethods.ov_preprocess_preprocess_steps_free(m_ptr);
+
+ m_ptr = IntPtr.Zero;
+ }
+
+ ///
+ /// Add resize operation to model's dimensions.
+ ///
+ /// Resize algorithm.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps resize(ResizeAlgorithm resize)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_resize(
+ m_ptr, (int)resize));
+
+ return this;
+ }
+
+
+ ///
+ /// Add scale preprocess operation. Divide each element of input by specified value.
+ ///
+ /// Scaling value.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps scale(float value)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_scale(
+ m_ptr, value));
+
+ return this;
+ }
+
+ ///
+ /// Add mean preprocess operation. Subtract specified value from each element of input.
+ ///
+ /// Value to subtract from each element.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps mean(float value)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_mean(
+ m_ptr, value));
+ return this;
+ }
+
+ ///
+ /// Crop input tensor between begin and end coordinates. Under the hood, inserts `opset8::Slice` operation to
+ /// execution graph. It is recommended to use together with `ov::preprocess::InputTensorInfo::set_shape` to set
+ /// original input shape before cropping
+ ///
+ /// Begin indexes for input tensor cropping. Negative values represent counting elements from the end
+ /// of input tensor
+ /// End indexes for input tensor cropping. End indexes are exclusive, which means values including end
+ /// edge are not included in the output slice. Negative values represent counting elements from the end of input tensor
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps crop(int[] begin, int[] end)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_crop(
+ m_ptr, ref begin[0], begin.Length, ref end[0], end.Length));
+ return this;
+ }
+ ///
+ /// Crop input tensor between begin and end coordinates. Under the hood, inserts `opset8::Slice` operation to
+ /// execution graph. It is recommended to use together with `ov::preprocess::InputTensorInfo::set_shape` to set
+ /// original input shape before cropping
+ ///
+ /// Begin indexes for input tensor cropping. Negative values represent counting elements from the end
+ /// of input tensor
+ /// End indexes for input tensor cropping. End indexes are exclusive, which means values including end
+ /// edge are not included in the output slice. Negative values represent counting elements from the end of input
+ /// tensor
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps crop(List begin, List end)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_crop(
+ m_ptr, ref begin.ToArray()[0], begin.Count, ref end.ToArray()[0], end.Count));
+ return this;
+ }
+
+ ///
+ /// Add 'convert layout' operation to specified layout.
+ ///
+ /// New layout after conversion. If not specified - destination layout is obtained from
+ /// appropriate model input properties.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ ///
+ /// Adds appropriate 'transpose' operation between user layout and target layout.
+ /// Current implementation requires source and destination layout to have same number of dimensions
+ ///
+ ///
+ /// when user data has 'NHWC' layout (example is RGB image, [1, 224, 224, 3]) but model expects
+ /// planar input image ('NCHW', [1, 3, 224, 224]). Preprocessing may look like this:
+ ///
+ /// var proc = PrePostProcessor(model);
+ /// proc.input().tensor().set_layout("NHWC"); // User data is NHWC
+ /// proc.input().preprocess().convert_layout("NCHW")) // model expects input as NCHW
+ ///
+ ///
+ public PreProcessSteps convert_layout(Layout layout)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_convert_layout(
+ m_ptr, layout.Ptr));
+
+ return this;
+ }
+
+ ///
+ /// Reverse channels operation.
+ ///
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ ///
+ /// Adds appropriate operation which reverses channels layout. Operation requires layout having 'C'
+ /// dimension Operation convert_color (RGB-BGR) does reversing of channels also, but only for NHWC layout
+ ///
+ ///
+ /// when user data has 'NCHW' layout (example is [1, 3, 224, 224] RGB order) but model expects
+ /// BGR planes order. Preprocessing may look like this:
+ ///
+ /// var proc = PrePostProcessor(function);
+ /// proc.input().preprocess().convert_layout({0, 3, 1, 2});
+ ///
+ ///
+ public PreProcessSteps reverse_channels()
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_reverse_channels(
+ m_ptr));
+ return this;
+ }
+
+ ///
+ /// Converts color format for user's input tensor. Requires source color format to be specified by
+ /// InputTensorInfo::set_color_format.
+ ///
+ /// Destination color format of input image.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps convert_color(ColorFormat format)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_convert_color(
+ m_ptr, (uint)format));
+ return this;
+ }
+
+ ///
+ /// Add convert element type preprocess operation.
+ ///
+ /// Desired type of input.
+ /// Reference to 'this' to allow chaining with other calls in a builder-like manner.
+ public PreProcessSteps convert_element_type(OvType type)
+ {
+ HandleException.handler(NativeMethods.ov_preprocess_preprocess_steps_convert_element_type(
+ m_ptr, (uint)type.get_type()));
+
+ return this;
+ }
+ }
+}
diff --git a/modules/csharp_api/demos/yolov8/Program.cs b/modules/csharp_api/demos/yolov8/Program.cs
new file mode 100644
index 000000000..837608cee
--- /dev/null
+++ b/modules/csharp_api/demos/yolov8/Program.cs
@@ -0,0 +1,200 @@
+using OpenCvSharp;
+using OpenCvSharp.Dnn;
+using OpenVinoSharp;
+using OpenVinoSharp.model.Yolov8;
+using System.Runtime.InteropServices;
+
+namespace yolov8
+{
+ internal class Program
+ {
+ /// <summary>
+ /// Entry point: parses command-line arguments and dispatches to the YOLOv8 demo.
+ /// Expected arguments: infer_type model_path image_path [device] [label_path].
+ /// </summary>
+ /// <param name="args">Command-line arguments.</param>
+ static void Main(string[] args)
+ {
+ // -------- Get OpenVINO runtime version --------
+ OpenVinoSharp.Version version = Ov.get_openvino_version();
+
+ Console.WriteLine("---- OpenVINO INFO----");
+ Console.WriteLine("Description : {0}", version.description);
+ Console.WriteLine("Build number: {0}", version.buildNumber);
+
+ // The demo needs at least infer_type, model_path and image_path (args[0..2]).
+ // Bug fix: the original printed a garbled warning for args.Length < 2 and
+ // then fell through to index args[0..2], throwing IndexOutOfRangeException.
+ if (args.Length < 3)
+ {
+ Console.WriteLine("Please enter the complete command parameters: <infer_type> <model_path> <image_path> [<device>] [<label_path>]");
+ return;
+ }
+ string device_name = "AUTO";
+ if (args.Length > 3)
+ {
+ device_name = args[3];
+ Console.WriteLine("Set inference device {0}.", args[3]);
+ }
+ else
+ {
+ Console.WriteLine("No inference device specified, default device set to AUTO.");
+ }
+ // Optional class-name (label) file; empty means "print results, don't draw".
+ string label = String.Empty;
+ if (args.Length > 4)
+ {
+ label = args[4];
+ }
+
+ if (args[0] == "det" || args[0] == "seg" || args[0] == "pose" || args[0] == "cls")
+ {
+ yolov8_infer(args[0], args[1], args[2], device_name, label);
+ }
+ else
+ {
+ Console.WriteLine("Please specify the model prediction type, such as 'det'、'seg'、'pose'、'cls'");
+ }
+
+ }
+
+ /// <summary>
+ /// Run YOLOv8 inference on a single image and post-process the result.
+ /// </summary>
+ /// <param name="flg">Prediction type: "det", "seg", "pose" or "cls".</param>
+ /// <param name="model_path">Path to the model file (*.xml or *.onnx).</param>
+ /// <param name="image_path">Path to the test image.</param>
+ /// <param name="device">Inference device name, e.g. "CPU" or "AUTO".</param>
+ /// <param name="classer_path">Optional class-name file; when non-empty, det/seg results are drawn on the image.</param>
+ static void yolov8_infer(string flg, string model_path, string image_path, string device, string classer_path)
+ {
+ // -------- Step 1. Initialize OpenVINO Runtime Core --------
+ Core core = new Core();
+ // -------- Step 2. Read a model --------
+ Console.WriteLine("[INFO] Loading model files: {0}", model_path);
+ Model model = core.read_model(model_path);
+ print_model_info(model);
+
+ // -------- Step 3. Loading a model to the device --------
+ CompiledModel compiled_model = core.compile_model(model, device);
+
+ // -------- Step 4. Create an infer request --------
+ InferRequest infer_request = compiled_model.create_infer_request();
+ // -------- Step 5. Process input images --------
+ Console.WriteLine("[INFO] Read image files: {0}", image_path);
+ Mat image = new Mat(image_path); // Read image by opencvsharp
+ // Pad the image into a square canvas so resizing to the model input
+ // preserves the aspect ratio (letterbox into the top-left corner).
+ int max_image_length = image.Cols > image.Rows ? image.Cols : image.Rows;
+ Mat max_image = Mat.Zeros(new OpenCvSharp.Size(max_image_length, max_image_length), MatType.CV_8UC3);
+ Rect roi = new Rect(0, 0, image.Cols, image.Rows);
+ image.CopyTo(new Mat(max_image, roi));
+ // factors[0..1]: scale from the 640x640 model space back to the padded image;
+ // factors[2..3]: original image height and width.
+ float[] factors = new float[4];
+ factors[0] = factors[1] = (float)(max_image_length / 640.0);
+ factors[2] = image.Rows;
+ factors[3] = image.Cols;
+
+ // -------- Step 6. Set up input --------
+ Tensor input_tensor = infer_request.get_input_tensor();
+ Shape input_shape = input_tensor.get_shape();
+ Mat input_mat = CvDnn.BlobFromImage(max_image, 1.0 / 255.0, new Size(input_shape[2], input_shape[3]), 0, true, false);
+ float[] input_data = new float[input_shape[1] * input_shape[2] * input_shape[3]];
+ Marshal.Copy(input_mat.Ptr(0), input_data, 0, input_data.Length);
+ input_tensor.set_data(input_data);
+
+ // -------- Step 7. Do inference synchronously --------
+ infer_request.infer();
+
+ // -------- Step 8. Process output --------
+ Console.WriteLine();
+ if (flg == "det")
+ {
+ Tensor output_tensor = infer_request.get_output_tensor();
+ int output_length = (int)output_tensor.get_size();
+ float[] output_data = output_tensor.get_data<float>(output_length);
+
+ ResultProcess process = new ResultProcess(factors, 80);
+ Result result = process.process_det_result(output_data);
+
+ process.print_result(result);
+
+ // Bug fix: read_class_names was previously also called unconditionally
+ // before this guard (with a possibly empty path) and then again inside
+ // it; load the class names only once, and only when a file was given.
+ if (classer_path != String.Empty)
+ {
+ process.read_class_names(classer_path);
+ Mat result_image = process.draw_det_result(result, image);
+ Cv2.ImShow("result", result_image);
+ Cv2.WaitKey(0);
+ }
+
+ }
+ else if (flg == "seg")
+ {
+ // Segmentation has two outputs: detection boxes ("output0") and mask protos ("output1").
+ Tensor output_tensor_det = infer_request.get_tensor("output0");
+ int output_length_det = (int)output_tensor_det.get_size();
+ float[] output_data_det = output_tensor_det.get_data<float>(output_length_det);
+
+ Tensor output_tensor_pro = infer_request.get_tensor("output1");
+ int output_length_pro = (int)output_tensor_pro.get_size();
+ float[] output_data_pro = output_tensor_pro.get_data<float>(output_length_pro);
+
+ ResultProcess process = new ResultProcess(factors, 80);
+ Result result = process.process_seg_result(output_data_det, output_data_pro);
+
+ process.print_result(result);
+
+ if (classer_path != String.Empty)
+ {
+ process.read_class_names(classer_path);
+ Mat result_image = process.draw_seg_result(result, image);
+ Cv2.ImShow("result", result_image);
+ Cv2.WaitKey(0);
+ }
+
+ }
+ else if (flg == "pose")
+ {
+ Tensor output_tensor = infer_request.get_output_tensor();
+ int output_length = (int)output_tensor.get_size();
+ float[] output_data = output_tensor.get_data<float>(output_length);
+
+ ResultProcess process = new ResultProcess(factors, 80);
+ Result result = process.process_pose_result(output_data);
+
+ // 0.2 is the keypoint visibility threshold used when drawing.
+ Mat result_image = process.draw_pose_result(result, image, 0.2);
+ process.print_result(result);
+ Cv2.ImShow("result", result_image);
+ Cv2.WaitKey(0);
+ }
+ else if (flg == "cls")
+ {
+ Tensor output_tensor = infer_request.get_output_tensor();
+ int output_length = (int)output_tensor.get_size();
+ float[] output_data = output_tensor.get_data<float>(output_length);
+
+ ResultProcess process = new ResultProcess(factors, 80);
+ KeyValuePair<int, float>[] result = process.process_cls_result(output_data);
+
+ process.print_result(result);
+
+ }
+ }
+
+ /// <summary>
+ /// Output relevant information of the model
+ /// </summary>
+ /// <param name="model">Model class</param>
+ static void print_model_info(Model model)
+ {
+ Console.WriteLine("[INFO] model name: {0}", model.get_friendly_name());
+
+ Node input_node = model.get_const_input(0);
+ Console.WriteLine("[INFO] inputs:");
+ Console.WriteLine("[INFO] input name: {0}", input_node.get_name());
+ Console.WriteLine("[INFO] input type: {0}", input_node.get_type().to_string());
+ Console.WriteLine("[INFO] input shape: {0}", input_node.get_shape().to_string());
+ // Release the native node handle as soon as we are done with it.
+ input_node.dispose();
+ Node output_node = model.get_const_output(0);
+ Console.WriteLine("[INFO] outputs:");
+ Console.WriteLine("[INFO] output name: {0}", output_node.get_name());
+ Console.WriteLine("[INFO] output type: {0}", output_node.get_type().to_string());
+ Console.WriteLine("[INFO] output shape: {0}", output_node.get_shape().to_string());
+ output_node.dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/demos/yolov8/Properties/launchSettings.json b/modules/csharp_api/demos/yolov8/Properties/launchSettings.json
new file mode 100644
index 000000000..60d9dd268
--- /dev/null
+++ b/modules/csharp_api/demos/yolov8/Properties/launchSettings.json
@@ -0,0 +1,11 @@
+{
+ "profiles": {
+ "yolov8": {
+ "commandName": "Project",
+ "commandLineArgs": "det ./../../../../../model/yolov8/yolov8s.xml ./../../../../../dataset/image/demo_2.jpg CPU ./../../../../../dataset/lable/COCO_lable.txt"
+ //"commandLineArgs": "cls ./../../../../../model/yolov8/yolov8s-cls.xml ./../../../../../dataset/image/demo_7.jpg CPU "
+ //"commandLineArgs": "pose ./../../../../../model/yolov8/yolov8s-pose.xml ./../../../../../dataset/image/demo_9.jpg CPU ",
+ //"commandLineArgs": "seg ./../../../../../model/yolov8\\yolov8s-seg.xml ./../../../../../dataset/image/demo_2.jpg CPU ./../../../../../dataset/lable/COCO_lable.txt"
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/demos/yolov8/README.md b/modules/csharp_api/demos/yolov8/README.md
new file mode 100644
index 000000000..d6c8d8761
--- /dev/null
+++ b/modules/csharp_api/demos/yolov8/README.md
@@ -0,0 +1,328 @@
+# OpenVINO™ C# API Deployment Yolov8 Model Example
+
+[简体中文](README_cn.md) | English
+
+ OpenVINO™ C# API version 3.0 has undergone significant updates compared to version 2.0, changing from refactoring the C++API to directly reading OpenVino ™ The official C API makes the application more flexible and supports a richer range of functions. OpenVINO™ C# API 3.0 API interface with multiple references to OpenVino ™ C++API implementation, therefore it is closer to the C++API when used, which will be more friendly to friends who are familiar with using the C++API.
+ This example demonstrates how to deploy the Yolov8 full series model using the OpenVINO™ C# API 3.0 API.
+ This example supports the full range of Yolov8 models, as well as official pre training models and personal training models.
+ The following C # APIs will be mainly used in the example:
+
+| Feature | API | Description |
+| :----------------------: | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| OpenVINO Runtime Version | Ov.get_openvino_version() | Get Openvino API version. |
+| Basic Infer Flow | Core.read_model(), core.compiled_model(), CompiledModel.create_infer_request(), InferRequest.get_input_tensor(), InferRequest.get_output_tensor(), InferRequest.get_tensor() | Common API to do inference: read and compile a model, create an infer request, configure input and output tensors. |
+| Synchronous Infer | InferRequest.infer() | Do synchronous inference. |
+| Model Operations | Model.get_friendly_name(), Model.get_const_input(), Model.get_const_output() | Get inputs and outputs of a model. |
+| Node Operations | Node.get_name(), Node.get_type(), Node.get_shape() | Get node information. |
+| Tensor Operations | Tensor.get_shape(), Tensor.set_data(), Tensor.get_size(), Tensor.get_data() | Get a tensor shape, size, data and set data. |
+| Yolov8 Process | ResultProcess.process_det_result(), ResultProcess.process_seg_result(), ResultProcess.process_pose_result, ResultProcess.process_cls_result(), ResultProcess.read_class_names(), ResultProcess.draw_det_result(), ResultProcess.draw_pose_result(), ResultProcess.draw_seg_result(), ResultProcess.print_result() | Process and draw yolov8 result. |
+
+ The information listed below has been verified and tested by code running. If there are other successful testing environments, please feel free to supplement:
+
+| **Options** | **Values** |
+| --------------------- | ------------------------------------------------------------ |
+| Validated Models | Yolov8-det、Yolov8-cls、Yolov8-pose、Yolov8-seg |
+| Model Format | OpenVINO™ toolkit Intermediate Representation (*.xml + *.bin), ONNX (*.onnx) |
+| Supported devices | CPU、iGPU、dGPU(Not tested) |
+| Operating environment | Window 10 、Window 11; |
+| Building environment | Visual Studio 11,.NET 6.0 |
+
+
+
+## How It Works
+
+ When the project runs, the sample program will read the user specified path model, test images, and category files to prepare relevant data for model inference testing; Load the specified model and image into OpenVINO ™ Reasoning the core and performing synchronous reasoning, then loading the obtained reasoning data into a custom Yolov8 data processing class for result processing.
+ OpenVINO used in the project ™ The relevant components have been encapsulated in OpenVINO™ C# API, and there is no need to install OpenVino separately ™。
+
+## Project Dependency
+
+ All dependencies in the project can be installed through the **NuGet** package:
+
+- **OpenVINO™ C# API**
+
+ You can install it through the NuGet tool that comes with Visual Studio,
+
+ If the project is compiled through **dotnet**, the corresponding package can be added using the following statement:
+
+```
+dotnet add package OpenVINO.CSharp.win
+```
+
+## Model acquisition
+
+ All the models used in the project were downloaded from the **ultralytics** platform. The following are download examples:
+
+1. Installing Ultralytics
+
+ ```
+ pip install ultralytics
+ ```
+
+2. Export Yolov8 model
+
+ ```
+ yolo export model=yolov8s.pt format=onnx #yolov8-det
+ yolo export model=yolov8s-cls.pt format=onnx #yolov8-cls
+ yolo export model=yolov8s-pose.pt format=onnx #yolov8-pose
+ yolo export model=yolov8s-seg.pt format=onnx #yolov8-seg
+ ```
+
+3. Convert to IR format
+
+ IR format here via OpenVINO ™ The model optimization tool implementation requires the installation of OpenVINO ™ Python version, specific implementation can refer to [Model Preparation OpenVINO ™ Documentation](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html) , can also be achieved through the command line:
+
+ ```
+ mo -input_model yolov8s.onnx
+ ```
+
+## Building
+
+ Currently, rapid implementation in the Window environment has been achieved. Please refer to the installation of the environment for reference[Windows Installation OpenVINO™ C# API](./../../docs/en/windows_install.md)
+
+ The Linux environment is still under development.
+
+- **Download source code**
+
+ The complete project code and model files have been provided in the code repository, and the project source code can be downloaded through Git.
+
+ ```
+ git clone https://github.com/guojin-yan/OpenVINO-CSharp-API.git
+ cd OpenVINO-CSharp-API
+ ```
+
+- **Visual Studio compile**
+
+ If compiling using Visual Studio, you can open the ``CSharp.sln`` solution through the solution and install the project dependencies as described in [Project Dependencies](##Project Dependency). The ```openvino2023.0``` folder will then be added to the project.
+
+
+
+ Finally, the project is built and compiled by right-clicking on the project ->Generate.
+
+- **dotnet compile**
+
+ If the project is compiled through **dotnet**, run the following commands in sequence:
+
+ ```
+ cd demos\yolov8
+ dotnet add package OpenVINO.CSharp.win # add OpenVINO.CSharp.win
+ dotnet build # building project
+ ```
+
+ After the project is compiled, an executable file will be generated in the ``bin\Debug\net6.0`` directory.
+
+## Run
+
+- **Visual Studio Run**
+
+ To run this project on the Visual Studio platform, you need to modify the ``Properties\launchSettings.json`` file to specify the program command line input. The content of the ``launchSettings.json`` file is shown below. To use it, you need to add the command-line ``<args>``.
+
+ ```json
+ {
+ "profiles": {
+ "yolov8": {
+ "commandName": "Project",
+ "commandLineArgs": ""
+ }
+ }
+ }
+ ```
+
+ After adding command line content, rebuild the project and run it.
+
+ The main content of the ``<args>`` parameters is as follows:
+
+ ```shell
+
+ ```
+
+ When running the example, it is necessary to specify the model prediction type, model path, and image file path parameters simultaneously. The prediction type input includes four types: 'det', 'seg', 'pose', and 'cls'; The default inference device is set to 'AUTO'. For 'det' and 'seg' predictions, the \ parameter can be set. If this parameter is set, the results will be plotted on the image. If it is not set, it will be printed through the console.
+
+ - Reasoning that the input parameters of the Yolov8-det model are
+
+ ```shell
+ det ./../../../../../model/yolov8/yolov8s.xml ./../../../../../dataset/image/demo_2.jpg CPU ./../../../../../dataset/lable/COCO_lable.txt
+ ```
+
+ - Reasoning that the input parameters of the Yolov8-cls model are
+
+ ```shell
+ cls ./../../../../../model/yolov8/yolov8s-cls.xml ./../../../../../dataset/image/demo_7.jpg CPU
+ ```
+
+ - Reasoning that the input parameters of the Yolov8-pose model are
+
+ ```shell
+ pose ./../../../../../model/yolov8/yolov8s-pose.xml ./../../../../../dataset/image/demo_9.jpg CPU
+ ```
+
+ - Reasoning that the input parameters of the Yolov8-seg model are
+
+ ```shell
+ seg ./../../../../../model/yolov8/yolov8s-seg.xml ./../../../../../dataset/image/demo_2.jpg CPU ./../../../../../dataset/lable/COCO_lable.txt
+ ```
+
+- **dotnet run**
+
+ If running through dotnet, simply run the following command
+
+ ```shell
+ dotnet run
+ ```
+
+ The \ parameter settings are as follows:
+
+ - Reasoning that the input parameters of the Yolov8-det model are
+
+ ```shell
+ det ./../../model/yolov8/yolov8s.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+ ```
+
+ - Reasoning that the input parameters of the Yolov8-cls model are
+
+ ```shell
+ cls ./../../model/yolov8/yolov8s-cls.xml ./../../dataset/image/demo_7.jpg CPU
+ ```
+
+ - Reasoning that the input parameters of the Yolov8-pose model are
+
+ ```shell
+ pose ./../../model/yolov8/yolov8s-pose.xml ./../../dataset/image/demo_9.jpg CPU
+ ```
+
+ - Reasoning that the input parameters of the Yolov8-seg model are
+
+ ```shell
+ seg ./../../model/yolov8\\yolov8s-seg.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+ ```
+
+### Results Display
+
+The program will output model inference information and inference results:
+
+#### Yolov8-det model inference results
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run det ./../../model/yolov8/yolov8s.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8/yolov8s.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 640, 640]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 84, 8400]
+[INFO] Read image files: ./../../dataset/image/demo_2.jpg
+
+
+ Detection result :
+
+1: 0 0.89 (x:744 y:43 width:388 height:667)
+2: 0 0.88 (x:149 y:202 width:954 height:507)
+3: 27 0.72 (x:435 y:433 width:98 height:284)
+```
+
+
+
+#### Yolov8-pose model inference results
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run pose ./../../model/yolov8/yolov8s-pose.xml ./../../dataset/image/demo_9.jpg CPU
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8/yolov8s-pose.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 640, 640]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 56, 8400]
+[INFO] Read image files: ./../../dataset/image/demo_9.jpg
+
+
+ Classification result :
+
+1: 1 0.94 (x:104 y:22 width:151 height:365) Nose: (188 ,60 ,0.92) Left Eye: (192 ,52 ,0.83) Right Eye: (179 ,54 ,0.89) Left Ear: (197 ,52 ,0.48) Right Ear: (166 ,56 ,0.75) Left Shoulder: (212 ,91 ,0.92) Right Shoulder: (151 ,94 ,0.94) Left Elbow: (230 ,145 ,0.89) Right Elbow: (138 ,143 ,0.92) Left Wrist: (244 ,199 ,0.88) Right Wrist: (118 ,187 ,0.91) Left Hip: (202 ,191 ,0.97) Right Hip: (169 ,193 ,0.97) Left Knee: (183 ,271 ,0.96) Right Knee: (183 ,275 ,0.96) Left Ankle: (174 ,358 ,0.87) Right Ankle: (197 ,354 ,0.88)
+```
+
+
+
+#### Yolov8-seg model inference results
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run seg ./../../model/yolov8\\yolov8s-seg.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8\\yolov8s-seg.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 640, 640]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 116, 8400]
+[INFO] Read image files: ./../../dataset/image/demo_2.jpg
+
+
+ Segmentation result :
+
+1: 0 0.90 (x:745 y:41 width:402 height:671)
+2: 0 0.86 (x:118 y:196 width:1011 height:515)
+3: 27 0.70 (x:434 y:436 width:90 height:280)
+```
+
+
+
+
+
+#### Yolov8-cls model inference results
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run cls ./../../model/yolov8/yolov8s-cls.xml ./../../dataset/image/demo_7.jpg CPU
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8/yolov8s-cls.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 224, 224]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 1000]
+[INFO] Read image files: ./../../dataset/image/demo_7.jpg
+
+
+ Classification Top 10 result :
+
+classid probability
+------- -----------
+294 0.992172
+269 0.002861
+296 0.002111
+295 0.000714
+270 0.000546
+276 0.000432
+106 0.000159
+362 0.000147
+260 0.000078
+272 0.000070
+```
+
diff --git a/modules/csharp_api/demos/yolov8/README_cn.md b/modules/csharp_api/demos/yolov8/README_cn.md
new file mode 100644
index 000000000..2c29bc2f5
--- /dev/null
+++ b/modules/csharp_api/demos/yolov8/README_cn.md
@@ -0,0 +1,332 @@
+![OpenVinoSharp](https://socialify.git.ci/guojin-yan/OpenVinoSharp/image?description=1&descriptionEditable=💞%20OpenVINO%20wrapper%20for%20.NET💞%20&forks=1&issues=1&logo=https%3A%2F%2Fs2.loli.net%2F2023%2F01%2F26%2FylE1K5JPogMqGSW.png&name=1&owner=1&pattern=Circuit%20Board&pulls=1&stargazers=1&theme=Light)
+
+简体中文| [English](README.md)
+
+# OpenVinoSharp部署Yolov8模型实例
+
+ OpenVinoSharp 3.0 版本较2.0版本做了较大程度上的更新,由原来的重构 C++ API 改为直接读取 OpenVINO™ 官方 C API,使得应用更加灵活,所支持的功能更加丰富。OpenVinoSharp 3.0 API 接口多参考 OpenVINO™ C++ API 实现,因此在使用时更加接近C++ API,这对熟悉使用C++ API的朋友会更加友好。
+
+ 此示例演示了如何使用OpenVinoSharp 3.0 版本 API 部署Yolov8 全系列模型。
+
+ 该示例支持Yolov8全系列模型,并且支持官方预训练模型以及个人训练模型。
+
+ 示例中主要会使用以下C# API:
+
+| Feature | API | Description |
+| :----------------------: | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| OpenVINO Runtime Version | Ov.get_openvino_version() | Get Openvino API version. |
+| Basic Infer Flow | Core.read_model(), core.compiled_model(), CompiledModel.create_infer_request(), InferRequest.get_input_tensor(), InferRequest.get_output_tensor(), InferRequest.get_tensor() | Common API to do inference: read and compile a model, create an infer request, configure input and output tensors. |
+| Synchronous Infer | InferRequest.infer() | Do synchronous inference. |
+| Model Operations | Model.get_friendly_name(), Model.get_const_input(), Model.get_const_output() | Get inputs and outputs of a model. |
+| Node Operations | Node.get_name(), Node.get_type(), Node.get_shape() | Get node information. |
+| Tensor Operations | Tensor.get_shape(), Tensor.set_data(), Tensor.get_size(), Tensor.get_data() | Get a tensor shape, size, data and set data. |
+| Yolov8 Process | ResultProcess.process_det_result(), ResultProcess.process_seg_result(), ResultProcess.process_pose_result, ResultProcess.process_cls_result(), ResultProcess.read_class_names(), ResultProcess.draw_det_result(), ResultProcess.draw_pose_result(), ResultProcess.draw_seg_result(), ResultProcess.print_result() | Process and draw yolov8 result. |
+
+下方所列出信息已经经过代码运行验证测试,如有其他环境测试成功欢迎大家进行补充:
+
+| 选项 | 值 |
+| -------- | ------------------------------------------------------- |
+| 支持模型 | Yolov8-det、Yolov8-cls、Yolov8-pose、Yolov8-seg |
+| 模型格式 | OpenVINO™ 工具包中间表示(\*.xml,\*.bin),ONNX (\*.onnx) |
+| 支持设备 | CPU、iGPU、dGPU(未测试) |
+| 运行环境 | Window 10 、Window 11; |
+| 编译环境 | Visual Studio 11,.NET 6.0 |
+
+## 工作原理
+
+ 项目运行时,示例程序会读取用户指定路径模型、测试图片以及类别文件,准备模型推理测试的相关数据;将指定模型和图像加载到OpenVINO™ 推理核心并进行同步推理,然后将获取的推理数据加载到自定义的Yolov8数据处理类中进行结果处理。
+
+ 项目中使用的OpenVINO™相关组件已经封装到OpenVinoSharp中,无需安装在单独安装OpenVINO™。
+
+## 项目依赖
+
+ 项目中所有依赖项均可以通过NuGet 包安装:
+
+- **OpenVinoSharp**
+
+ 可以通过Visual Studio 自带的 NuGet 工具进行安装
+
+ 如果项目是通过**dotnet**编译,可以通过下面语句添加对应的包:
+
+```
+dotnet add package OpenVINO.CSharp.win
+```
+
+## 模型获取
+
+ 项目中所使用的模型全部由**ultralytics**平台下载,下面是下载示例:
+
+1. 安装ultralytics
+
+ ```
+ pip install ultralytics
+ ```
+
+2. 导出 Yolov8模型
+
+ ```
+ yolo export model=yolov8s.pt format=onnx #yolov8-det
+ yolo export model=yolov8s-cls.pt format=onnx #yolov8-cls
+ yolo export model=yolov8s-pose.pt format=onnx #yolov8-pose
+ yolo export model=yolov8s-seg.pt format=onnx #yolov8-seg
+ ```
+
+3. 转为IR格式
+
+ IR格式此处通过OpenVINO™的模型优化工具实现,需要安装OpenVINO™ Python 版本,具体实现可以参考[Model Preparation — OpenVINO™ documentation](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html),也可以通过命令行实现:
+
+ ```
+ mo -input_model yolov8s.onnx
+ ```
+
+## 快速构建
+
+ 目前已经实现Window环境下的快速实现,环境安装请参考[Windows 安装 OpenVINOSharp](./../../docs/cn/windows_install.md)。
+
+ Linux环境还在开发中。
+
+- **下载源码**
+
+ 代码仓中已经提供了完整的项目代码和模型文件,通过Git下载项目源码。
+
+ ```
+ git clone https://github.com/guojin-yan/OpenVINOSharp.git
+ cd OpenVINOSharp
+ ```
+
+- **Visual Studio 编译**
+
+ 如果使用Visual Studio 编译,可以通过解决方案打开``OpenVinoSharp.sln`` 解决方案,并按照[项目依赖](##项目依赖)中的方式安装项目依赖,然后项目中会增加``openvino2023.0``文件夹。
+
+
+
+ 最后项目构建和编译,只需要通过右击项目->生成即可。
+
+- **dotnet编译**
+
+ 如果项目通过dotnet编译,依次运行以下命令:
+
+```
+cd demos\yolov8
+dotnet add package OpenVinoSharp.win # 添加OpenVinoSharp包
+dotnet build # 编译项目
+```
+
+ 项目编译后,会在``\bin\Debug\net6.0``目录下生成可执行文件。
+
+## 运行
+
+- **Visual Studio 运行**
+
+ 在 Visual Studio 平台运行该项目需要修改``Properties\launchSettings.json``文件指定程序命令行输入,``launchSettings.json`` 文件内容如下所示,在使用时需要添加命令行\即可即可。
+
+ ```json
+ {
+ "profiles": {
+ "yolov8": {
+ "commandName": "Project",
+ "commandLineArgs": ""
+ }
+ }
+ }
+ ```
+
+ 添加命令行内容后,重新生成项目并运行即可。
+
+ \参数主要内容如下:
+
+ ```shell
+
+ ```
+
+ 运行示例时,需要同时指定模型预测类型、模型路径、图片文件路径参数,预测类型输入包括: 'det'、'seg'、'pose'、'cls'四种类型;默认推理设备设置为'AUTO',对于'det'、'seg'预测,可以设置参数,如果设置该参数,会将结果绘制到图片上,如果未设置,会通过控制台打印出来
+
+ - Yolov8-det 模型推理参数为:
+
+ ```shell
+ det ./../../../../../model/yolov8/yolov8s.xml ./../../../../../dataset/image/demo_2.jpg CPU ./../../../../../dataset/lable/COCO_lable.txt
+ ```
+
+ - Yolov8-cls 模型推理参数为:
+
+ ```shell
+ cls ./../../../../../model/yolov8/yolov8s-cls.xml ./../../../../../dataset/image/demo_7.jpg CPU
+ ```
+
+ - Yolov8-pose 模型推理参数为:
+
+ ```shell
+ pose ./../../../../../model/yolov8/yolov8s-pose.xml ./../../../../../dataset/image/demo_9.jpg CPU
+ ```
+
+ - Yolov8-seg 模型推理参数为:
+
+ ```shell
+ seg ./../../../../../model/yolov8/yolov8s-seg.xml ./../../../../../dataset/image/demo_2.jpg CPU ./../../../../../dataset/lable/COCO_lable.txt
+ ```
+
+- **dotnet运行**
+
+ 如果通过dotnet运行,只需要运行以下命令即可
+
+ ```shell
+ dotnet run
+ ```
+
+ \参数设置如下:
+
+ - Yolov8-det 模型推理参数为:
+
+ ```shell
+ det ./../../model/yolov8/yolov8s.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+ ```
+
+ - Yolov8-cls 模型推理参数为:
+
+ ```shell
+ cls ./../../model/yolov8/yolov8s-cls.xml ./../../dataset/image/demo_7.jpg CPU
+ ```
+
+ - Yolov8-pose 模型推理参数为:
+
+ ```shell
+ pose ./../../model/yolov8/yolov8s-pose.xml ./../../dataset/image/demo_9.jpg CPU
+ ```
+
+ - Yolov8-seg 模型推理参数为:
+
+ ```shell
+ seg ./../../model/yolov8\\yolov8s-seg.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+ ```
+
+### 结果展示
+
+程序运行会输出模型推理信息和推理结果:
+
+#### Yolov8-det 模型推理结果
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run det ./../../model/yolov8/yolov8s.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8/yolov8s.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 640, 640]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 84, 8400]
+[INFO] Read image files: ./../../dataset/image/demo_2.jpg
+
+
+ Detection result :
+
+1: 0 0.89 (x:744 y:43 width:388 height:667)
+2: 0 0.88 (x:149 y:202 width:954 height:507)
+3: 27 0.72 (x:435 y:433 width:98 height:284)
+```
+
+
+
+#### Yolov8-pose 模型推理结果
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run pose ./../../model/yolov8/yolov8s-pose.xml ./../../dataset/image/demo_9.jpg CPU
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8/yolov8s-pose.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 640, 640]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 56, 8400]
+[INFO] Read image files: ./../../dataset/image/demo_9.jpg
+
+
+ Classification result :
+
+1: 1 0.94 (x:104 y:22 width:151 height:365) Nose: (188 ,60 ,0.92) Left Eye: (192 ,52 ,0.83) Right Eye: (179 ,54 ,0.89) Left Ear: (197 ,52 ,0.48) Right Ear: (166 ,56 ,0.75) Left Shoulder: (212 ,91 ,0.92) Right Shoulder: (151 ,94 ,0.94) Left Elbow: (230 ,145 ,0.89) Right Elbow: (138 ,143 ,0.92) Left Wrist: (244 ,199 ,0.88) Right Wrist: (118 ,187 ,0.91) Left Hip: (202 ,191 ,0.97) Right Hip: (169 ,193 ,0.97) Left Knee: (183 ,271 ,0.96) Right Knee: (183 ,275 ,0.96) Left Ankle: (174 ,358 ,0.87) Right Ankle: (197 ,354 ,0.88)
+```
+
+
+
+#### Yolov8-seg 模型推理结果
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run seg ./../../model/yolov8\\yolov8s-seg.xml ./../../dataset/image/demo_2.jpg CPU ./../../dataset/lable/COCO_lable.txt
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8\\yolov8s-seg.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 640, 640]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 116, 8400]
+[INFO] Read image files: ./../../dataset/image/demo_2.jpg
+
+
+ Segmentation result :
+
+1: 0 0.90 (x:745 y:41 width:402 height:671)
+2: 0 0.86 (x:118 y:196 width:1011 height:515)
+3: 27 0.70 (x:434 y:436 width:90 height:280)
+```
+
+
+
+
+
+#### Yolov8-cls 模型推理结果
+
+```shell
+PS E:\Git_space\OpenVinoSharp\demos\yolov8> dotnet run cls ./../../model/yolov8/yolov8s-cls.xml ./../../dataset/image/demo_7.jpg CPU
+---- OpenVINO INFO----
+Description : OpenVINO Runtime
+Build number: 2023.0.1-11005-fa1c41994f3-releases/2023/0
+Set inference device CPU.
+[INFO] Loading model files: ./../../model/yolov8/yolov8s-cls.xml
+[INFO] model name: torch_jit
+[INFO] inputs:
+[INFO] input name: images
+[INFO] input type: f32
+[INFO] input shape: Shape : [1, 3, 224, 224]
+[INFO] outputs:
+[INFO] output name: output0
+[INFO] output type: f32
+[INFO] output shape: Shape : [1, 1000]
+[INFO] Read image files: ./../../dataset/image/demo_7.jpg
+
+
+ Classification Top 10 result :
+
+classid probability
+------- -----------
+294 0.992172
+269 0.002861
+296 0.002111
+295 0.000714
+270 0.000546
+276 0.000432
+106 0.000159
+362 0.000147
+260 0.000078
+272 0.000070
+```
+
diff --git a/modules/csharp_api/demos/yolov8/yolov8.csproj b/modules/csharp_api/demos/yolov8/yolov8.csproj
new file mode 100644
index 000000000..5bb58892f
--- /dev/null
+++ b/modules/csharp_api/demos/yolov8/yolov8.csproj
@@ -0,0 +1,15 @@
+
+
+
+ Exe
+ net6.0
+ enable
+ enable
+
+
+
+
+
+
+
+
diff --git a/modules/csharp_api/docs/cn/linux_install.md b/modules/csharp_api/docs/cn/linux_install.md
new file mode 100644
index 000000000..53a8b52c5
--- /dev/null
+++ b/modules/csharp_api/docs/cn/linux_install.md
@@ -0,0 +1,134 @@
+# OpenVINO C# API 在Linux 平台使用
+
+ 由于目前 OpenVINO C# API 还在开发阶段,未生成相应的 NuGet Package, 因此此处基于 Ubuntu 20.04 系统,提供了相应的使用案例,方便大家在Linux系统上使用 OpenVINO C# API。
+
+## 一、配置 .NET 环境
+
+ .NET 是一个免费的跨平台开源开发人员平台 ,用于构建多种应用程序。下面将演示 AIxBoard 如何在 Ubuntu 20.04 上安装 .NET环境,支持 .NET Core 2.0-3.1 系列 以及.NET 5-8 系列 ,如果你的 AIxBoard 使用的是其他Linux系统,你可以参考[在 Linux 发行版上安装 .NET - .NET | Microsoft Learn](https://learn.microsoft.com/zh-cn/dotnet/core/install/linux)。
+
+### 1. 添加 Microsoft 包存储库
+
+ 使用 APT 进行安装可通过几个命令来完成。 安装 .NET 之前,请运行以下命令,将 Microsoft 包签名密钥添加到受信任密钥列表,并添加包存储库。
+
+ 打开终端并运行以下命令:
+
+```bash
+wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
+sudo dpkg -i packages-microsoft-prod.deb
+rm packages-microsoft-prod.deb
+```
+
+ 下图为输入上面命令后控制台的输出:
+
+
+
+### 2. 安装 SDK
+
+ .NET SDK 使你可以通过 .NET 开发应用。 如果安装 .NET SDK,则无需安装相应的运行时。 若要安装 .NET SDK,请运行以下命令:
+
+```bash
+sudo apt-get update
+sudo apt-get install -y dotnet-sdk-3.1
+```
+
+ 下图为安装后控制台的输出:
+
+
+
+
+### 3. 测试安装
+
+ 通过命令行可以检查 SDK 版本以及Runtime时版本。
+
+```
+dotnet --list-sdks
+dotnet --list-runtimes
+```
+
+ 下图为输入测试命令后控制台的输出:
+
+
+
+ 以上就是.NET环境的配置步骤,如果你的环境与本文不匹配,可以通过[.NET 文档 | Microsoft Learn](https://learn.microsoft.com/zh-cn/dotnet/) 获取更多安装步骤。
+
+## 二、安装 OpenVINO C# API
+
+ OpenVINO™ 有两种安装方式: OpenVINO Runtime和OpenVINO Development Tools。OpenVINO Runtime包含用于在处理器设备上运行模型部署推理的核心库。OpenVINO Development Tools是一组用于处理OpenVINO和OpenVINO模型的工具,包括模型优化器、OpenVINO Runtime、模型下载器等。在此处我们只需要安装OpenVINO Runtime即可。
+
+### 1. 下载 OpenVINO Runtime
+
+ 访问[Download the Intel Distribution of OpenVINO Toolkit](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html?ENVIRONMENT=DEV_TOOLS&OP_SYSTEM=WINDOWS&VERSION=v_2023_0_1&DISTRIBUTION=PIP)页面,按照下面流程选择相应的安装选项,在下载页面,由于我们的设备使用的是**Ubuntu20.04**,因此下载时按照指定的编译版本下载即可。
+
+
+
+### 2. 解压安装包
+
+ 我们所下载的 OpenVINO Runtime 本质是一个C++依赖包,因此我们把它放到我们的系统目录下,这样在编译时会根据设置的系统变量获取依赖项。首先在系统文件夹下创建一个文件夹:
+
+```bash
+sudo mkdir -p /opt/intel
+```
+
+ 然后解压缩我们下载的安装文件,并将其移动到指定文件夹下:
+
+```bash
+tar -xvzf l_openvino_toolkit_ubuntu20_2023.0.1.11005.fa1c41994f3_x86_64.tgz
+sudo mv l_openvino_toolkit_ubuntu20_2023.0.1.11005.fa1c41994f3_x86_64 /opt/intel/openvino_2022.3.0
+```
+
+### 3. 安装依赖
+
+ 接下来我们需要安装 OpenVINO Runtime 所需要的依赖项,通过命令行输入以下命令即可:
+
+```bash
+cd /opt/intel/openvino_2022.3.0/
+sudo -E ./install_dependencies/install_openvino_dependencies.sh
+```
+
+
+
+### 4. 配置环境变量
+
+ 安装完成后,我们需要配置环境变量,以保证在调用时系统可以获取对应的文件,通过命令行输入以下命令即可:
+
+```bash
+source /opt/intel/openvino_2022.3.0/setupvars.sh
+```
+
+ 以上就是 OpenVINO Runtime 环境的配置步骤,如果你的环境与本文不匹配,可以通过[Install OpenVINO™ Runtime — OpenVINO™ documentation — Version(2023.0)](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_install_runtime.html)获取更多安装步骤。
+
+### 5. 添加 OpenVINO™ C# API 依赖
+
+ 由于OpenVINO™ C# API当前正处于开发阶段,还未创建Linux版本的NuGet Package,因此需要通过下载项目源码以项目引用的方式使用。
+
+- **下载源码**
+
+ 通过Git下载项目源码,新建一个Terminal,并输入以下命令克隆远程仓库,将该项目放置在项目同级目录下。
+
+ ```
+ git clone https://github.com/guojin-yan/OpenVINO-CSharp-API.git
+ cd OpenVINO-CSharp-API
+ ```
+
+- **修改OpenVINO™ 依赖**
+
+ 由于项目源码的OpenVINO™ 依赖与本文设置不同,因此需要修改OpenVINO™ 依赖项的路径,主要通过修改``OpenVINO-CSharp-API/src/CSharpAPI/native_methods/ov_base.cs``文件即可,修改内容如下:
+
+ ```
+ private const string dll_extern = "./openvino2023.0/openvino_c.dll";
+ ---修改为--->
+ private const string dll_extern = "libopenvino_c.so";
+ ```
+
+- **添加项目依赖**
+
+ 在Terminal输入以下命令,即可将OpenVINO™ C# API添加到AlxBoard_deploy_yolov8项目引用中。
+
+ ```shell
+ dotnet add reference ./../OpenVINO-CSharp-API/src/CSharpAPI/CSharpAPI.csproj
+ ```
+
+
+
+
+
diff --git a/modules/csharp_api/docs/cn/windows_install.md b/modules/csharp_api/docs/cn/windows_install.md
new file mode 100644
index 000000000..156e4c49e
--- /dev/null
+++ b/modules/csharp_api/docs/cn/windows_install.md
@@ -0,0 +1,32 @@
+# OpenVINO™ C# API 在 Windows 平台使用
+
+ OpenVINO™ C# API 主要基于 OpenVINO™ 和 C# 开发,支持 Windows 10/11版本,目前已经在 x64 架构下完成测试。
+
+## C# 环境配置
+
+ C# 是一种新式编程语言,不仅面向对象,还类型安全。 开发人员利用 C# 能够生成在 .NET 中运行的多种安全可靠的应用程序。C#环境安装可以参考下面的文章进行配置。
+
+- [.NET 安装指南 - .NET | Microsoft Learn](https://learn.microsoft.com/zh-cn/dotnet/core/install/windows?tabs=net70)
+
+- [.NET Framework 安装指南 - .NET Framework | Microsoft Learn](https://learn.microsoft.com/zh-cn/dotnet/framework/install/)
+
+## OpenVINO™ C# API 安装
+
+ 由于在Windows环境下开发C#语言比较方便,因此目前开发了 OpenVINO™ C# API 的 NuGet Package ,在使用时直接通过 C# 的 NuGet Package进行安装即可。在打包NuGet Package时,同时将OpenVINO™ 官方编译的动态链接库文件一并打包到NuGet Package中,因此此处只需要添加OpenVINO™ C# API即可使用。下面演示两种不同编译方式情况下的安装:
+
+- **Visual Studio 平台**
+
+ Visual Studio 编辑器自带了C# 的 **NuGet Package** 管理功能,因此可以直接通过 **NuGet Package** 进行安装。
+
+- **dotnet**
+
+ dotnet是C#语言的编译平台,可以通过命令行快速编译C#项目,如果使用dotnet编译,可以通过以下方式安装OpenVINO™ C# API:
+
+```
+dotnet add package OpenVINO.CSharp.win
+```
+
+ **说明:**目前**.NET Framework 4.8**版本安装使用会出现问题,因此在项目生成后,需要将程序目录下openvino2023.0文件夹中的除**opencv_c.dll**文件移动到程序目录下,如图所示。
+
+
+
diff --git a/modules/csharp_api/docs/en/linux_install.md b/modules/csharp_api/docs/en/linux_install.md
new file mode 100644
index 000000000..7757821cb
--- /dev/null
+++ b/modules/csharp_api/docs/en/linux_install.md
@@ -0,0 +1,132 @@
+# Using the OpenVINO C # API on the Linux
+
+ Due to the fact that the OpenVINO C # API is still in the development stage and no corresponding NuGet Package has been generated, corresponding use cases are provided based on the Ubuntu 20.04 system to facilitate everyone's use of the OpenVINO C # API on Linux systems.
+
+## Ⅰ. Install .NET
+
+ . NET is a free cross platform open source developer platform for building multiple applications. The following will demonstrate how AIxBoard can install the. NET environment on Ubuntu 20.04, supporting the. NET Core 2.0-3.1 series and. NET 5-8 series. If your AIxBoard is using another Linux system, you can refer to [Install .NET on Linux distributions - .NET | Microsoft Learn](https://learn.microsoft.com/en-us/dotnet/core/install/linux)
+
+### 1. Add Microsoft Package Repository
+
+ The installation using APT can be completed through several commands. Before installing. NET, please run the following command to add the Microsoft package signing key to the trusted key list and add the package repository.
+
+ Open the terminal and run the following command:
+
+```bash
+wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb
+sudo dpkg -i packages-microsoft-prod.deb
+rm packages-microsoft-prod.deb
+```
+
+ The following figure shows the output of the console after entering the above command:
+
+
+
+### 2. Install SDK
+
+ The. NET SDK allows you to develop applications through. NET. If you install the. NET SDK, you do not need to install the corresponding runtime. To install the. NET SDK, run the following command:
+
+```bash
+sudo apt-get update
+sudo apt-get install -y dotnet-sdk-3.1
+```
+
+ The following figure shows the output of the console after entering the above command:
+
+
+
+
+### 3. Test installation
+
+ You can check the SDK version and runtime version through the command line.
+
+```
+dotnet --list-sdks
+dotnet --list-runtimes
+```
+
+ The following figure shows the output of the console after entering the above command:
+
+
+
+ The above are the configuration steps for the. NET environment. If your environment does not match this article, you can obtain more installation steps through [.NET documentation | Microsoft Learn](https://learn.microsoft.com/en-us/dotnet/).
+
+## Ⅱ. Install OpenVINO Runtime
+
+ OpenVINO™ have two installation methods: OpenVINO Runtime and OpenVINO Development Tools. The OpenVINO Runtime contains a core library for running model deployment inference on processor devices. OpenVINO Development Tools is a set of tools used to process OpenVINO and OpenVINO models, including model optimizer, OpenVINO runtime, model downloader, and more. We only need to install OpenVINO Runtime here.
+
+### 1. Download OpenVINO Runtime
+
+ Visit the [Download the Intel Distribution of OpenVINO Toolkit](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/download.html?ENVIRONMENT=DEV_TOOLS&OP_SYSTEM=WINDOWS&VERSION=v_2023_0_1&DISTRIBUTION=PIP) page and follow the process below to select the corresponding installation options. On the download page, as our device is using **Ubuntu 20.04**, download according to the specified compiled version.
+
+
+
+### 2. Unzip installation package
+
+ The OpenVINO Runtime we downloaded is essentially a C++ dependency package, so we placed it in our system directory so that dependencies can be obtained during compilation based on the set system variables. First, create a folder under the system folder:
+
+```bash
+sudo mkdir -p /opt/intel
+```
+
+ Then extract the installation files we downloaded and move them to the specified folder:
+
+```bash
+tar -xvzf l_openvino_toolkit_ubuntu20_2023.0.1.11005.fa1c41994f3_x86_64.tgz
+sudo mv l_openvino_toolkit_ubuntu20_2023.0.1.11005.fa1c41994f3_x86_64 /opt/intel/openvino_2022.3.0
+```
+
+### 3. Installation dependencies
+
+ Next, we need to install the dependencies required by the OpenVINO Runtime. Enter the following command from the command line:
+
+```bash
+cd /opt/intel/openvino_2022.3.0/
+sudo -E ./install_dependencies/install_openvino_dependencies.sh
+```
+
+
+
+### 4. Configure environment variables
+
+ After the installation is completed, we need to configure the environment variables to ensure that the system can obtain the corresponding files when calling. Enter the following command from the command line:
+
+```bash
+source /opt/intel/openvino_2022.3.0/setupvars.sh
+```
+
+ The above are the configuration steps for the OpenVINO Runtime environment. If your environment does not match this article, you can obtain more installation steps through [Install OpenVINO™ Runtime — OpenVINO™ documentation — Version(2023.0)](https://docs.openvino.ai/2023.0/openvino_docs_install_guides_install_runtime.html).
+
+### 5. Add OpenVINO™ C# API Dependency
+
+ Due to the fact that OpenVINO™ C# API is currently in the development phase and has not yet created a Linux version of NuGet Package, it needs to be used by downloading the project source code and referencing it as a project.
+
+- **Download source code**
+
+ Due to OpenVINO ™ C # API is currently in the development stage and has not yet created a Linux version of NuGet Package. Therefore, it needs to be used by downloading the project source code as a project reference.
+
+ ```
+ git clone https://github.com/guojin-yan/OpenVINO-CSharp-API.git
+ cd OpenVINO-CSharp-API
+ ```
+
+
+
+- **Modify OpenVINO ™ Dependency**
+
+ Due to the OpenVINO™ dependency of the project source code being different from the settings in this article, it is necessary to modify the path of the OpenVINO™ dependency, mainly by modifying the``OpenVINO-CSharp-API/src/CSharpAPI/native_methods/ov_base.cs``. The modifications are as follows:
+
+ ```
+ private const string dll_extern = "./openvino2023.0/openvino_c.dll";
+ ---Modify to--->
+ private const string dll_extern = "libopenvino_c.so";
+ ```
+
+- **Add Project Dependency**
+
+ Enter the following command in Terminal to add OpenVINO™ C# API to the AlxBoard_deploy_yolov8 project reference.
+
+ ```
+ dotnet add reference ./../OpenVINO-CSharp-API/src/CSharpAPI/CSharpAPI.csproj
+ ```
+
diff --git a/modules/csharp_api/docs/en/windows_install.md b/modules/csharp_api/docs/en/windows_install.md
new file mode 100644
index 000000000..acc8a225f
--- /dev/null
+++ b/modules/csharp_api/docs/en/windows_install.md
@@ -0,0 +1,32 @@
+# Windows Installation OpenVINO™ C# API
+
+ OpenVINO™ C# API is mainly based on OpenVINO™ Developed with C #, supports Windows 10/11 version, and has been tested under x64 architecture.
+
+## C# Environmental Configuration
+
+ C# is a new programming language that is not only object-oriented, but also type safe. Developers can use C # to generate multiple secure and reliable applications running in. NET. The C# environment installation can be configured according to the following article.
+
+- [Install .NET on Windows - .NET | Microsoft Learn](https://learn.microsoft.com/en-us/dotnet/core/install/windows?tabs=net70)
+
+- [.NET Framework installation guide - .NET Framework | Microsoft Learn](https://learn.microsoft.com/en-us/dotnet/framework/install/)
+
+## OpenVINO™ C# API Installation
+
+ Due to the convenience of developing the C # language in the Windows environment, OpenVINO™ C# API's NuGet Package has been developed, which can be installed directly through the C # NuGet Package during use. When packaging NuGet Package, also include OpenVINO ™ The officially compiled Dongta Link Library file is packaged into the NuGet Package, so you only need to add OpenVINO™ C# API here to use it. The following demonstrates the installation under two different compilation methods:
+
+- **Visual Studio Platform**
+
+ The Visual Studio editor comes with the **NuGet Package** management feature of C#, so it can be installed directly through the **NuGet Package**.
+
+- **dotnet**
+
+ Dotnet is a compilation platform for the C # language, which can quickly compile C # projects from the command line. If using dotnet compilation, OpenVINO™ C# API can be installed by:
+
+```
+dotnet add package OpenVINO.CSharp.win
+```
+
+ **Note:** Currently, there may be issues with the installation and use of **.NET Framework version 4.8**. Therefore, after the project is generated, it is necessary to move the **opencv_c.dll** file from the openvino2023.0 folder in the program directory to the program directory, as shown in the figure:
+
+
+
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/Usings.cs b/modules/csharp_api/tests/csharp_api_unit_tests/Usings.cs
new file mode 100644
index 000000000..ab67c7ea9
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/Usings.cs
@@ -0,0 +1 @@
+global using Microsoft.VisualStudio.TestTools.UnitTesting;
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/base_test.cs b/modules/csharp_api/tests/csharp_api_unit_tests/base_test.cs
new file mode 100644
index 000000000..a91d1addf
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/base_test.cs
@@ -0,0 +1,92 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ public class OVBaseTest
+ {
+ public class TestModelInfo
+ {
+ public string model_xml = "..\\..\\..\\..\\..\\tests\\test_data\\model\\yolov8\\yolov8s.xml";
+ public string model_bin = "..\\..\\..\\..\\..\\tests\\test_data\\model\\yolov8\\yolov8s.bin";
+ public string input_name = "images";
+ public string output_name = "output0";
+
+ public OvType input_type = new OvType(ElementType.F32);
+ public Shape input_shape = new Shape(new long[] { 1, 3, 640, 640 });
+
+ public OvType output_type = new OvType(ElementType.F16);
+ public Shape output_shape = new Shape(new long[] { 1, 84, 8400 });
+ }
+ TestModelInfo model_info = new TestModelInfo();
+
+ private string device = "CPU";
+ public string get_model_xml_file_name()
+ {
+ if (!File.Exists(model_info.model_xml))
+ {
+ Assert.Fail();
+ }
+ return model_info.model_xml;
+ }
+ public string get_model_bin_file_name()
+ {
+ if (!File.Exists(model_info.model_bin))
+ {
+ Assert.Fail();
+ }
+ return model_info.model_bin;
+ }
+ public string get_device()
+ {
+ return device;
+ }
+
+ public string model_input_name()
+ {
+ return model_info.input_name;
+ }
+ public string model_output_name()
+ {
+ return model_info.output_name;
+ }
+
+ public Shape model_input_shape()
+ {
+ return model_info.input_shape;
+ }
+
+ public OvType model_input_type()
+ {
+ return model_info.input_type;
+ }
+
+ public Shape model_output_shape()
+ {
+ return model_info.output_shape;
+ }
+
+ public OvType model_output_type()
+ {
+ return model_info.output_type;
+ }
+
+ public byte[] content_from_file(string file)
+ {
+ FileStream fs = new FileStream(get_model_bin_file_name(), FileMode.Open, FileAccess.Read);
+
+ long len = fs.Seek(0, SeekOrigin.End);
+
+
+ fs.Seek(0, SeekOrigin.Begin);
+
+ byte[] data = new byte[len + 1];
+
+ fs.Read(data, 0, (int)len);
+ return data;
+ }
+ }
+}
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/CompiledModelTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/CompiledModelTests.cs
new file mode 100644
index 000000000..7c7195862
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/CompiledModelTests.cs
@@ -0,0 +1,373 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class CompiledModelTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void CompiledModel_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ compiled_model.Dispose();
+ Assert.IsTrue(compiled_model.Ptr == IntPtr.Zero);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void create_infer_request_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Node input = compiled_model.get_input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ input.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Node input = compiled_model.get_input(model_input_name());
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ input.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Node input = compiled_model.get_input(0);
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ input.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Node output = compiled_model.get_output();
+ Assert.IsTrue(output.Ptr != IntPtr.Zero);
+ output.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Node output = compiled_model.get_output(model_output_name());
+ Assert.IsTrue(output.Ptr != IntPtr.Zero);
+ output.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Node output = compiled_model.get_output(0);
+ Assert.IsTrue(output.Ptr != IntPtr.Zero);
+ output.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_inputs_size_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ ulong size = compiled_model.get_inputs_size();
+ Assert.IsTrue(size > 0);
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_outputs_size_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ ulong size = compiled_model.get_outputs_size();
+ Assert.IsTrue(size > 0);
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void input_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Input input = compiled_model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void input_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Input input = compiled_model.input(model_input_name());
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void input_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Input input = compiled_model.input(0);
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void output_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Output output = compiled_model.output();
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void output_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Output output = compiled_model.output(model_output_name());
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void output_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Output output = compiled_model.output(0);
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void inputs_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ List inputs = compiled_model.inputs();
+ Assert.IsTrue(inputs.Count > 0);
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void outputs_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ List outputs = compiled_model.outputs();
+ Assert.IsTrue(outputs.Count > 0);
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_runtime_model_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ Model runtime = compiled_model.get_runtime_model();
+ Assert.IsTrue(runtime.Ptr != IntPtr.Zero);
+ runtime.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void export_model_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model,get_device());
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ compiled_model.export_model("test_exported_model.blob");
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_property_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model, "BATCH:" + get_device() + "(4)");
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ KeyValuePair key = new KeyValuePair(PropertyKey.AUTO_BATCH_TIMEOUT.ToString(), "5000");
+ compiled_model.set_property(key);
+ string result = compiled_model.get_property("AUTO_BATCH_TIMEOUT");
+ Assert.AreEqual("5000", result);
+ }
+
+ [TestMethod()]
+ public void get_property_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model, get_device());
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ string result = compiled_model.get_property("SUPPORTED_PROPERTIES");
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_context_test()
+ {
+ Assert.Fail();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/CoreTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/CoreTests.cs
new file mode 100644
index 000000000..9d3de78d3
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/CoreTests.cs
@@ -0,0 +1,236 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class CoreTests : OVBaseTest
+ {
+
+
+ [TestMethod()]
+ public void Core_test()
+ {
+ Core core = new Core();
+ Assert.IsTrue(core.Ptr != IntPtr.Zero);
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ Core core = new Core();
+ core.Dispose();
+ Assert.IsTrue(core.Ptr == IntPtr.Zero);
+ }
+
+ [TestMethod()]
+ public void get_versions_test()
+ {
+ var core = new Core();
+ KeyValuePair ver = core.get_versions(get_device());
+ Assert.IsNotNull(ver.Key, ver.Value.buildNumber, ver.Value.description);
+ }
+
+ [TestMethod()]
+ public void read_model_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.Dispose();
+ model = core.read_model(get_model_xml_file_name(), get_model_bin_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void read_model_test1()
+ {
+ byte[] data = content_from_file(get_model_bin_file_name());
+
+ Shape shape = new Shape(new List { 1, data.Length });
+ Tensor tensor = new Tensor(new element.Type(element.Type_t.u8), shape, data);
+
+ Core core = new Core();
+ Assert.IsTrue(core.Ptr != IntPtr.Zero);
+ Model model = core.read_model(get_model_xml_file_name(), tensor);
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ }
+
+ [TestMethod()]
+ public void compile_model_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled = core.compile_model(model);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ compiled.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void compile_model_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+
+ Dictionary latency = new Dictionary();
+ latency.Add("PERFORMANCE_HINT", "1");
+
+ CompiledModel compiled = core.compile_model(model, get_device(), latency);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ latency.Add("PERFORMANCE", "1");
+ compiled = core.compile_model(get_model_xml_file_name(), get_device(), latency);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ compiled.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void compile_model_test2()
+ {
+ var core = new Core();
+ Dictionary latency = new Dictionary();
+ latency.Add("PERFORMANCE_HINT", "1");
+ CompiledModel compiled = core.compile_model(get_model_xml_file_name(), latency);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ latency.Add("PERFORMANCE", "1");
+ compiled = core.compile_model(get_model_xml_file_name(), get_device(), latency);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ compiled.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void compile_model_test3()
+ {
+ var core = new Core();
+ Dictionary latency = new Dictionary();
+ latency.Add("PERFORMANCE_HINT", "1");
+ CompiledModel compiled = core.compile_model(get_model_xml_file_name(), get_device(), latency);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ latency.Add("PERFORMANCE", "1");
+ compiled = core.compile_model(get_model_xml_file_name(), get_device(), latency);
+ Assert.IsTrue(compiled.Ptr != IntPtr.Zero);
+ compiled.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_available_devices_test()
+ {
+ var core = new Core();
+ List devicces = core.get_available_devices();
+ Assert.IsNotNull(devicces);
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_property_enum_Test()
+ {
+ var core = new Core();
+ KeyValuePair key = new KeyValuePair(PropertyKey.LOG_LEVEL.ToString(), "WARNING");
+ core.set_property(get_device(), key);
+ core.Dispose();
+ }
+ //[TestMethod()]
+ //public void set_property_invalid_number_property_arguments_Test()
+ //{
+ // var core = new Core();
+ // Dictionary dict = new Dictionary();
+ // dict.Add(PropertyKey.INFERENCE_NUM_THREADS.ToString(), "12");
+ // dict.Add(PropertyKey.NUM_STREAMS.ToString(), "7");
+ // core.set_property(get_device(), dict);
+ // string s = core.get_property(get_device(), PropertyKey.INFERENCE_NUM_THREADS);
+ // Assert.AreEqual("12", s);
+ // s = core.get_property(get_device(), PropertyKey.NUM_STREAMS);
+ // Assert.AreEqual("7", s);
+ // core.Dispose();
+ //}
+
+ [TestMethod()]
+ public void set_property_enum_invalid_Test()
+ {
+ var core = new Core();
+ KeyValuePair key = new KeyValuePair(PropertyKey.PERFORMANCE_HINT.ToString(), "LATENCY");
+ core.set_property(get_device(), key);
+ string s = core.get_property(get_device(), PropertyKey.PERFORMANCE_HINT);
+ Assert.AreEqual("LATENCY", s);
+
+ //key = new KeyValuePair(PropertyKey.PERFORMANCE_HINT.ToString(), "LATENCY_TEST");
+ //core.set_property(get_device(), key);
+ //s = core.get_property(get_device(), PropertyKey.PERFORMANCE_HINT);
+ //Assert.AreEqual("LATENCY_TEST", s);
+
+ key = new KeyValuePair(PropertyKey.ENABLE_CPU_PINNING.ToString(), "YES");
+ core.set_property(get_device(), key);
+ s = core.get_property(get_device(), PropertyKey.ENABLE_CPU_PINNING);
+ Assert.AreEqual("YES", s);
+
+ //key = new KeyValuePair(PropertyKey.ENABLE_CPU_PINNING.ToString(), "INVALID_VAL");
+ //core.set_property(get_device(), key);
+ //s = core.get_property(get_device(), PropertyKey.ENABLE_CPU_PINNING);
+ //Assert.AreEqual("INVALID_VAL", s);
+
+ key = new KeyValuePair(PropertyKey.SCHEDULING_CORE_TYPE.ToString(), "PCORE_ONLY");
+ core.set_property(get_device(), key);
+ s = core.get_property(get_device(), PropertyKey.SCHEDULING_CORE_TYPE);
+ Assert.AreEqual("PCORE_ONLY", s);
+
+ //key = new KeyValuePair(PropertyKey.SCHEDULING_CORE_TYPE.ToString(), "INVALID_VAL");
+ //core.set_property(get_device(), key);
+ //s = core.get_property(get_device(), PropertyKey.SCHEDULING_CORE_TYPE);
+ //Assert.AreEqual("INVALID_VAL", s);
+
+ key = new KeyValuePair(PropertyKey.ENABLE_HYPER_THREADING.ToString(), "YES");
+ core.set_property(get_device(), key);
+ s = core.get_property(get_device(), PropertyKey.ENABLE_HYPER_THREADING);
+ Assert.AreEqual("YES", s);
+
+ //key = new KeyValuePair(PropertyKey.ENABLE_HYPER_THREADING.ToString(), "INVALID_VAL");
+ //core.set_property(get_device(), key);
+ //s = core.get_property(get_device(), PropertyKey.ENABLE_HYPER_THREADING);
+ //Assert.AreEqual("INVALID_VAL", s);
+
+ core.Dispose();
+ }
+
+
+
+ [TestMethod()]
+ public void get_propertyTest()
+ {
+ var core = new Core();
+ core.set_property(get_device(), Ov.cache_dir("./model"));
+ string s = core.get_property(get_device(), PropertyKey.CACHE_DIR);
+ Assert.IsNotNull(s);
+ }
+
+ [TestMethod()]
+ public void set_propertyTest1()
+ {
+ var core = new Core();
+ Dictionary dict = new Dictionary();
+ dict.Add(Ov.cache_dir("./model").Key, Ov.cache_dir("./model").Value);
+ core.set_property(get_device(), dict);
+ string s = core.get_property(get_device(), PropertyKey.CACHE_DIR);
+ Assert.IsNotNull(s);
+ }
+
+ [TestMethod()]
+ public void import_modelTest()
+ {
+ Assert.Fail();
+ }
+ }
+}
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/InferRequestTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/InferRequestTests.cs
new file mode 100644
index 000000000..71613941d
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/InferRequestTests.cs
@@ -0,0 +1,519 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class InferRequestTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void InferRequestTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest request = compiled_model.create_infer_request();
+ Assert.IsTrue(request.Ptr != IntPtr.Zero);
+ request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void DisposeTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest request = compiled_model.create_infer_request();
+ Assert.IsTrue(request.Ptr != IntPtr.Zero);
+ request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_tensorTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ data[1] = 15.62f;
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ float[] d = input_tensor.get_data((int)model_input_shape().data_size());
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_tensorTest1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Node node = model.get_input();
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(node, input_tensor);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_tensorTest2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Input node = model.input();
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(node, input_tensor);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_tensorTest3()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Node node = model.get_input();
+ Output node_output = new Output(node);
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(node_output, input_tensor);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ [TestMethod()]
+ public void set_input_tensorTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_input_tensor(0, input_tensor);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_input_tensorTest1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_input_tensor(input_tensor);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_output_tensorTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_output_shape().data_size()];
+ Tensor output_tensor = new Tensor(model_output_shape(), data);
+ infer_request.set_output_tensor(0, output_tensor);
+ output_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_output_tensorTest1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_output_shape().data_size()];
+ Tensor output_tensor = new Tensor(model_output_shape(), data);
+ infer_request.set_output_tensor(output_tensor);
+ output_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_tensorTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+
+ Tensor tensor = infer_request.get_tensor(model_input_name());
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_tensorTest1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Node node = model.get_input();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_tensor(node);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ node.Dispose();
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_tensorTest2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Input node = model.input();
+ Assert.IsTrue(node.get_node().Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_tensor(node);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ node.Dispose();
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_tensorTest3()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Output node = model.output();
+ Assert.IsTrue(node.get_node().Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_tensor(node);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ node.Dispose();
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_tensorTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_input_tensor(0);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_tensorTest1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_input_tensor();
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_tensorTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_output_tensor(0);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_tensorTest1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ Tensor tensor = infer_request.get_output_tensor();
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void inferTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ infer_request.infer();
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void cancelTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ infer_request.cancel();
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void start_asyncTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ infer_request.start_async();
+ infer_request.wait();
+
+ Tensor tensor = infer_request.get_output_tensor();
+
+ tensor.Dispose();
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void waitTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ infer_request.start_async();
+ infer_request.wait();
+
+ Tensor tensor = infer_request.get_output_tensor();
+
+ tensor.Dispose();
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void wait_forTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model);
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ infer_request.start_async();
+ infer_request.wait_for(1000000);
+
+ Tensor tensor = infer_request.get_output_tensor();
+
+ tensor.Dispose();
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_profiling_infoTest()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ CompiledModel compiled_model = core.compile_model(model,"CPU");
+ Assert.IsTrue(compiled_model.Ptr != IntPtr.Zero);
+ InferRequest infer_request = compiled_model.create_infer_request();
+ Assert.IsTrue(infer_request.Ptr != IntPtr.Zero);
+ float[] data = new float[model_input_shape().data_size()];
+ Tensor input_tensor = new Tensor(model_input_shape(), data);
+ infer_request.set_tensor(model_input_name(), input_tensor);
+ infer_request.infer();
+ Tensor tensor = infer_request.get_output_tensor();
+ float[] data1 = tensor.get_data((int)tensor.get_size());
+ List<Ov.ProfilingInfo> pro = infer_request.get_profiling_info();
+ Assert.IsTrue (pro.Count > 0);
+ input_tensor.Dispose();
+ infer_request.Dispose();
+ compiled_model.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/InputTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/InputTests.cs
new file mode 100644
index 000000000..65fa22069
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/InputTests.cs
@@ -0,0 +1,115 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class InputTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void Input_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+
+ }
+
+ [TestMethod()]
+ public void get_node_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ Node node = input.get_node();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_index_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ ulong index = input.get_index();
+ Assert.IsNotNull(index);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_element_type_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ OvType type = input.get_element_type();
+ Assert.IsNotNull(type);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ Shape shape = input.get_shape();
+ Assert.IsNotNull(shape);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_any_name_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ string name = input.get_any_name();
+ Assert.IsNotNull(name);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_partial_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ PartialShape shape = input.get_partial_shape();
+ Assert.IsNotNull(shape);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/ModelTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/ModelTests.cs
new file mode 100644
index 000000000..9d5d25acf
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/ModelTests.cs
@@ -0,0 +1,544 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class ModelTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void Model_test()
+ {
+ Assert.IsTrue(true);
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.Dispose();
+ Assert.IsTrue(model.Ptr == IntPtr.Zero);
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_friendly_name_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ string name = model.get_friendly_name();
+ Assert.IsTrue(name != "");
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_input();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_input(0);
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_input_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_input(model_input_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_output();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_output(0);
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_output_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_output(model_output_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_const_input_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_const_input_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input(0);
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_const_input_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input(model_input_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_const_output_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_output();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_const_output_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.Dispose();
+ model = core.read_model(get_model_xml_file_name(),
+ get_model_bin_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_output(0);
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_const_output_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_output(model_output_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void input_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void input_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input(0);
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void input_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.input(model_input_name());
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_input_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.const_input();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_input_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.const_input(0);
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_input_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Input input = model.const_input(model_input_name());
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void output_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output output = model.output();
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void output_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output output = model.output(0);
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void output_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output output = model.output(model_output_name());
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_output_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output output = model.const_output();
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_output_test1()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output output = model.const_output(0);
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_output_test2()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output output = model.const_output(model_output_name());
+ Assert.IsTrue(output.get_node().Ptr != IntPtr.Zero);
+ output.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_inputs_size_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ ulong size = model.get_inputs_size();
+ Assert.IsTrue(size > 0);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_outputs_size_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ ulong size = model.get_outputs_size();
+ Assert.IsTrue(size > 0);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void inputs_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ ulong size = model.get_inputs_size();
+ Assert.IsTrue(size > 0);
+ List<Input> inputs = model.inputs();
+ Assert.IsTrue(inputs.Count == (int)size);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void outputs_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ ulong size = model.get_outputs_size();
+ Assert.IsTrue(size > 0);
+ List<Output> outputs = model.outputs();
+ Assert.IsTrue(outputs.Count == (int)size);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_inputs_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ ulong size = model.get_inputs_size();
+ Assert.IsTrue(size > 0);
+ List<Input> inputs = model.const_inputs();
+ Assert.IsTrue(inputs.Count == (int)size);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void const_outputs_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ ulong size = model.get_outputs_size();
+ Assert.IsTrue(size > 0);
+ List<Output> outputs = model.const_outputs();
+ Assert.IsTrue(outputs.Count == (int)size);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void is_dynamic_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.Dispose();
+ model = core.read_model(get_model_xml_file_name(), get_model_bin_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ bool flag = model.is_dynamic();
+ Assert.IsTrue(!flag);
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void reshape_test()
+ {
+ Shape shape = new Shape(new long[4] { 1, 3, 640, 640 });
+
+ PartialShape partial = new PartialShape(shape);
+
+ Dictionary<string, PartialShape> pairs = new Dictionary<string, PartialShape>();
+
+ Assert.IsTrue(partial.get_partial_shape().rank.max == 4);
+ pairs.Add(model_input_name(), partial);
+
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.reshape(pairs);
+
+ PartialShape shape1 = model.get_input().get_partial_shape();
+
+ model.Dispose();
+ core.Dispose();
+
+ }
+
+ [TestMethod()]
+ public void reshape_test1()
+ {
+ Shape shape = new Shape(new long[4] { 1, 3, 640, 640 });
+ PartialShape partial = new PartialShape(shape);
+ Assert.IsTrue(partial.get_partial_shape().rank.max == 4);
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.reshape(partial);
+ PartialShape shape1 = model.get_input().get_partial_shape();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void reshape_test2()
+ {
+ Shape shape = new Shape(new long[4] { 1, 3, 640, 640 });
+
+ PartialShape partial = new PartialShape(shape);
+
+ Dictionary<ulong, PartialShape> pairs = new Dictionary<ulong, PartialShape>();
+
+ Assert.IsTrue(partial.get_partial_shape().rank.max == 4);
+ pairs.Add(0, partial);
+
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ model.reshape(pairs);
+
+ PartialShape shape1 = model.get_input().get_partial_shape();
+
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void reshape_test3()
+ {
+ Shape shape = new Shape(new long[4] { 1, 3, 640, 640 });
+
+ PartialShape partial = new PartialShape(shape);
+
+ Dictionary<Node, PartialShape> pairs = new Dictionary<Node, PartialShape>();
+
+ Assert.IsTrue(partial.get_partial_shape().rank.max == 4);
+
+
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+
+ Node input = model.get_input();
+ pairs.Add(input, partial);
+
+ model.reshape(pairs);
+
+ PartialShape shape1 = model.get_input().get_partial_shape();
+
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/NodeTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/NodeTests.cs
new file mode 100644
index 000000000..a9b3d0ab9
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/NodeTests.cs
@@ -0,0 +1,84 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class NodeTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void Node_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void get_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input(model_input_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ Shape shape = node.get_shape();
+ Assert.IsNotNull(shape);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_partial_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input(model_input_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ PartialShape shape = node.get_partial_shape();
+ Assert.IsNotNull(shape);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_name_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input(model_input_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ string name = node.get_name();
+ Assert.IsNotNull(name);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_element_type_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Node node = model.get_const_input(model_input_name());
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ OvType type = node.get_element_type();
+ Assert.IsNotNull(type);
+ node.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/OutputTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/OutputTests.cs
new file mode 100644
index 000000000..ceead7ac6
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/OutputTests.cs
@@ -0,0 +1,114 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class OutputTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void Output_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void get_node_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output input = model.output();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ Node node = input.get_node();
+ Assert.IsTrue(node.Ptr != IntPtr.Zero);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_index_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output input = model.output();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ ulong index = input.get_index();
+ Assert.IsNotNull(index);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_element_type_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output input = model.output();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ OvType type = input.get_element_type();
+ Assert.IsNotNull(type);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output input = model.output();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ Shape shape = input.get_shape();
+ Assert.IsNotNull(shape);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_any_name_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output input = model.output();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ string name = input.get_any_name();
+ Assert.IsNotNull(name);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_partial_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ Output input = model.output();
+ Assert.IsTrue(input.get_node().Ptr != IntPtr.Zero);
+ PartialShape shape = input.get_partial_shape();
+ Assert.IsNotNull(shape);
+ input.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/PartialShapeTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/PartialShapeTests.cs
new file mode 100644
index 000000000..ab9945ae6
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/PartialShapeTests.cs
@@ -0,0 +1,153 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class PartialShapeTests
+ {
+ [TestMethod()]
+ public void PartialShape_test()
+ {
+ }
+
+ [TestMethod()]
+ public void PartialShape_test1()
+ {
+ Dimension[] dimensions = new Dimension[] { new Dimension(10), new Dimension(10), new Dimension(10) };
+ PartialShape shape = new PartialShape(dimensions);
+ Assert.IsNotNull(shape);
+ }
+
+ [TestMethod()]
+ public void PartialShape_test2()
+ {
+ List<Dimension> dimensions = new List<Dimension> { new Dimension(10), new Dimension(10), new Dimension(10) };
+ PartialShape shape = new PartialShape(dimensions);
+ Assert.IsNotNull(shape);
+ }
+
+ [TestMethod()]
+ public void PartialShape_test3()
+ {
+ Dimension rank = new Dimension(3);
+ Dimension[] dimensions = new Dimension[] { new Dimension(10), new Dimension(10), new Dimension(10) };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ }
+
+ [TestMethod()]
+ public void PartialShape_test4()
+ {
+ Dimension rank = new Dimension(3);
+ List<Dimension> dimensions = new List<Dimension> { new Dimension(10), new Dimension(10), new Dimension(10) };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ }
+
+ [TestMethod()]
+ public void PartialShape_test5()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ }
+
+ [TestMethod()]
+ public void PartialShape_test6()
+ {
+ long rank = 3;
+ List<long> dimensions = new List<long> { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ }
+
+ [TestMethod()]
+ public void PartialShape_test7()
+ {
+ Shape shape = new Shape(1,3,9);
+ PartialShape shape1 = new PartialShape(shape);
+ Assert.IsNotNull(shape1);
+ }
+
+ [TestMethod()]
+ public void get_partial_shape_test()
+ {
+ Shape shape = new Shape(1, 3, 9);
+ PartialShape shape1 = new PartialShape(shape);
+ Assert.IsNotNull(shape1);
+ Ov.ov_partial_shape ov_partial = shape1.get_partial_shape();
+ Assert.IsNotNull(ov_partial);
+ }
+
+ [TestMethod()]
+ public void get_rank_test()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ Dimension dimension = shape.get_rank();
+ Assert.IsNotNull(dimension);
+ }
+
+ [TestMethod()]
+ public void get_dimensions_test()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ Dimension[] dimension = shape.get_dimensions();
+ Assert.IsNotNull(dimension);
+ }
+
+ [TestMethod()]
+ public void to_shape_test()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ Shape shape1 = shape.to_shape();
+ Assert.IsNotNull(shape1);
+ }
+
+ [TestMethod()]
+ public void is_static_test()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ shape.is_static();
+ }
+
+ [TestMethod()]
+ public void is_dynamic_test()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ shape.is_dynamic();
+ }
+
+ [TestMethod()]
+ public void to_string_test()
+ {
+ long rank = 3;
+ long[] dimensions = new long[] { 10, 10, 10 };
+ PartialShape shape = new PartialShape(rank, dimensions);
+ Assert.IsNotNull(shape);
+ string msg = shape.to_string();
+ Assert.IsNotNull(msg);
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/ShapeTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/ShapeTests.cs
new file mode 100644
index 000000000..a44027e51
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/ShapeTests.cs
@@ -0,0 +1,67 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class ShapeTests
+ {
+ [TestMethod()]
+ public void Shape_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Shape_test1()
+ {
+ List<long> data = new List<long>() { 1, 2, 3 };
+ Shape shape = new Shape(data);
+ shape.Dispose();
+ }
+
+ [TestMethod()]
+ public void Shape_test2()
+ {
+ long[] data = new long[] { 1, 2, 3 };
+ Shape shape = new Shape(data);
+ shape.Dispose();
+ }
+
+ [TestMethod()]
+ public void Shape_test3()
+ {
+ Shape shape = new Shape(1,2,9);
+ shape.Dispose();
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ Shape shape = new Shape(1, 2, 9);
+ shape.Dispose();
+ }
+
+ [TestMethod()]
+ public void to_string_test()
+ {
+ Shape shape = new Shape(1, 2, 9);
+ string msg = shape.to_string();
+ Assert.IsNotNull(msg);
+ shape.Dispose();
+ }
+
+ [TestMethod()]
+ public void data_size_test()
+ {
+ Shape shape = new Shape(1, 2, 9);
+ long size = shape.data_size();
+ Assert.IsTrue(size == 18);
+ shape.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/core/TensorTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/core/TensorTests.cs
new file mode 100644
index 000000000..e0d021d7b
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/core/TensorTests.cs
@@ -0,0 +1,258 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.InteropServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class TensorTests
+ {
+ [TestMethod()]
+ public void Tensor_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Tensor_test1()
+ {
+ }
+
+ [TestMethod()]
+ public void Tensor_test2()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test3()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ double[] data = new double[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test4()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ int[] data = new int[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test5()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ short[] data = new short[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test6()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ long[] data = new long[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test7()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(new element.Type(ElementType.F32), shape, Marshal.UnsafeAddrOfPinnedArrayElement(data, 0));
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test8()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ Tensor tensor = new Tensor(new element.Type(ElementType.F32), shape);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test9()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ Tensor tensor1 = new Tensor(tensor);
+ Assert.IsTrue(tensor1.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ tensor1.Dispose();
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void set_shape_test()
+ {
+ Shape shape = new Shape(1, 2, 80);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ Shape new_shape = new Shape(1, 2, 15);
+ tensor.set_shape(new_shape);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_shape_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ Shape new_shape = tensor.get_shape();
+ Assert.IsTrue(shape.Count == new_shape.Count);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_element_type_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ OvType type = tensor.get_element_type();
+ Assert.IsTrue((int)type.get_type()!=100);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_size_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ ulong size = tensor.get_size();
+ Assert.IsTrue(size > 0);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_byte_size_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ ulong size = tensor.get_byte_size();
+ Assert.IsTrue(size > 0);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void copy_to_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ data[0] = 0.6f;
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ Tensor new_tensor = new Tensor(shape, data);
+ tensor.copy_to(new_tensor);
+ float[] new_data = new_tensor.get_data((int)new_tensor.get_size());
+ Assert.IsTrue(new_data[0] == 0.6f);
+ new_tensor.Dispose();
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void data_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ data[0] = 0.6f;
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ IntPtr ptr = tensor.data();
+ Assert.IsTrue(ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_data_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ data[0] = 0.6f;
+ Tensor tensor = new Tensor(new OvType(ElementType.F32), shape);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.set_data(data);
+ float[] new_data = tensor.get_data((int)tensor.get_size());
+ Assert.IsTrue(new_data[0] == 0.6f);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void get_data_test()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ data[0] = 0.6f;
+ Tensor tensor = new Tensor(shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ float[] new_data = tensor.get_data((int)tensor.get_size());
+ Assert.IsTrue(new_data[0] == 0.6f);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test10()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(new OvType(ElementType.F32), shape, Marshal.UnsafeAddrOfPinnedArrayElement(data, 0));
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test11()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ float[] data = new float[6];
+ Tensor tensor = new Tensor(new OvType(ElementType.F32), shape);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+
+ [TestMethod()]
+ public void Tensor_test12()
+ {
+ Shape shape = new Shape(1, 2, 3);
+ byte[] data = new byte[6];
+ Tensor tensor = new Tensor(new OvType(ElementType.F32), shape, data);
+ Assert.IsTrue(tensor.Ptr != IntPtr.Zero);
+ tensor.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/csharp_api_unit_tests.csproj b/modules/csharp_api/tests/csharp_api_unit_tests/csharp_api_unit_tests.csproj
new file mode 100644
index 000000000..9c55d5ee4
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/csharp_api_unit_tests.csproj
@@ -0,0 +1,23 @@
+
+
+
+ net6.0
+ enable
+ enable
+
+ false
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/ov/OvTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/ov/OvTests.cs
new file mode 100644
index 000000000..0700a4721
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/ov/OvTests.cs
@@ -0,0 +1,28 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.Tests
+{
+ [TestClass()]
+ public class OvTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void get_openvino_version_test()
+ {
+ Version version = Ov.get_openvino_version();
+ Assert.IsNotNull(version);
+ }
+
+ [TestMethod()]
+ public void content_from_file_test()
+ {
+ byte[] data = Ov.content_from_file(get_model_bin_file_name());
+ Assert.IsTrue(data.Length>0);
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputInfoTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputInfoTests.cs
new file mode 100644
index 000000000..a267e8de9
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputInfoTests.cs
@@ -0,0 +1,82 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+ [TestClass()]
+ public class InputInfoTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void InputInfo_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void tensor_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void preprocess_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ PreProcessSteps process_steps = input.preprocess();
+ Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+ process_steps.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void model_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputModelInfo model_info = input.model();
+ Assert.IsTrue(model_info.Ptr != IntPtr.Zero);
+ model_info.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputModelInfoTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputModelInfoTests.cs
new file mode 100644
index 000000000..4c2df4aea
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputModelInfoTests.cs
@@ -0,0 +1,45 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+ [TestClass()]
+ public class InputModelInfoTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void InputModelInfo_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void set_layout_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputModelInfo model_info = input.model();
+ Assert.IsTrue(model_info.Ptr != IntPtr.Zero);
+ model_info.set_layout(new Layout("NCHW"));
+ model_info.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputTensorInfoTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputTensorInfoTests.cs
new file mode 100644
index 000000000..8560208c6
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/InputTensorInfoTests.cs
@@ -0,0 +1,150 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+ [TestClass()]
+ public class InputTensorInfoTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void InputTensorInfo_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void set_color_format_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+ input_tensor.set_color_format(ColorFormat.NV12_SINGLE_PLANE);
+ input_tensor.set_color_format(ColorFormat.NV12_TWO_PLANES, "y", "uv");
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_element_type_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+ input_tensor.set_element_type(new OvType(ElementType.F32));
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_spatial_static_shape_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+ input_tensor.set_spatial_static_shape(100, 100);
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_memory_type_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+ input_tensor.set_memory_type("GPU_SURFACE");
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_layout_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+ input_tensor.set_layout(new Layout("NCHW"));
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+
+ [TestMethod()]
+ public void set_from_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ InputInfo input = processor.input();
+ Assert.IsTrue(input.Ptr != IntPtr.Zero);
+ InputTensorInfo input_tensor = input.tensor();
+ Assert.IsTrue(input_tensor.Ptr != IntPtr.Zero);
+
+ Shape shape = new Shape(1, 2, 3);
+ Tensor tensor = new Tensor(new element.Type(ElementType.F32), shape);
+
+ input_tensor.set_from(tensor);
+ input_tensor.Dispose();
+ input.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/OutputInfoTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/OutputInfoTests.cs
new file mode 100644
index 000000000..85c585c9a
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/OutputInfoTests.cs
@@ -0,0 +1,44 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+ [TestClass()]
+ public class OutputInfoTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void OutputInfo_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void tensor_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+ OutputInfo output = processor.output();
+ Assert.IsTrue(output.Ptr != IntPtr.Zero);
+ OutputTensorInfo tensor_info = output.tensor();
+ Assert.IsTrue(tensor_info.Ptr != IntPtr.Zero);
+ tensor_info.Dispose();
+ output.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ core.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/OutputTensorInfoTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/OutputTensorInfoTests.cs
new file mode 100644
index 000000000..06950c132
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/OutputTensorInfoTests.cs
@@ -0,0 +1,42 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+ [TestClass()]
+ public class OutputTensorInfoTests : OVBaseTest
+ {
+ [TestMethod()]
+ public void OutputTensorInfo_test()
+ {
+ }
+
+ [TestMethod()]
+ public void Dispose_test()
+ {
+ }
+
+ [TestMethod()]
+ public void set_element_type_test()
+ {
+ var core = new Core();
+ Model model = core.read_model(get_model_xml_file_name());
+ Assert.IsTrue(model.Ptr != IntPtr.Zero);
+ PrePostProcessor processor = new PrePostProcessor(model);
+ OutputInfo output = processor.output();
+ Assert.IsNotNull(output);
+ OutputTensorInfo output_tensor = output.tensor();
+ Assert.IsNotNull(output_tensor);
+ output_tensor.set_element_type(new OvType(ElementType.F32));
+ output.Dispose();
+ processor.Dispose();
+ model.Dispose();
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/PrePostProcessorTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/PrePostProcessorTests.cs
new file mode 100644
index 000000000..a04b3ef47
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/PrePostProcessorTests.cs
@@ -0,0 +1,152 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+    /// <summary>
+    /// Unit tests for <see cref="PrePostProcessor"/>.
+    /// </summary>
+    [TestClass()]
+    public class PrePostProcessorTests : OVBaseTest
+    {
+        [TestMethod()]
+        public void PrePostProcessor_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void Dispose_test()
+        {
+        }
+
+        [TestMethod()]
+        public void input_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            InputInfo input = processor.input();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void input_test1()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            InputInfo input = processor.input(model_input_name());
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void input_test2()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            InputInfo input = processor.input(0);
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void output_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            OutputInfo output = processor.output();
+            output.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void output_test1()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            OutputInfo output = processor.output(model_output_name());
+            output.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void output_test2()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            OutputInfo output = processor.output(0);
+            output.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        /// <summary>
+        /// Builds a full preprocessing pipeline and checks that a new model
+        /// can be produced from it.
+        /// </summary>
+        [TestMethod()]
+        public void build_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+
+            Tensor input_tensor = new Tensor(new OvType(ElementType.U8), new Shape(1, 640, 640, 3));
+            InputInfo input_info = processor.input(0);
+            InputTensorInfo input_tensor_info = input_info.tensor();
+            input_tensor_info.set_from(input_tensor).set_layout(new Layout("NHWC")).set_color_format(ColorFormat.BGR);
+
+            PreProcessSteps process_steps = input_info.preprocess();
+            process_steps.convert_color(ColorFormat.RGB).resize(ResizeAlgorithm.RESIZE_LINEAR)
+                .convert_element_type(new OvType(ElementType.F32)).scale(255.0f).convert_layout(new Layout("NCHW"));
+
+            Model new_model = processor.build();
+            new_model.Dispose();
+            // Release the intermediate preprocessing wrappers as well.
+            process_steps.Dispose();
+            input_tensor_info.Dispose();
+            input_info.Dispose();
+            input_tensor.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/PreProcessStepsTests.cs b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/PreProcessStepsTests.cs
new file mode 100644
index 000000000..a51bde300
--- /dev/null
+++ b/modules/csharp_api/tests/csharp_api_unit_tests/preprocess/PreProcessStepsTests.cs
@@ -0,0 +1,235 @@
+using Microsoft.VisualStudio.TestTools.UnitTesting;
+using OpenVinoSharp.preprocess;
+using OpenVinoSharp.Tests;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace OpenVinoSharp.preprocess.Tests
+{
+    /// <summary>
+    /// Unit tests for <see cref="PreProcessSteps"/>.
+    /// </summary>
+    [TestClass()]
+    public class PreProcessStepsTests : OVBaseTest
+    {
+        [TestMethod()]
+        public void PreProcessSteps_test()
+        {
+        }
+
+        [TestMethod()]
+        public void Dispose_test()
+        {
+        }
+
+        [TestMethod()]
+        public void resize_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.resize(ResizeAlgorithm.RESIZE_LINEAR);
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void scale_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.scale(0.5f);
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void mean_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.mean(0.5f);
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void crop_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+
+            int[] begin = { 0, 0, 5, 10 };
+            int[] end = { 1, 3, 15, 20 };
+
+            process_steps.crop(begin, end);
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void crop_test1()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+
+            // Use the generic List<int> overload (non-generic List does not compile).
+            List<int> begin = new List<int> { 0, 0, 5, 10 };
+            List<int> end = new List<int> { 1, 3, 15, 20 };
+
+            process_steps.crop(begin, end);
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void convert_layout_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.convert_layout(new Layout("NCHW"));
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void reverse_channels_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.reverse_channels();
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void convert_color_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.convert_color(ColorFormat.RGB);
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+
+        [TestMethod()]
+        public void convert_element_type_test()
+        {
+            var core = new Core();
+            Model model = core.read_model(get_model_xml_file_name());
+            Assert.IsTrue(model.Ptr != IntPtr.Zero);
+            PrePostProcessor processor = new PrePostProcessor(model);
+            Assert.IsTrue(processor.Ptr != IntPtr.Zero);
+            InputInfo input = processor.input();
+            Assert.IsTrue(input.Ptr != IntPtr.Zero);
+            PreProcessSteps process_steps = input.preprocess();
+            Assert.IsTrue(process_steps.Ptr != IntPtr.Zero);
+
+            process_steps.convert_element_type(new OvType(ElementType.F32));
+
+            process_steps.Dispose();
+            input.Dispose();
+            processor.Dispose();
+            model.Dispose();
+            core.Dispose();
+        }
+    }
+}
\ No newline at end of file