
Merge branch 'master' into an/mvn_refactor_2
allnes authored Sep 30, 2024
2 parents f18b0a7 + 2507d89 commit 4045e5f
Showing 197 changed files with 355 additions and 7,515 deletions.
1 change: 0 additions & 1 deletion .github/ISSUE_TEMPLATE/bug.yml
@@ -58,7 +58,6 @@ body:
- Caffe
- ONNX
- PyTorch
- - mxnet
- PaddlePaddle
validations:
required: false
1 change: 0 additions & 1 deletion .github/workflows/coverage.yml
@@ -51,7 +51,6 @@ jobs:
# For running TensorFlow frontend unit tests
python3 -m pip install -r ${{ github.workspace }}/src/frontends/tensorflow/tests/requirements.txt
# For MO unit tests
- python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_mxnet.txt
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_caffe.txt
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_kaldi.txt
python3 -m pip install -r ${{ github.workspace }}/tools/mo/requirements_onnx.txt
7 changes: 1 addition & 6 deletions .github/workflows/job_python_unit_tests.yml
@@ -99,10 +99,6 @@ jobs:
extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch"
if [[ "${{ runner.arch }}" != "ARM64" ]]; then
extras_to_install="mxnet,$extras_to_install"
fi
# Find and install OV dev wheel
pushd ${INSTALL_DIR}/tools
ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl')
@@ -142,8 +138,7 @@ jobs:
# Skips under tickets: 133405, 122666
python3 -m pytest -s ${INSTALL_TEST_DIR}/mo/unit_tests \
- --junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml \
- --ignore-glob="**/mo/unit_tests/mo/front/mxnet/**"
+ --junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml
- name: Python ONNX operators tests
if: (fromJSON(inputs.affected-components).Python_API.test ||
1 change: 0 additions & 1 deletion .github/workflows/mo.yml
@@ -46,7 +46,6 @@ jobs:
# For UT
pip install unittest-xml-reporting==3.0.2
# MO requirements
- pip install -r requirements_mxnet.txt
pip install -r requirements_caffe.txt
pip install -r requirements_kaldi.txt
pip install -r requirements_onnx.txt
2 changes: 1 addition & 1 deletion .github/workflows/windows_conditional_compilation.yml
@@ -265,7 +265,7 @@ jobs:

CC_Build:
name: Conditional Compilation
- timeout-minutes: 20
+ timeout-minutes: 25
needs: [Build, Smart_CI]
defaults:
run:
6 changes: 3 additions & 3 deletions .github/workflows/windows_vs2019_release.yml
@@ -286,7 +286,7 @@
# Find and install the dev OV wheel
$ovDevWheelPath=Get-ChildItem -Path "${{ env.INSTALL_DIR }}\tools" -Filter openvino_dev*.whl | % { $_.FullName }
python3 -m pip install "$ovDevWheelPath[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch]"
python3 -m pip install "$ovDevWheelPath[caffe,kaldi,onnx,tensorflow2,pytorch]"
- name: Install Python API tests dependencies
run: |
@@ -316,7 +316,7 @@ jobs:
if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
shell: cmd
run: |
- python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml
+ python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml
- name: Install Python Layer tests dependencies
run: |
@@ -491,7 +491,7 @@ jobs:
- name: Intel CPU plugin func tests (parallel)
run: |
. "${{ env.INSTALL_DIR }}/setupvars.ps1"
- python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -- --gtest_filter=*smoke*
+ python3 ${{ env.PARALLEL_TEST_SCRIPT }} -e ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests.exe -c ${{ env.PARALLEL_TEST_CACHE }} -w ${{ env.INSTALL_TEST_DIR }} -s suite -rf 0 -- --gtest_filter=*smoke*
timeout-minutes: 60

- name: Save tests execution time
24 changes: 12 additions & 12 deletions docs/articles_en/about-openvino/release-notes-openvino.rst
@@ -305,21 +305,21 @@ Known Issues
| ID: n/a
| Description:
| For ARM binaries, the `1.16 ONNX library <https://vcpkg.link/ports/onnx/versions>`__
- is not yet available, while the current latest has shown two significant vulnerabilities:
+ is not yet available.
+ | The ONNX library for ARM, version 1.15, does not include the latest
+ functional and security updates. OpenVINO, version 2024.5 is targeted to be released in
+ Q4 2024 and will include additional functional and security updates. Users should update to
+ the latest version as it becomes available.
- | The current vulnerabilities are:
- `CVE-2024-27318 <https://nvd.nist.gov/vuln/detail/CVE-2024-27318>`__ and
- `CVE-2024-27319 <https://nvd.nist.gov/vuln/detail/CVE-2024-27319>`__.
- The vulnerabilities are less severe in the context of OpenVINO and will be fixed as soon as
- the most recent version of the library is available for ARM, expected at the 2024.5 release.

| **Component: Kaldi**
| ID: n/a
| Description:
| There is a known issue with the Kaldi DL framework support on the Python version 3.12 due
to the numpy version incompatibilities. As Kaldi support in OpenVINO is currently deprecated
and will be discontinued with version 2025.0, the issue will not be addressed.

Previous 2024 releases
4 changes: 2 additions & 2 deletions docs/articles_en/assets/snippets/ov_extensions.py
@@ -47,7 +47,7 @@ def conversion(node):
#! [py_frontend_extension_aten_hardtanh]
import torch
from openvino.frontend import ConversionExtension, NodeContext
- from openvino.tools.mo import convert_model
+ from openvino import convert_model


class HardTanh(torch.nn.Module):
@@ -69,5 +69,5 @@ def convert_hardtanh(node: NodeContext):

model = HardTanh(min_val=0.1, max_val=2.0)
hardtanh_ext = ConversionExtension("aten::hardtanh", convert_hardtanh)
- ov_model = convert_model(input_model=model, extensions=[hardtanh_ext])
+ ov_model = convert_model(input_model=model, extension=[hardtanh_ext])
#! [py_frontend_extension_aten_hardtanh]
2 changes: 1 addition & 1 deletion docs/articles_en/documentation/legacy-features.rst
@@ -78,7 +78,7 @@ offering.
Discontinued:
#############

- .. dropdown:: Apache MXNet, Caffe, and Kaldi model formats
+ .. dropdown:: Caffe, and Kaldi model formats

| *New solution:* conversion to ONNX via external tools
| *Old solution:* model support discontinued with OpenVINO 2024.0
@@ -234,7 +234,7 @@ Q14. What does the message "Cannot infer shape for node {} because there is no C
Q15. What does the message "Framework name can not be deduced from the given options. Use --framework to choose one of Caffe, TensorFlow, MXNet" mean?
######################################################################################################################################################
- **A:** You have run Model Optimizer without a flag ``--framework caffe|tf|mxnet``. Model Optimizer tries to deduce the framework by the extension of input model file (``.pb`` for TensorFlow, ``.caffemodel`` for Caffe, ``.params`` for Apache MXNet). Your input model might have a different extension and you need to explicitly set the source framework. For example, use ``--framework caffe``.
+ **A:** You have run Model Optimizer without a flag ``--framework caffe|tf``. Model Optimizer tries to deduce the framework by the extension of input model file (``.pb`` for TensorFlow, ``.caffemodel`` for Caffe, ``.params`` for Apache MXNet). Your input model might have a different extension and you need to explicitly set the source framework. For example, use ``--framework caffe``.
.. _question-16:
1 change: 0 additions & 1 deletion docs/dev/ov_dependencies.txt
@@ -147,7 +147,6 @@ mccabe
mistune
mongo-python-driver
more-itertools
- mxnet
mypy
mypy-extensions
networkx
7 changes: 3 additions & 4 deletions docs/dev/pypi_publish/pypi-openvino-dev.md
@@ -83,7 +83,6 @@ pip install openvino-dev[extras]
| :-------------------------------| :------------------------------------------------------------------------------- |
| caffe | [Caffe*](https://caffe.berkeleyvision.org/) |
| kaldi | [Kaldi*](https://github.com/kaldi-asr/kaldi) |
- | mxnet | [Apache MXNet*](https://mxnet.apache.org/) |
| onnx | [ONNX*](https://github.com/microsoft/onnxruntime/) |
| pytorch | [PyTorch*](https://pytorch.org/) |
| tensorflow | [TensorFlow* 1.x](https://www.tensorflow.org/versions#tensorflow_1) |
@@ -149,14 +148,14 @@ Users in China might encounter errors while downloading sources via PIP during O
If you use zsh (Z shell) interpreter, that is the default shell for macOS starting with version 10.15 (Catalina), you may encounter the following error while installing `openvino-dev` package with extras:

```sh
- pip install openvino-dev[tensorflow2,mxnet,caffe]
- zsh: no matches found: openvino-dev[tensorflow2,mxnet,caffe]
+ pip install openvino-dev[tensorflow2,caffe]
+ zsh: no matches found: openvino-dev[tensorflow2,caffe]
```

By default zsh interprets square brackets as an expression for pattern matching. To resolve this issue, you need to escape the command with quotes:

```sh
- pip install 'openvino-dev[tensorflow2,mxnet,caffe]'
+ pip install 'openvino-dev[tensorflow2,caffe]'
```

To avoid such issues you can also disable globbing for PIP commands by defining an alias in `~/.zshrc` file:
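A common form of such an alias, shown here for illustration (the alias line itself is cut off in this diff excerpt), is:

```sh
# Illustrative: run pip through noglob so zsh stops expanding the
# square brackets in arguments such as openvino-dev[extras].
alias pip='noglob pip'
```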
9 changes: 0 additions & 9 deletions docs/sphinx_setup/_static/download/supported_models.csv
@@ -211,11 +211,6 @@ facenet,Object Detection,onnx,FP16,,,+
facenet,Object Detection,onnx,FP16-INT8,,,+
facenet-20180408-102900,Object Detection,tf,FP16,+,+,+
facenet-20180408-102900,Object Detection,tf,FP32,+,+,
- face-recognition-mobilefacenet-arcface,Object Detection,mxnet,FP16,,,+
- face-recognition-mobilefacenet-arcface,Object Detection,mxnet,FP16-INT8,,,+
- face-recognition-resnet50-arcface,Object Detection,mxnet,FP16,+,+,+
- face-recognition-resnet50-aws,Object Detection,mxnet,FP16,,,+
- face-recognition-resnet50-aws,Object Detection,mxnet,FP32,,,+
face-reidentification-retail-0095,Object Detection,onnx,FP16,,,+
face-reidentification-retail-0095,Object Detection,onnx,FP16-INT8,,,+
facial-landmarks-35-adas-0002,Object Detection,caffe,FP16,,,+
@@ -428,8 +423,6 @@ ocrnet-hrnet-w18,Text Detection,paddle,FP16-INT8,+,+,
ocrnet-hrnet-w18,Text Detection,paddle,FP32,+,,
ocrnet-hrnet-w48,Text Detection,paddle,FP16,+,,
ocrnet-hrnet-w48,Text Detection,paddle,FP32,+,,
- octave-resnext-101-0.25,Image Classification,mxnet,FP16,+,+,+
- octave-resnext-101-0.25,Image Classification,mxnet,FP32,+,,
openchat-3.6-8b-20240522,Large Language Model,pytorch,intel-optimum default,,+,
open-closed-eye-0001,Image Classification,onnx,FP16,,,+
open-closed-eye-0001,Image Classification,onnx,FP16-INT8,,,+
@@ -629,8 +622,6 @@ squeezenet1.0-12,Image Classification,onnx,FP16,,,+
squeezenet1.0-12,Image Classification,onnx,FP32,+,+,+
squeezenet1.1-caffe,Image Classification,caffe,FP16,+,+,+
squeezenet1.1-caffe,Image Classification,caffe,FP32,+,+,+
- squeezenet1.1-mxnet,Image Classification,mxnet,FP16,+,+,+
- squeezenet1.1-mxnet,Image Classification,mxnet,FP32,+,+,+
squeezenet1.1-onnx,Image Classification,onnx,FP32,+,,
srgan-onnx,"Image Processing, Enhancement",onnx,FP16,+,,+
srgan-tf,"Image Processing, Enhancement",tf,FP16,+,+,
6 changes: 3 additions & 3 deletions docs/sphinx_setup/_static/html/banner_data.json
@@ -1,6 +1,6 @@
{
"p": "",
"p": "This page is a nightly version. It may be incomplete or faulty in both content and functionality. <a href='https://docs.openvino.ai/'>Go to the most recent official documentation version, 2024.</a>",
"version": "1",
"style": "warning",
"styles": "warning info critical"
}
"_styles": "warning info critical"
}
@@ -190,9 +190,19 @@ TSUnsqueezeBackward::TSUnsqueezeBackward() {
return false;
}
} else {
- auto rank = main_node->get_output_partial_shape(0).rank();
- non_negative_axes =
- util::try_get_normalized_axis_vector(unsqueeze_axes->get_tensor_view(), rank, *main_node);
+ const auto& axes = unsqueeze_axes->cast_vector<int64_t>();
+ if (std::all_of(axes.begin(), axes.end(), [](int64_t axis) {
+ return axis >= 0;
+ })) {
+ non_negative_axes = std::vector<size_t>(axes.begin(), axes.end());
+ } else {
+ auto rank = main_node->get_output_partial_shape(0).rank();
+ if (rank.is_dynamic()) {
+ return false;
+ }
+ non_negative_axes =
+ util::try_get_normalized_axis_vector(unsqueeze_axes->get_tensor_view(), rank, *main_node);
+ }
}

auto transpose_order_values = transpose_order->cast_vector<size_t>();
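For context, a minimal standalone sketch of the control flow this hunk introduces (the helper name `normalize_axes` and its signature are hypothetical, not part of the commit): axes that are already non-negative need no rank information, while negative axes can only be normalized against a static rank, so the transformation now bails out cleanly instead of attempting normalization when the rank is dynamic.

```cpp
// Sketch under the stated assumptions; it mirrors the patch's logic,
// not the actual OpenVINO utility functions.
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

std::optional<std::vector<size_t>> normalize_axes(const std::vector<int64_t>& axes,
                                                  std::optional<int64_t> static_rank) {
    // Fast path: nothing to normalize, valid even when the rank is unknown.
    if (std::all_of(axes.begin(), axes.end(), [](int64_t a) { return a >= 0; }))
        return std::vector<size_t>(axes.begin(), axes.end());
    // Negative axes need a static rank; signal "skip the transformation" otherwise.
    if (!static_rank)
        return std::nullopt;
    std::vector<size_t> result;
    result.reserve(axes.size());
    for (int64_t a : axes)
        result.push_back(static_cast<size_t>(a < 0 ? a + *static_rank : a));
    return result;
}
```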
@@ -1636,6 +1636,47 @@ auto test_backward_reshape_unsqueeze = []() {
INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonReshapeUnsqueezeBackward,
TSTestFixture,
test_backward_reshape_unsqueeze());

auto test_backward_unsqueeze_dyn_rank = []() {
TestCase test_case;

// Initialize common attributes
test_case.transformation = CREATE_PASS_FACTORY(TSUnsqueezeBackward);
test_case.num_main_ops = {1};
test_case.inputs_to_main = {
parameter(element::f32, PartialShape::dynamic()),
constant<int64_t>(element::i32, {2}, {-1}),
};

auto dyn_transpose = [](const vector<size_t>& idxs, const OutputVector& out_vec) -> OutputVector {
OutputVector result = out_vec;
for (const auto& idx : idxs) {
const auto& out = out_vec[idx];

// fill the order const with the stub values {-1, -2}
auto order = make_shared<Constant>(element::i32, Shape{2}, vector<int64_t>{-1, -2});
auto transpose = make_shared<Transpose>(out, order);
result[idx] = transpose;
}
return result;
};

// Test model description:
test_case.model.main_op = {CREATE_BINARY_FACTORY(Unsqueeze)};
test_case.model.preprocess_outputs_of_main = {{dyn_transpose}, {{0}}};
test_case.model.model_template = create_model;

// Ref model description, the same as the original model, the transformation is not applied
// it's expected.
test_case.model_ref.main_op = {CREATE_BINARY_FACTORY(Unsqueeze)};
test_case.model_ref.preprocess_outputs_of_main = {{dyn_transpose}, {{0}}};
test_case.model_ref.model_template = create_model;
return wrapper(test_case);
};

INSTANTIATE_TEST_SUITE_P(TransposeSinkingCommonUnsqueezeBackwardDynRank,
TSTestFixture,
test_backward_unsqueeze_dyn_rank());
} // namespace common
} // namespace testing
} // namespace transpose_sinking
@@ -1198,6 +1198,58 @@ std::set<std::vector<element::Type>> jit_logical_not_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
return {{element::f32}};
}

/// LOGICAL_XOR ///
jit_logical_xor_emitter::jit_logical_xor_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const std::shared_ptr<ov::Node>& node)
: jit_emitter(host, host_isa, node, get_arithmetic_binary_exec_precision(node)) {
prepare_table();
}

jit_logical_xor_emitter::jit_logical_xor_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const ov::element::Type exec_prc)
: jit_emitter(host, host_isa, exec_prc) {
prepare_table();
}

size_t jit_logical_xor_emitter::get_inputs_count() const { return 2; }

size_t jit_logical_xor_emitter::get_aux_vecs_count() const { return 1; }

size_t jit_logical_xor_emitter::get_aux_gprs_count() const { return 1; }

void jit_logical_xor_emitter::emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const {
if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) {
emit_isa<dnnl::impl::cpu::aarch64::asimd>(in_vec_idxs, out_vec_idxs);
} else {
OV_CPU_JIT_EMITTER_THROW("Can't create jit eltwise kernel");
}
}

template <dnnl::impl::cpu::aarch64::cpu_isa_t isa>
void jit_logical_xor_emitter::emit_isa(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const {
OV_CPU_JIT_EMITTER_ASSERT(exec_prc_ == ov::element::f32, "unsupported precision: " + exec_prc_.to_string());

using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits<isa>::TReg;
const TReg src1 = TReg(in_vec_idxs[0]);
const TReg src2 = TReg(in_vec_idxs[1]);
const TReg dst = TReg(out_vec_idxs[0]);
const TReg aux = TReg(aux_vec_idxs[0]);

h->eor(dst.b16, src1.b16, src2.b16);
h->ld1r(aux.s, table_val2("one"));
h->and_(dst.b16, dst.b16, aux.b16);
}

void jit_logical_xor_emitter::register_table_entries() {
push_arg_entry_of("one", 0x3f800000, true);
}

std::set<std::vector<element::Type>> jit_logical_xor_emitter::get_supported_precisions(const std::shared_ptr<ov::Node>& node) {
return {{element::f32, element::f32}};
}

/// MAX ///
jit_maximum_emitter::jit_maximum_emitter(dnnl::impl::cpu::aarch64::jit_generator* host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
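The emitter computes logical XOR with bit arithmetic instead of comparisons. A scalar sketch of the same trick (illustrative only; it assumes the canonical 0.0f/1.0f boolean encoding produced by upstream comparison and logical ops, which is also what the JIT code relies on):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// (a ^ b) & 0x3f800000: XOR-ing the raw bits of 0.0f (0x00000000) and
// 1.0f (0x3f800000) yields the 1.0f pattern exactly when the inputs differ,
// and masking with the 1.0f pattern (the "one" table entry) keeps the result
// a clean 0.0f/1.0f boolean; this scalarizes the eor + ld1r + and_ sequence above.
static float logical_xor_f32(float a, float b) {
    uint32_t ua, ub;
    std::memcpy(&ua, &a, sizeof ua);
    std::memcpy(&ub, &b, sizeof ub);
    const uint32_t r = (ua ^ ub) & 0x3f800000u;
    float out;
    std::memcpy(&out, &r, sizeof out);
    return out;
}

int main() {
    std::printf("%g %g %g %g\n",
                logical_xor_f32(0.f, 0.f),   // 0
                logical_xor_f32(1.f, 0.f),   // 1
                logical_xor_f32(0.f, 1.f),   // 1
                logical_xor_f32(1.f, 1.f));  // 0
    return 0;
}
```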
@@ -549,6 +549,34 @@ class jit_logical_not_emitter : public jit_emitter {
void register_table_entries() override;
};

class jit_logical_xor_emitter : public jit_emitter {
public:
jit_logical_xor_emitter(dnnl::impl::cpu::aarch64::jit_generator *host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const ov::element::Type exec_prc = ov::element::f32);

jit_logical_xor_emitter(dnnl::impl::cpu::aarch64::jit_generator *host,
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa,
const std::shared_ptr<ov::Node>& n);

size_t get_inputs_count() const override;

size_t get_aux_vecs_count() const override;

size_t get_aux_gprs_count() const override;

static std::set<std::vector<element::Type>> get_supported_precisions(
const std::shared_ptr<ov::Node>& node = nullptr);

private:
void emit_impl(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const override;

template <dnnl::impl::cpu::aarch64::cpu_isa_t isa>
void emit_isa(const std::vector<size_t> &in_vec_idxs, const std::vector<size_t> &out_vec_idxs) const;

void register_table_entries() override;
};

class jit_mod_emitter : public jit_emitter {
public:
jit_mod_emitter(dnnl::impl::cpu::aarch64::jit_generator *host,
@@ -35,6 +35,7 @@ bool JitEltwiseExecutor::isSupported(
Algorithm::EltwiseIsNaN,
Algorithm::EltwiseLessEqual,
Algorithm::EltwiseLogicalNot,
Algorithm::EltwiseLogicalXor,
Algorithm::EltwiseMaximum,
Algorithm::EltwiseMinimum,
Algorithm::EltwiseMish,