diff --git a/.github/workflows/job_onnx_runtime.yml b/.github/workflows/job_onnx_runtime.yml index 5a6f5cb27eceae..1bf4d4aa96eea3 100644 --- a/.github/workflows/job_onnx_runtime.yml +++ b/.github/workflows/job_onnx_runtime.yml @@ -110,6 +110,7 @@ jobs: --parallel \ --skip_tests \ --compile_no_warning_as_error \ + --allow_running_as_root \ --build_dir ${ONNX_RUNTIME_BUILD_DIR} env: CXXFLAGS: "-Wno-error=deprecated-declarations" @@ -138,11 +139,12 @@ jobs: ./onnxruntime_global_thread_pools_test working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - name: Run onnxruntime_api_tests_without_env - run: | - source ${INSTALL_DIR}/setupvars.sh - ./onnxruntime_api_tests_without_env - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo +# Test removed in onnxruntime 1.16.2 +# - name: Run onnxruntime_api_tests_without_env +# run: | +# source ${INSTALL_DIR}/setupvars.sh +# ./onnxruntime_api_tests_without_env +# working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - name: Run pytorch-converted tests run: | diff --git a/install_build_dependencies.sh b/install_build_dependencies.sh index 9b49c9eb8e292f..2875785f6d0a04 100755 --- a/install_build_dependencies.sh +++ b/install_build_dependencies.sh @@ -220,7 +220,7 @@ fi current_cmake_ver=$($cmake_command --version | sed -ne 's/[^0-9]*\(\([0-9]\.\)\{0,4\}[0-9][^.]\).*/\1/p') required_cmake_ver=3.20.0 if [ ! "$(printf '%s\n' "$required_cmake_ver" "$current_cmake_ver" | sort -V | head -n1)" = "$required_cmake_ver" ]; then - installed_cmake_ver=3.24.0 + installed_cmake_ver=3.26.0 arch=$(uname -m) if command -v apt-get &> /dev/null; then diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 67ed291ff62913..3d7e164d1f9516 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -23,4 +23,6 @@ add_subdirectory(bindings) if(ENABLE_TESTS) add_subdirectory(core/tests) add_subdirectory(tests) +else() + add_subdirectory(tests/ov_helpers/ov_models/ov_builders) endif() diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index dec7fcbe7e5ac5..04867095248a24 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -42,7 +42,6 @@ target_include_directories(${TARGET_NAME} PUBLIC $) target_link_libraries(${TARGET_NAME} PRIVATE openvino::reference - openvino::builders openvino::shape_inference openvino::pugixml ${CMAKE_DL_LIBS} diff --git a/src/common/transformations/CMakeLists.txt b/src/common/transformations/CMakeLists.txt index 0eea2cdbfbdfbc..10ed5fa6eea413 100644 --- a/src/common/transformations/CMakeLists.txt +++ b/src/common/transformations/CMakeLists.txt @@ -25,7 +25,7 @@ ov_build_target_faster(${TARGET_NAME}_obj PCH PRIVATE "src/precomp.hpp" ) -target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::reference openvino::itt openvino::builders openvino::core::dev openvino::shape_inference) +target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::reference openvino::itt openvino::core::dev openvino::shape_inference) target_include_directories(${TARGET_NAME}_obj PRIVATE "${PUBLIC_HEADERS_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}/src") diff --git a/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp b/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp index 42fa6f85cc61cf..5fe25177389dd5 100644 --- a/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp +++ b/src/common/transformations/src/transformations/transpose_sinking/ts_gather.cpp @@ -260,7 +260,11 @@ 
TSGatherBackward::TSGatherBackward() { main_node->input(1).replace_source_output(squeeze->input_value(0)); } } + std::vector new_axes_val; if (!axes_val.empty()) { + for (size_t i = 0; i < axes_val.size(); ++i) { + new_axes_val.push_back(order_val[axes_val[i]]); + } order_val = GetOrderAfterReduction(axes_val, order_val); } @@ -303,7 +307,7 @@ TSGatherBackward::TSGatherBackward() { RemoveTransposeConsumers(main_node); if (success) { auto target_inputs = main_node->get_output_target_inputs(0); - auto unsqueeze_axes = ov::op::v0::Constant::create(element::i32, {axes_val.size()}, axes_val); + auto unsqueeze_axes = ov::op::v0::Constant::create(element::i32, {new_axes_val.size()}, new_axes_val); auto unsqueeze = std::make_shared(main_node, unsqueeze_axes); for (const auto& input : target_inputs) { input.replace_source_output(unsqueeze); diff --git a/src/common/transformations/tests/transpose_sinking/ts_gather_test.cpp b/src/common/transformations/tests/transpose_sinking/ts_gather_test.cpp index 2a410fb0181333..f5d7fcd3cc07a1 100644 --- a/src/common/transformations/tests/transpose_sinking/ts_gather_test.cpp +++ b/src/common/transformations/tests/transpose_sinking/ts_gather_test.cpp @@ -204,6 +204,16 @@ INSTANTIATE_TEST_SUITE_P(TSCommonGatherBackward_1, TSTestFixture, test_backward_ auto test_backward_gather_optimization = [](const GatherBackwardArguments& test_arguments) { TestCase test_case; + auto custom_transpose = [&](const vector& idxs, const OutputVector& out_vec) -> OutputVector { + const auto& order_val = test_arguments.ref_transpose_order; + auto order = constant(i32, {order_val.size()}, order_val); + OutputVector new_outputs = out_vec; + for (const auto& idx : idxs) { + new_outputs[idx] = make_shared(out_vec[idx], order); + } + return new_outputs; + }; + // Initialize common attributes test_case.transformation = CREATE_PASS_FACTORY(TSGatherBackward); test_case.num_main_ops = {1}; @@ -229,7 +239,7 @@ auto test_backward_gather_optimization = [](const GatherBackwardArguments& test_ return {make_shared(out_vec[0], axes)}; }; - test_case.model_ref.preprocess_inputs_to_main = {{set_transpose_for, update_gather_inputs}, {{0}, {1, 2}}}; + test_case.model_ref.preprocess_inputs_to_main = {{custom_transpose, update_gather_inputs}, {{0}, {1, 2}}}; test_case.model_ref.main_op = {CREATE_GATHER_FACTORY(Gather)}; test_case.model_ref.preprocess_outputs_of_main = {{unsqueeze_for}, {{0}}}; test_case.model_ref.model_template = create_model; @@ -240,16 +250,20 @@ auto test_backward_gather_optimization = [](const GatherBackwardArguments& test_ vector tests_arguments_bw_optimization{ {{parameter(f32, {257, 8}), constant(i32, {1, 2}, {0}), constant(i32, {1}, {0})}, constant(i32, {1}, {1}), - AxisVector{}, - AxisVector{0}}, + AxisVector{1, 0}, + AxisVector{2}}, {{parameter(f32, {4}), constant(i32, {1}, {0}), constant(i32, {1}, {0})}, constant(i32, {1}, {0}), - AxisVector{}, + AxisVector{0}, AxisVector{0}}, {{parameter(f32, {4}), constant(i32, {1, 1, 1}, {0}), constant(i32, {1}, {0})}, constant(i32, {1}, {0}), - AxisVector{}, - AxisVector{0, 1, 2}}, + AxisVector{0}, + AxisVector{2, 1, 0}}, + {{parameter(f32, {32, 100}), constant(i32, {1, 60}, {0}), constant(i32, {1}, {0})}, + constant(i32, {1}, {1}), + AxisVector{1, 0}, + AxisVector{2}}, }; INSTANTIATE_TEST_SUITE_P(TSCommonGatherBackwardOptimization_0, @@ -261,6 +275,9 @@ INSTANTIATE_TEST_SUITE_P(TSCommonGatherBackwardOptimization_1, INSTANTIATE_TEST_SUITE_P(TSCommonGatherBackwardOptimization_2, TSTestFixture, 
test_backward_gather_optimization(tests_arguments_bw_optimization[2])); +INSTANTIATE_TEST_SUITE_P(TSCommonGatherBackwardOptimization_3, + TSTestFixture, + test_backward_gather_optimization(tests_arguments_bw_optimization[3])); } // namespace gather } // namespace testing } // namespace transpose_sinking diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 5db5eea652cb0e..15767de248131d 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -15,7 +15,6 @@ file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp file(GLOB_RECURSE PUBLIC_HEADERS ${OV_CORE_INCLUDE_PATH}/*.hpp) file(GLOB_RECURSE DEV_HEADERS ${OV_CORE_DEV_API_PATH}/*.hpp) -add_subdirectory(builder) add_subdirectory(reference) add_subdirectory(shape_inference) @@ -88,7 +87,7 @@ ov_build_target_faster(ngraph_obj ov_add_version_defines(src/version.cpp ngraph_obj) -target_link_libraries(ngraph_obj PRIVATE openvino::builders openvino::reference openvino::util +target_link_libraries(ngraph_obj PRIVATE openvino::reference openvino::util openvino::pugixml openvino::shape_inference openvino::core::dev) ov_mark_target_as_cc(ngraph_obj) @@ -106,12 +105,12 @@ if(NOT BUILD_SHARED_LIBS) endif() if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # ngraph is linked against openvino::builders, openvino::reference, openvino::shape_inference static libraries + # ngraph is linked against openvino::reference, openvino::shape_inference static libraries # which include ngraph headers with dllimport attribute. Linker complains about it # but no way to fix this: linking with no attribute defaults to dllexport and we have # multiple defitions for ngraph symbols. # - # The possible way is to use object libraries for openvino::builders, openvino::reference + # The possible way is to use object libraries for openvino::reference # but it's not convinient since these libraries are exported from build tree # and it's better to use them as static libraries in 3rd party projects if(BUILD_SHARED_LIBS) diff --git a/src/core/builder/include/ngraph/builder/autobroadcast.hpp b/src/core/builder/include/ngraph/builder/autobroadcast.hpp deleted file mode 100644 index 53929a5a1a560a..00000000000000 --- a/src/core/builder/include/ngraph/builder/autobroadcast.hpp +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "ngraph/except.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" - -namespace ngraph { -namespace builder { -class numpy_autobroadcast_incompatible_shapes : public ngraph::ngraph_error { -public: - numpy_autobroadcast_incompatible_shapes(const ngraph::Shape& shape1, const ngraph::Shape& shape2); - -private: - const ngraph::Shape m_shape1; - const ngraph::Shape m_shape2; - - static std::string error_str(const ngraph::Shape& shape1, const ngraph::Shape& shape2); -}; - -/// -/// \brief Broadcast all values, if necessary, to obtain equal shapes according -/// to NumPy's auto-broadcasting scheme. -/// -/// \note There are some shape combinations which the autobroadcast algoritm cannot -/// handle. An exception is thrown when such combinations are provided to this -/// function. -/// -/// \param values Vector of output values. -/// -/// \exception ngraph::builder::numpy_autobroadcast_incompatible_shapes -/// -/// \return Vector of broadcasted values. 
-/// -OutputVector numpy_broadcast_outputs(const OutputVector& values); - -/// -/// \brief Broadcast input value to provided shape using NumPy's auto-broadcasting -/// rules. -/// -/// \param value Input value -/// \param shape Requested output shape -/// -/// \return Node producing values with requested shape. -/// -std::shared_ptr numpy_broadcast(const Output& value, const Shape& shape); - -/// \brief Wrap two graph values, if necessary, to obtain values with identical shapes, -/// using NumPy's auto-broadcast rules. -/// -/// The elements in the std::pair returned by this function correspond to those supplied -/// in the std::pair provided via \p args. -/// -/// If \p args.first and \p args.second produce identical shapes, then the returned -/// std::pair will have the same value as \p args. -/// -/// If \p args.first and \p args.second produce different shapes, then this function creates -/// new ngraph::op::Reshape and/or ngraph::op::Broadcast nodes, as needed, to wrap -/// \p args.first and/or \p args.second in a manner that yields values with the same shape. -/// -/// There are some shape combinations which the autobroadcast algoritm cannot handle. -/// An exception is thrown when such combinations are provided to this function. -/// -/// \pre -/// - \p args.first is not null -/// - \p args.second is not null -/// -/// \post -/// - The ngraph::Node objects pointed to by \p args.first and \p args.second have not been -/// altered by this function, except by possibly having added consumers of their values. -/// -/// - If an exception was not thrown, then the return value's \p first and \p second -/// elements point to ngraph::Node objects whose output values have the same shape. -/// -/// \exception ngraph::builder::numpy_autobroadcast_incompatible_shapes -std::pair, std::shared_ptr> numpy_broadcast( - const std::pair, Output>& args); - -/// \brief Broadcast shape of two nodes to make them compatible for a matrix -/// multiplication. -/// -/// \note This function is reflecting broadcasting behaviour of NumPy's `matmul` -/// operation. -/// (https://docs.scipy.org/doc/numpy/reference/generated/numpy.matmul.html) -/// This mean that only \"stack of matrices\" axes are bidirectionally -/// broadcasted. The last two dimension are left untouched. -/// -/// \param[in] left The Node providing data for the left-hand side of matrix -/// multiplication. -/// \param[in] right The Node providing data for the right-hand side of matrix -/// multiplication. -/// -/// \return The vector containing both outputs broadcasted. -/// -OutputVector numpy_broadcast_for_matmul_operation(const Output& left, const Output& right); - -/// \brief Cast shape of all input nodes for an element-wise operation that requires -/// shape-compatibility -/// -/// \param inputs Original list of inputs -/// \param axis Index starting to align -/// -/// \return pdpd-style broadcasted list of nodes. -OutputVector pdpd_broadcast(const OutputVector& inputs, int64_t axis); - -/// \brief Generate a list of broadcast axes. -/// -/// \details Informally, a broadcast "adds" axes to the input tensor, replicating -/// elements from the input tensor as needed to fill the new dimensions. -/// Function calculate which of the output axes are added in this way. -/// -/// \param output_shape The new shape for the output tensor. -/// \param input_shape The shape of input tensor. -/// \param start_match_axis The axis along which we want to replicate elements. 
-/// The starting axis position (0-based) int the output -/// shape from which the current shape of the tensor -/// matches the desired new shape. -/// -/// \return The indices of added axes. -std::shared_ptr calculate_broadcast_axes(const Shape& output_shape, - const Shape& input_shape, - std::size_t start_match_axis); - -/// -/// \brief Calculate the output shape of numpy-style broadcast operation for all input -/// shapes. -/// -/// This function finds the maximum tensor shape that will be the result of -/// element-wise operation that will be applied to the input shapes vector. -/// The function also prepares the shape of each input for the element-wise -/// operation by left-padding those shapes so that their rank is equal to the -/// left_shape's rank. -/// -/// \param input_shapes A vector of input shapes for which a common shape should be -/// found -/// -/// \return A pair that contains the target shape as its first object and a vector of -/// padded input shapes ready to be broadcasted as the second object -/// -std::pair> get_numpy_broadcast_shapes(const std::vector& input_shapes); - -inline std::shared_ptr make_broadcast_node(const Output& value, - const Shape& new_shape, - std::size_t start_match_axis) { - auto shape_const = op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape); - return std::make_shared( - value, - shape_const, - calculate_broadcast_axes(new_shape, value.get_shape(), start_match_axis)); -} - -namespace opset1 { -/// -/// \brief Broadcast right node to left node's shape using legacy scheme. -/// -/// \param[in] left The left hand side node of binary operation. -/// \param[in] right The right hand side node of binary operation. The one -/// to be broadcasted. -/// \param[in] start_match_axis The axis index starting mutually equal shapes -/// of both nodes. -/// -/// \return The Output object connected to node producing broadcasted right node. -/// -Output legacy_broadcast_for_binary_operation(const Output& left, - const Output& right, - size_t start_match_axis); - -/// -/// \brief Reconstructs axes mapping vector for Broadcast:v1 operation. -/// -/// \param[in] output_shape The output shape of Broadcast operation. -/// \param[in] broadcast_axes The broadcast axes used for Broadcast:v0 operator. -/// -/// \return The vector with axes indexes mapping . -/// -std::vector get_axes_mapping(const Shape& output_shape, const AxisSet& broadcast_axes); - -/// -/// \brief Creates Node returning the axes mapping for Broadcast:v1 operation. -/// -/// \param[in] output_shape The output shape of Broadcast operation. -/// \param[in] input_shape The input shape. -/// \param[in] start_match_axis The axis index at which input shape starts to be -/// identical as the output shape. -/// -/// \return Returns the Output object pointing to node with the axes mapping. -/// -Output get_axes_mapping_output(const Shape& output_shape, const Shape& input_shape, std::size_t start_match_axis); - -/// -/// \brief Creates Node returning the axes mapping for Broadcast operation. -/// \note Shapes' ranks need to be static. -/// -/// \param[in] output_shape The output shape of Broadcast operation. -/// \param[in] input_shape The input shape. -/// \param[in] start_match_axis The axis index at which input shape starts to be -/// identical to consecutive subset of output shape -/// dimensions. -/// -/// \return Returns the Output object pointing to node with the axes mapping. 
-/// -Output get_axes_mapping_output(const PartialShape& output_shape, - const PartialShape& input_shape, - std::size_t start_match_axis); - -/// -/// \brief Creates Node returning the axes mapping for Broadcast:v1 operation. -/// -/// \param[in] output_shape The output shape of Broadcast operation. -/// \param[in] broadcast_axes The broadcast axes used for Broadcast:v0 operator. -/// -/// \return The Output object with Node returning axes mapping. -/// -Output get_axes_mapping_output(const Shape& output_shape, const AxisSet& broadcast_axes); - -Output make_broadcast(const Output& node, const Shape& target_shape, const AxisSet& broadcast_axes); - -Output make_broadcast(const Output& node, const Shape& target_shape, std::size_t start_match_axis); - -} // namespace opset1 -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/include/ngraph/builder/make_constant.hpp b/src/core/builder/include/ngraph/builder/make_constant.hpp deleted file mode 100644 index 169fefa5ce17cb..00000000000000 --- a/src/core/builder/include/ngraph/builder/make_constant.hpp +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "autobroadcast.hpp" -#include "ngraph/node.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/type/float16.hpp" - -namespace ngraph { -namespace builder { -template -std::shared_ptr make_constant(const element::Type& type, const Shape& shape, const T& num) { - std::shared_ptr val = nullptr; - -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic error "-Wswitch" -# pragma GCC diagnostic error "-Wswitch-enum" -#endif - std::string unsupported_data_type; - switch (type) { - case element::Type_t::f32: - val = - std::make_shared(type, ngraph::Shape{}, std::vector{static_cast(num)}); - break; - case element::Type_t::f64: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::f16: - val = std::make_shared( - type, - ngraph::Shape{}, - std::vector{ngraph::float16(static_cast(num))}); - break; - case element::Type_t::bf16: - val = std::make_shared( - type, - ngraph::Shape{}, - std::vector{ngraph::bfloat16(static_cast(num))}); - break; - case element::Type_t::i64: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::i32: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::i16: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::i8: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::u64: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::u32: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::u16: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::u8: - val = std::make_shared(type, - ngraph::Shape{}, - std::vector{static_cast(num)}); - break; - case element::Type_t::dynamic: - unsupported_data_type = "dynamic"; - break; - case element::Type_t::boolean: - unsupported_data_type = "boolean"; - break; - case element::Type_t::u1: - unsupported_data_type = "u1"; - break; - 
case element::Type_t::i4: - unsupported_data_type = "i4"; - break; - case element::Type_t::u4: - unsupported_data_type = "u4"; - break; - case element::Type_t::nf4: - unsupported_data_type = "nf4"; - break; - case element::Type_t::string: - unsupported_data_type = "string"; - break; - case element::Type_t::undefined: - unsupported_data_type = "undefined"; - break; - } - if (!unsupported_data_type.empty()) - OPENVINO_THROW("make_constant: Unsupported element type '", unsupported_data_type, "'"); - -#if defined(__GNUC__) && !(__GNUC__ == 4 && __GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - - if (shape.size() > 0) { - ngraph::AxisSet axes; - for (size_t i = 0; i < shape.size(); i++) { - axes.insert(i); - } - val = builder::opset1::make_broadcast(val, shape, axes).get_node_shared_ptr(); - } - - return val; -} - -/// \brief Create constant filled with double value -/// -/// \note If num value exeeds capacity of type, the value is clamped. -/// -/// \param[in] type The type of produced Constant node. -/// \param[in] shape The shape of produced Constant node. -/// \param[in] num The value used to fill Constant node. -/// -/// \return The Constant node which have expected type, shape and value. -/// -std::shared_ptr make_constant_from_double(const element::Type& type, const Shape& shape, double num); -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/include/ngraph/builder/norm.hpp b/src/core/builder/include/ngraph/builder/norm.hpp deleted file mode 100644 index 6e10459dbf9b33..00000000000000 --- a/src/core/builder/include/ngraph/builder/norm.hpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include "ngraph/axis_set.hpp" -#include "ngraph/node.hpp" - -namespace ngraph { -namespace builder { -/// \brief Specifies method of bias application to avoid numerical problems -enum class BiasMode { - // Add bias to intermediate result - ADD, - // Calculate max of intermediate result and bias - MAX -}; - -namespace opset1 { -/// \brief Calculates L-0 norm of input tensor. -/// -/// \note The L-0 norm represents the cardinality of elements different -/// from zero. This actually is not a "true" norm. -/// -/// \param[in] value The input tensor. -/// \param[in] reduction_axes The axes along which we calculate norm. -/// \param[in] keep_dims The flag indicates if axes will be removed or kept. -/// -/// \return L-0 norm of value. The output sub-graph is composed of v1 ops. -/// -std::shared_ptr l0_norm(const Output& value, const Output& reduction_axes, bool keep_dims = false); - -/// \brief Calculates L-1 norm of a value. -/// -/// \note The L-1 norm represents the sum of absolute values. -/// -/// \param[in] value The input tensor. -/// \param[in] reduction_axes The axes along which we calculate norm. -/// \param[in] bias The bias added to the calculated sum. -/// \param[in] keep_dims The flag indicates if axes will be removed or kept. -/// -/// \return L-1 norm of value. The output sub-graph is composed of v1 ops. -/// -std::shared_ptr l1_norm(const Output& value, - const Output& reduction_axes, - float bias = 0.f, - bool keep_dims = false); - -/// \brief Calculates L-2 norm of input tensor. -/// -/// \note The L-2 norm represents the square root of sum of squares of each -/// individual element. -/// -/// \param[in] value The input tensor. -/// \param[in] reduction_axes The axes along which we calculate norm. 
-/// \param[in] bias The bias combined with calculated sum. -/// \param[in] bias_mode The method of bias application. -/// \param[in] keep_dims The flag indicates if axes will be removed or kept. -/// -/// \return L-2 norm of value. The output sub-graph is composed of v1 ops. -/// -std::shared_ptr l2_norm(const Output& value, - const Output& reduction_axes, - float bias = 0.f, - BiasMode bias_mode = BiasMode::ADD, - bool keep_dims = false); - -/// \brief Creates node which calculates L-p norm on input tensor. -/// -/// \param[in] value The input tensor. -/// \param[in] reduction_axes The axes along which we calculate norm. -/// \param[in] p_norm The p norm to calculate. -/// \param[in] bias The bias added to the calculated sum. -/// \param[in] keep_dims The flag indicates if axes will be removed or kept. -/// -/// \return L-p norm of value. The output sub-graph is composed of v1 ops. -/// -std::shared_ptr lp_norm(const Output& value, - const Output& reduction_axes, - std::size_t p_norm = 2, - float bias = 0.f, - bool keep_dims = false); -} // namespace opset1 -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/include/ngraph/builder/reduce_ops.hpp b/src/core/builder/include/ngraph/builder/reduce_ops.hpp deleted file mode 100644 index f21fc4cb15a4b4..00000000000000 --- a/src/core/builder/include/ngraph/builder/reduce_ops.hpp +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ngraph/axis_set.hpp" -#include "ngraph/node.hpp" - -namespace ngraph { -namespace builder { -namespace opset1 { -// clang-format off - /// \brief Sum-based Mean of a Tensor. - /// - /// Calculates - /// - /// \f$\sum_{i=1}^{N} \frac{x_i}{N}\f$ - /// - /// Where `i` traverses all of the axes provided in `reduction_axes` - /// - /// ## Inputs - /// - /// | | Type | Description | | - /// | ---------------- | --------------------------------- | -------------------------------------------------------| - /// | `node` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape | - /// | `reduction_axes` | AxesSet | The axes to eliminate through reduction (0 indexed). | - /// | `keep_dims` | bool | If set to true it holds reduced axes. | - /// - /// ## Output - /// - /// | Type | Description | - /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | - /// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. | -// clang-format on -std::shared_ptr mean(const Output& node, const AxisSet& reduction_axes, bool keep_dims = false); - -std::shared_ptr mean(const Output& node, const Output& reduction_axes, bool keep_dims = false); - -// clang-format off - /// \brief Sum-based Variance of a Tensor. 
- /// - /// If bessel_correct is true, calculates - /// - /// \f$\frac{\sum_{i=1}^{N}\left(x_i-\bar{x}\right)^2}{N-1}\f$ - /// - /// else, calculates - /// - /// \f$\frac{\sum_{i=1}^{N}\left(x_i-\bar{x}\right)^2}{N}\f$ - /// - /// Where `i` traverses all of the axes provided in `reduction_axes` and \f$\bar{x} = \sum_{i=1}^{N} \frac{x_i}{N}\f$ - /// - /// ## Inputs - /// - /// | | Type | Description | - /// | ------------------- | --------------------------------- | ------------------------------------------------------------ | - /// | `value | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape | - /// | `reduction_axes` | AxesSet | The axes to eliminate through reduction (0 indexed). | - /// | `bessel_correction` | bool (default = false) | Enable Bessel's correction to std_dev for Small sample sizes | - /// - /// ## Output - /// - /// | Type | Description | - /// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | - /// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. | -// clang-format on -std::shared_ptr variance(const Output& value, - const AxisSet& reduction_axes, - const bool bessel_correction = false); - -std::shared_ptr variance(const Output& value, - const Output& reduction_axes, - bool keep_dims = false, - bool bessel_correction = false); -} // namespace opset1 - -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/include/ngraph/builder/reshape.hpp b/src/core/builder/include/ngraph/builder/reshape.hpp deleted file mode 100644 index efa1501e843ea8..00000000000000 --- a/src/core/builder/include/ngraph/builder/reshape.hpp +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include - -#include "ngraph/node.hpp" -#include "ngraph/shape.hpp" - -namespace ngraph { -namespace builder { -namespace opset1 { -/// \brief Change shape of a value -/// -/// \param[in] value The value to be reshaped. -/// \param[in] shape The new shape. -/// -/// \return Reshape:v1 op. -std::shared_ptr reshape(const Output& value, const Shape& shape); - -/// \brief Permute axes according to specified axes_order parameter. -/// -/// \param The vlaue whose axes we want to permute. -/// \param axes_order The permutation of axes. -/// -/// \return Transpose:v1 op. -std::shared_ptr reorder_axes(const Output& value, std::vector axes_order = {}); - -/// \brief Return transposed value (with axes in reversed order). -/// -/// \param Value to transpose. -/// -/// \return Transpose:v1 op. -std::shared_ptr transpose(const Output& value); - -/// \brief Flatten a value into a 2D matrix, with a static dividing axis. -/// -/// \param The tensor to be flattened. -/// \param The axis dividing shape. -/// -/// \return The new value will be a 2D matrix representing the flattened input -/// node. -std::shared_ptr flatten(const Output& value, int axis); - -/// \brief Expands node tensor shape with empty axis at -/// specified position. -/// -/// \param[in] value The value to be expanded. -/// \param[in] axis The position in the expanded axes where the -/// new axis is placed. -/// -/// \return Reshape:v1 op. -std::shared_ptr expand_dims(const Output& value, std::size_t axis = 0); - -/// \brief Remove empty axes from input tensor. 
-/// -/// \param[in] value The value to be squeezed. -/// \param[in] axes The vector defining indexes of axes to be removed. -/// -/// \return Reshape:v1 op. -std::shared_ptr squeeze(const Output& value, std::vector axes = {0}); - -/// \brief Collapse specified axes into single one. -/// -/// \note Collapsed axes create a continuous range starting from outermost axis. -/// -/// \param[in] value The value to be reshaped. -/// \param[in] start_axis The start axis index. -/// \param[in] end_axis The end axis (inclusive) index. -/// -/// \return The node with collapsed specified axes. -/// -std::shared_ptr collapse(const Output& value, const std::size_t start_axis, const std::size_t end_axis); -} // namespace opset1 -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/src/builder/autobroadcast.cpp b/src/core/builder/src/builder/autobroadcast.cpp deleted file mode 100644 index ecfd79c2a43d3c..00000000000000 --- a/src/core/builder/src/builder/autobroadcast.cpp +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/builder/autobroadcast.hpp" - -#include -#include -#include - -#include "ngraph/axis_vector.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/check.hpp" -#include "ngraph/op/broadcast.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/opsets/opset7.hpp" -#include "ngraph/util.hpp" - -using namespace std; - -namespace ngraph { -namespace builder { -OPENVINO_SUPPRESS_DEPRECATED_START -numpy_autobroadcast_incompatible_shapes::numpy_autobroadcast_incompatible_shapes(const Shape& shape1, - const Shape& shape2) - : ngraph_error(error_str(shape1, shape2)), - m_shape1(shape1), - m_shape2(shape2) {} - -string numpy_autobroadcast_incompatible_shapes::error_str(const Shape& shape1, const Shape& shape2) { - ostringstream os; - os << "Auto-broadcast not possible for these input shapes:" - << " shape1=" << vector_to_string(shape1) << " shape2=" << vector_to_string(shape2); - return os.str(); -} -OPENVINO_SUPPRESS_DEPRECATED_END - -/// -/// \brief Calculate the output shape of numpy-style broadcast operation for two -/// shapes. -/// -/// \note More info: -/// https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html#general-broadcasting-rules -/// Example: left: [3, 1, 10] right: [5, 1] return: [3, 5, 10] -/// -/// \param lhs_shape First input shape. -/// \param rhs_shape Second input Shape. -/// -/// \return Broadcast shape of input shapes. 
-/// -static Shape calculate_broadcast_shape(Shape lhs_shape, Shape rhs_shape) { - Shape result; - auto lhs_rank = lhs_shape.size(); - auto rhs_rank = rhs_shape.size(); - auto max_rank = max(lhs_rank, rhs_rank); - - // left-pad the lhs_shape with ones - lhs_shape.insert(begin(lhs_shape), max_rank - lhs_rank, 1); - // left-pad the rhs_shape with ones - rhs_shape.insert(begin(rhs_shape), max_rank - rhs_rank, 1); - - for (size_t index = 0; index < max_rank; ++index) { - size_t lhs_dim = lhs_shape.at(index); - size_t rhs_dim = rhs_shape.at(index); - - if (lhs_dim != rhs_dim && lhs_dim != 1 && rhs_dim != 1) { - throw numpy_autobroadcast_incompatible_shapes(lhs_shape, rhs_shape); - } - - result.push_back(max(lhs_dim, rhs_dim)); - } - - return result; -}; - -pair> get_numpy_broadcast_shapes(const vector& input_shapes) { - Shape target_shape = accumulate(begin(input_shapes), end(input_shapes), Shape{}, calculate_broadcast_shape); - - vector full_shapes; - for (const Shape& input : input_shapes) { - Shape padded_shape{input}; - padded_shape.insert(begin(padded_shape), target_shape.size() - padded_shape.size(), 1); - full_shapes.push_back(std::move(padded_shape)); - } - - return {target_shape, full_shapes}; -} - -static pair> get_numpy_broadcast_shapes(const OutputVector& values) { - vector input_shapes; - - for (const auto& input : values) { - input_shapes.push_back(input.get_shape()); - } - - return get_numpy_broadcast_shapes(input_shapes); -} - -/// \brief Broadcast input node. -/// -/// \note The source shape does not have to be the actual shape of input node. However -/// it should be a superset of it (containing it as a continuous subset). This -/// implies we may expand the number of axes of input node. The ranks of -/// source_shape and output_shape must be equal. This means that the -/// source_shape has to be padded with ones for this operation. -/// -/// \param[in] value The input Node to be broadcast. -/// \param[in] output_shape The output shape. -/// \param[in] source_shape The source shape from which we want to broadcast input node. -/// -/// \return The broadcasted Node. -/// -static shared_ptr numpy_broadcast_node(const Output& value, - const Shape& output_shape, - const Shape& source_shape) { - shared_ptr broadcasted_node = value.get_node_shared_ptr(); - // If node already has the required shape, return original node - if (output_shape == value.get_shape()) { - return broadcasted_node; - } - - NGRAPH_CHECK(source_shape.size() == output_shape.size(), - "Ranks of source_shape and output_shape dont match: ", - source_shape.size(), - " vs ", - output_shape.size()); - - AxisVector broadcast_axes; - Shape squeezed_shape; - // Positions of axes which have length of 1 are needed to calculate broadcast_axes - // for nGraph broadcast operation. We need to remove ones from source shape - // to avoid broadcasting axis conflict. 
- for (size_t index = 0; index < output_shape.size(); ++index) { - if (source_shape.at(index) == 1 && output_shape.at(index) != 1) { - broadcast_axes.push_back(index); - } else { - squeezed_shape.push_back(source_shape.at(index)); - } - } - - if (squeezed_shape != value.get_shape()) { - broadcasted_node = builder::opset1::reshape(value, squeezed_shape); - } - - if (!broadcast_axes.empty()) { - auto shape_const = op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape); - broadcasted_node = - make_shared(broadcasted_node, - shape_const, - opset1::get_axes_mapping_output(output_shape, broadcast_axes)); - } - - return broadcasted_node; -} - -/// \brief Broadcast input node. -/// -/// \param[in] value The input Node to be broadcast. -/// \param[in] output_shape The output shape. -/// \param[in] axis The start index to align with output_shape -/// -/// \return The broadcasted Node. -/// -static shared_ptr broadcast_value_pdpd_style(const Output& value, const Shape& output_shape, int64_t axis) { - auto value_shape = value.get_shape(); - - // If node already has the required shape, return original node - if (output_shape == value_shape) { - return value.get_node_shared_ptr(); - } - - if (axis == -1) { - axis = output_shape.size() - value_shape.size(); - } - - auto trimmed_value_shape = value_shape; - while (trimmed_value_shape.size() > 0 && trimmed_value_shape.back() == 1) { - trimmed_value_shape.pop_back(); - } - - AxisSet axes; - for (int64_t i = 0; i < axis; ++i) { - axes.insert(static_cast(i)); - } - - for (size_t i = axis + trimmed_value_shape.size(); i < output_shape.size(); ++i) { - axes.insert(i); - } - - auto trimmed_value = value; - if (value_shape != trimmed_value_shape) { - trimmed_value = builder::opset1::reshape(value, trimmed_value_shape); - } - - auto shape_const = op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape); - auto value_bcast = - make_shared(trimmed_value, shape_const, opset1::get_axes_mapping_output(output_shape, axes)); - - return std::move(value_bcast); -} - -pair, shared_ptr> numpy_broadcast(const pair, Output>& args) { - NGRAPH_CHECK(args.first.get_node()); - NGRAPH_CHECK(args.second.get_node()); - - const Shape& arg1_in_shape = args.first.get_shape(); - const Shape& arg2_in_shape = args.second.get_shape(); - - // Handle the trivial case... 
- if (arg1_in_shape == arg2_in_shape) { - return make_pair(args.first.get_node_shared_ptr(), args.second.get_node_shared_ptr()); - } - - NodeVector bcasted_outputs = as_node_vector(numpy_broadcast_outputs({args.first, args.second})); - - return make_pair(bcasted_outputs.at(0), bcasted_outputs.at(1)); -} - -OutputVector numpy_broadcast_outputs(const OutputVector& values) { - if (values.size() <= 1) { - return values; - } - - // find the output tensor's shape, then broadcast all inputs so that they are compatible - auto bcast_shapes = get_numpy_broadcast_shapes(values); - - OutputVector broadcasted_inputs; - for (size_t i = 0; i < values.size(); ++i) { - broadcasted_inputs.push_back(numpy_broadcast_node(values[i], bcast_shapes.first, bcast_shapes.second[i])); - } - return broadcasted_inputs; -} - -shared_ptr numpy_broadcast(const Output& value, const Shape& shape) { - auto bcast_shape = get_numpy_broadcast_shapes({value.get_shape(), shape}); - return numpy_broadcast_node(value, bcast_shape.first, bcast_shape.second[0]); -} - -OutputVector numpy_broadcast_for_matmul_operation(const Output& left, const Output& right) { - const auto& left_shape = left.get_shape(); - const auto& right_shape = right.get_shape(); - // Broadcast only _stack of matrices_ axes. - const auto& numpy_shapes = get_numpy_broadcast_shapes( - {Shape{begin(left_shape), next(end(left_shape), -2)}, Shape{begin(right_shape), next(end(right_shape), -2)}}); - - // Prepare tensors output shapes with broadcasted _stack of matrices_ axes. - auto left_output_shape = numpy_shapes.first; - auto right_output_shape = numpy_shapes.first; - // Append the last two axes original dimensions. - left_output_shape.insert(end(left_output_shape), next(begin(left_shape), left_shape.size() - 2), end(left_shape)); - right_output_shape.insert(end(right_output_shape), - next(begin(right_shape), right_shape.size() - 2), - end(right_shape)); - - auto left_full_shape = numpy_shapes.second.at(0); - auto right_full_shape = numpy_shapes.second.at(1); - // Append the last two axes original dimensions. 
- left_full_shape.insert(end(left_full_shape), next(begin(left_shape), left_shape.size() - 2), end(left_shape)); - right_full_shape.insert(end(right_full_shape), next(begin(right_shape), right_shape.size() - 2), end(right_shape)); - - return {numpy_broadcast_node(left, left_output_shape, left_full_shape), - numpy_broadcast_node(right, right_output_shape, right_full_shape)}; -} - -OutputVector pdpd_broadcast(const OutputVector& inputs, int64_t axis) { - if (inputs.size() <= 1) { - return inputs; - } - - OutputVector broadcasted_inputs{inputs[0]}; - for (size_t i = 1; i < inputs.size(); ++i) { - broadcasted_inputs.push_back(broadcast_value_pdpd_style(inputs[i], inputs[0].get_shape(), axis)); - } - return broadcasted_inputs; -} - -std::shared_ptr calculate_broadcast_axes(const Shape& output_shape, - const Shape& input_shape, - size_t start_match_axis) { - vector axes(output_shape.size() - input_shape.size()); - // Populate the axes vector with monotonic increasing series from 0 until - // output_shape_size, excluding values in range: - // [start_match_axis, start_match_axis + input_shape.size()] - iota(begin(axes), begin(axes) + start_match_axis, 0); - iota(begin(axes) + start_match_axis, end(axes), start_match_axis + input_shape.size()); - - auto axes_mapping = opset1::get_axes_mapping(output_shape, axes); - return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); -} - -namespace opset1 { -Output legacy_broadcast_for_binary_operation(const Output& left, - const Output& right, - size_t start_match_axis) { - const auto& left_shape = left.get_shape(); - const auto& right_shape = right.get_shape(); - - bool dimensions_identical = (left_shape == right_shape); - if (dimensions_identical) { - return right; - } - - // Prepare new shape of right operand for broadcasting - // Remove dimensions with length=1 from back - auto new_right_shape = right_shape; - for (int dimension = static_cast(new_right_shape.size()) - 1; dimension >= 0; --dimension) { - if (new_right_shape.at(dimension) == 1) { - new_right_shape.pop_back(); - } else { - break; - } - } - - // Find first dimensions at front with length different from 1 - size_t num_ones = 0; - for (size_t dimension : new_right_shape) { - if (dimension == 1) { - ++num_ones; - } else { - break; - } - } - - // Remove dimensions with length=1 from front - new_right_shape.erase(begin(new_right_shape), next(begin(new_right_shape), num_ones)); - - auto reshape_right = reshape(right, new_right_shape); - - // Move broadcast start axis parameter to right - start_match_axis += num_ones; - - return make_broadcast(reshape_right, left_shape, start_match_axis); -} - -vector get_axes_mapping(const Shape& output_shape, const AxisSet& broadcast_axes) { - NGRAPH_CHECK((broadcast_axes.size() <= output_shape.size())); - vector axes_mapping(output_shape.size()); - iota(axes_mapping.begin(), axes_mapping.end(), 0); - for (auto i = broadcast_axes.rbegin(); i != broadcast_axes.rend(); ++i) { - axes_mapping.erase(axes_mapping.begin() + *i); - } - return axes_mapping; -} - -Output get_axes_mapping_output(const PartialShape& output_shape, - const PartialShape& input_shape, - std::size_t start_match_axis) { - NGRAPH_CHECK((input_shape.rank().is_static() && output_shape.rank().is_static()), - "Tensor's rank has to be static."); - NGRAPH_CHECK( - (input_shape.rank().get_length() + static_cast(start_match_axis) <= output_shape.rank().get_length()), - "Unable to figure out axes mapping."); - - vector mapping(input_shape.rank().get_length()); - 
iota(begin(mapping), end(mapping), start_match_axis); - - return op::Constant::create(element::i64, Shape{mapping.size()}, mapping); -} - -Output get_axes_mapping_output(const Shape& output_shape, const AxisSet& broadcast_axes) { - vector axes_mapping{get_axes_mapping(output_shape, broadcast_axes)}; - return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); -} - -static Output get_axes_mapping_output(const PartialShape& output_shape, - const Output& input_shape, - std::size_t start_match_axis) { - const auto one_node = opset7::Constant::create(element::i64, Shape{}, {1}); - const auto zero_node = opset7::Constant::create(element::i64, Shape{}, {0}); - const auto start_match_axis_node = opset7::Constant::create(element::i64, Shape{}, {start_match_axis}); - const auto target_shape_rank_node = - builder::opset1::reshape(std::make_shared(input_shape), Shape{}); - - const auto range_node = std::make_shared(zero_node, target_shape_rank_node, one_node, element::i64); - - // workaround for GPU plugin type incompatibility - const auto range_node_converted = - std::make_shared(range_node, start_match_axis_node->get_element_type()); - // end of workaround - - const auto result = std::make_shared(range_node_converted, start_match_axis_node); - return result; -} - -Output make_broadcast(const Output& node, const Shape& target_shape, const AxisSet& broadcast_axes) { - return make_shared(node, - op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), - get_axes_mapping_output(target_shape, broadcast_axes)); -} - -Output make_broadcast(const Output& node, const Shape& target_shape, size_t start_match_axis) { - const auto node_shape = std::make_shared(node); - return make_shared(node, - op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), - get_axes_mapping_output(target_shape, node_shape, start_match_axis)); -} - -} // namespace opset1 -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/src/builder/make_constant.cpp b/src/core/builder/src/builder/make_constant.cpp deleted file mode 100644 index aea4bfd16b8d99..00000000000000 --- a/src/core/builder/src/builder/make_constant.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/builder/make_constant.hpp" - -NGRAPH_SUPPRESS_DEPRECATED_START -namespace ngraph { -namespace builder { -std::shared_ptr make_constant_from_double(const element::Type& type, const Shape& shape, double num) { - auto ceil_func = [](double x) { - return ceil(x); - }; - - std::shared_ptr result = nullptr; - switch (type) { - case element::Type_t::i8: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::i16: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::i32: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::i64: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::u8: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::u16: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::u32: { - result = std::make_shared(type, shape, double_to_int(num, ceil_func)); - break; - } - case element::Type_t::u64: { - result = std::make_shared(type, shape, 
double_to_int(num, ceil_func)); - break; - } - case element::Type_t::f16: { - result = builder::make_constant(type, shape, static_cast(num)); - break; - } - case element::Type_t::bf16: { - result = builder::make_constant(type, shape, static_cast(num)); - break; - } - case element::Type_t::f32: { - result = builder::make_constant(type, shape, static_cast(num)); - break; - } - case element::Type_t::f64: { - result = builder::make_constant(type, shape, num); - break; - } - default: - OPENVINO_THROW("Unsupported data type during make_constant_from_double"); - break; - } - return result; -} -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/src/builder/norm.cpp b/src/core/builder/src/builder/norm.cpp deleted file mode 100644 index b2561ebf4d6d1b..00000000000000 --- a/src/core/builder/src/builder/norm.cpp +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/builder/norm.hpp" - -#include "ngraph/op/abs.hpp" -#include "ngraph/op/add.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/convert.hpp" -#include "ngraph/op/maximum.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/not_equal.hpp" -#include "ngraph/op/power.hpp" -#include "ngraph/op/reduce_sum.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/opsets/opset1.hpp" -#include "ngraph/shape.hpp" - -using namespace std; - -namespace ngraph { -namespace builder { -namespace detail { -namespace opset1 { -namespace { -shared_ptr lp_norm(const Output& value, - size_t p_norm, - const Output& reduction_axes, - float bias, - bool keep_dims) { - // In general "entrywise" lp-norm for matrix `A` is defined as following double - // sum: - // ||A||_p = ||vec(A)||_p = [sum_{i=1}^m sum_{j=1}^n abs(a_{i,j})^p]^{1/p} - shared_ptr abs_values{make_shared(value)}; - shared_ptr p_node = ngraph::opset1::Constant::create(value.get_element_type(), Shape{}, {p_norm}); - - // Get inner part of equation: abs_values^p_node, then sum over reduction_axes. - shared_ptr values{make_shared(abs_values, p_node)}; - values = make_shared(values, reduction_axes, keep_dims); - - shared_ptr bias_node{ngraph::opset1::Constant::create(values->get_element_type(), Shape{}, {bias})}; - - values = make_shared(values, bias_node); - - // Get outer part of equation: raise values to 1/p_norm exponent. - shared_ptr inv_p_node = ngraph::opset1::Constant::create(values->get_element_type(), Shape{}, {1.f / p_norm}); - - return {make_shared(values, inv_p_node)}; -} -} // namespace -} // namespace opset1 -} // namespace detail - -shared_ptr builder::opset1::l0_norm(const Output& value, - const Output& reduction_axes, - bool keep_dims) { - // L0 norm returns number of elements different from zero. - const shared_ptr zero_node{ngraph::opset1::Constant::create(value.get_element_type(), Shape{}, {0.f})}; - - // Convert bool values to input node data type. 
- const shared_ptr non_zero_values = - make_shared(make_shared(value, zero_node), - value.get_element_type()); - - return make_shared(non_zero_values, reduction_axes, keep_dims); -} - -shared_ptr builder::opset1::l1_norm(const Output& value, - const Output& reduction_axes, - float bias, - bool keep_dims) { - const shared_ptr values{ - make_shared(make_shared(value), reduction_axes, keep_dims)}; - - const shared_ptr bias_node{ngraph::opset1::Constant::create(values->get_element_type(), Shape{}, {bias})}; - - return make_shared(values, bias_node); -} - -shared_ptr builder::opset1::l2_norm(const Output& value, - const Output& reduction_axes, - float bias, - BiasMode bias_mode, - bool keep_dims) { - shared_ptr pow = - make_shared(value, - make_shared(value.get_element_type(), Shape{}, 2)); - shared_ptr values{make_shared(pow, reduction_axes, keep_dims)}; - - shared_ptr bias_node{ngraph::opset1::Constant::create(values->get_element_type(), Shape{}, {bias})}; - shared_ptr result; - switch (bias_mode) { - case BiasMode::MAX: { - result = make_shared(make_shared(values, bias_node)); - break; - } - case BiasMode::ADD: - default: - result = make_shared(make_shared(values, bias_node)); - } - return result; -} - -shared_ptr builder::opset1::lp_norm(const Output& value, - const Output& reduction_axes, - size_t p_norm, - float bias, - bool keep_dims) { - // The number of non-zero elements - if (p_norm == 0) { - return opset1::l0_norm(value, reduction_axes, keep_dims); - } - // sum of absolute values. - else if (p_norm == 1) { - return opset1::l1_norm(value, reduction_axes, bias, keep_dims); - } - // sqrt of sum of squares - Euclidean norm - else if (p_norm == 2) { - return opset1::l2_norm(value, reduction_axes, bias, BiasMode::ADD, keep_dims); - } - // generic case - else { - return detail::opset1::lp_norm(value, p_norm, reduction_axes, bias, keep_dims); - } -} - -} // namespace builder - -} // namespace ngraph diff --git a/src/core/builder/src/builder/reduce_ops.cpp b/src/core/builder/src/builder/reduce_ops.cpp deleted file mode 100644 index 7f809ab5fe9d6b..00000000000000 --- a/src/core/builder/src/builder/reduce_ops.cpp +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/builder/reduce_ops.hpp" - -#include - -#include "ngraph/axis_set.hpp" -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/multiply.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/sqrt.hpp" -#include "ngraph/op/subtract.hpp" -#include "ngraph/opsets/opset1.hpp" -#include "ngraph/util.hpp" - -namespace ngraph { -namespace builder { -namespace { -size_t get_num_elements(const Shape& shape, const AxisSet& reduction_axes) { - size_t N = 1; - for (auto a : reduction_axes) { - N *= shape[a]; - } - return N; -} - -std::shared_ptr get_num_elements(const Output& value, const Output& reduction_axes) { - const auto value_shape = std::make_shared(value); - const auto dim_values = - std::make_shared(value_shape, - reduction_axes, - ngraph::opset1::Constant::create(element::i64, {}, {0})); - - return std::make_shared(dim_values, - ngraph::opset1::Constant::create(element::i64, {}, {0})); -} - -} // namespace - -std::shared_ptr builder::opset1::mean(const Output& value, const AxisSet& reduction_axes, bool keep_dims) { - std::shared_ptr elems_number; - const auto value_elem_type = value.get_element_type(); - const auto reduction_axes_const = - 
ngraph::opset1::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()); - const auto value_elems_sum = std::make_shared(value, reduction_axes_const, keep_dims); - if (value.get_partial_shape().is_static()) { - const auto elems_number_value = get_num_elements(value.get_shape(), reduction_axes); - elems_number = ngraph::opset1::Constant::create(value_elem_type, Shape{}, {elems_number_value}); - } else { - elems_number = get_num_elements(value, reduction_axes_const); - elems_number = std::make_shared(elems_number, value_elem_type); - } - - return std::make_shared(value_elems_sum, elems_number); -} - -std::shared_ptr builder::opset1::mean(const Output& value, - const Output& reduction_axes, - bool keep_dims) { - std::shared_ptr elems_number; - const auto value_elem_type = value.get_element_type(); - const auto value_elems_sum = std::make_shared(value, reduction_axes, keep_dims); - elems_number = get_num_elements(value, reduction_axes); - elems_number = std::make_shared(elems_number, value_elem_type); - - return std::make_shared(value_elems_sum, elems_number); -} - -std::shared_ptr builder::opset1::variance(const Output& value, - const AxisSet& reduction_axes, - const bool bessel_correction) { - const bool keep_dims = true; - std::shared_ptr mu = opset1::mean(value, reduction_axes, keep_dims); - - Output diff = std::make_shared(value, mu); - - diff = std::make_shared( - std::make_shared(diff, diff), - ngraph::opset1::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()), - false); - - const auto& et = value.get_element_type(); - const auto N = get_num_elements(value.get_shape(), reduction_axes); - - std::shared_ptr result; - if (bessel_correction) { - const auto N1const = ngraph::opset1::Constant::create(et, Shape{}, {N - 1}); - result = std::make_shared(diff, N1const); - } else { - const auto Nconst = ngraph::opset1::Constant::create(et, Shape{}, {N}); - result = std::make_shared(diff, Nconst); - } - return result; -} - -std::shared_ptr builder::opset1::variance(const Output& value, - const Output& reduction_axes, - bool keep_dims, - bool bessel_correction) { - std::shared_ptr mu = opset1::mean(value, reduction_axes, keep_dims); - - Output diff = std::make_shared(value, mu); - - diff = std::make_shared(std::make_shared(diff, diff), - reduction_axes, - keep_dims); - - const auto& et = value.get_element_type(); - auto N = get_num_elements(value, reduction_axes); - N = std::make_shared(N, et); - - std::shared_ptr result; - if (bessel_correction) { - const auto one = std::make_shared(et, Shape{}, 1); - N = std::make_shared(N, one); - } - - result = std::make_shared(diff, N); - return result; -} - -} // namespace builder -} // namespace ngraph diff --git a/src/core/builder/src/builder/reshape.cpp b/src/core/builder/src/builder/reshape.cpp deleted file mode 100644 index 11cc31fe980988..00000000000000 --- a/src/core/builder/src/builder/reshape.cpp +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/builder/reshape.hpp" - -#include -#include -#include -#include - -#include "ngraph/axis_vector.hpp" -#include "ngraph/op/concat.hpp" -#include "ngraph/op/constant.hpp" -#include "ngraph/op/reduce_prod.hpp" -#include "ngraph/op/reshape.hpp" -#include "ngraph/op/shape_of.hpp" -#include "ngraph/op/squeeze.hpp" -#include "ngraph/op/transpose.hpp" -#include "ngraph/op/variadic_split.hpp" -#include "ngraph/opsets/opset1.hpp" -#include "ngraph/util.hpp" 
-#include "ngraph/validation_util.hpp" - -using namespace ngraph; -using namespace std; - -shared_ptr builder::opset1::reshape(const Output& value, const Shape& shape) { - if (value.get_partial_shape().same_scheme(shape)) { - return value.get_node_shared_ptr(); - } else if (is_scalar(shape)) { - auto value_rank = value.get_shape().size(); - AxisVector axes_vector(value_rank); - std::iota(axes_vector.begin(), axes_vector.end(), 0); - auto axes = op::Constant::create(element::i64, Shape{value_rank}, axes_vector); - return std::make_shared(value, axes); - } else { - auto out_pattern = - op::Constant::create(element::i64, Shape{shape.size()}, vector(shape.begin(), shape.end())); - - return make_shared(value, out_pattern, false); - } -} - -shared_ptr builder::opset1::reorder_axes(const Output& value, vector axes_order) { - const auto axes_order_const = op::Constant::create(element::i64, - Shape{axes_order.size()}, - vector(axes_order.begin(), axes_order.end())); - return make_shared(value, axes_order_const); -} - -shared_ptr builder::opset1::transpose(const Output& value) { - // This part is left to preserve backward compatibility and ensure passing ONNX tests. - if (value.get_partial_shape().is_static()) { - vector axes_order(value.get_shape().size()); - iota(begin(axes_order), end(axes_order), 0); - reverse(begin(axes_order), end(axes_order)); - return builder::opset1::reorder_axes(value, axes_order); - } - - const auto input_rank = std::make_shared(std::make_shared(value)); - const auto neg_one = ngraph::opset1::Constant::create(element::i64, Shape{}, {-1}); - const auto start_node = std::make_shared(input_rank, neg_one); - const auto reverse_axes_order = std::make_shared(reshape(start_node, Shape{}), // start - neg_one, // stop (exclusive) - neg_one); // step - return std::make_shared(value, reverse_axes_order); -} - -namespace ngraph { -namespace builder { -namespace opset1 { -namespace { -/// -/// \brief Return the node representing normalized axis with respect to -/// provided rank. -/// -/// \param[in] node_rank The node representing rank used for normalization. -/// \param[in] axis The axis value to be normalized. -/// -/// \return The new Constant node representing normalized axis value. -/// -std::shared_ptr get_normalized_axis_node(const std::shared_ptr node_rank, int64_t axis) { - auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{1}, {axis}); - // shortcut for already positive value - if (axis >= 0) { - return axis_node; - } - - // TODO: What if axis value is beyond acceptable values? [-node_rank, - // node_rank-1] - return make_shared(node_rank, axis_node); -} -} // namespace -} // namespace opset1 -} // namespace builder -} // namespace ngraph - -shared_ptr builder::opset1::flatten(const Output& value, int axis) { - // First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of - // input tensor. 
The last dimension is the product of the rest of input tensor dimensions: - // [d_{axis}, ..., d_n] - shared_ptr output_shape; - if (axis == 0) { - output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {1, -1}); - } else if (axis == 1) { - output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {0, -1}); - } else { - const auto value_shape = make_shared(value); - const auto value_rank = make_shared(value_shape); - const auto axis_node = get_normalized_axis_node(value_rank, axis); - - const auto first_part_dims = - make_shared(value_shape, - ngraph::opset1::Constant::create(element::i64, {1}, {0}), - axis_node, - vector{0}, - vector{0}); - const auto first_part_dims_length = - make_shared(first_part_dims, - ngraph::opset1::Constant::create(element::i64, {}, {0}), - true); - - const auto remaining_part_length = ngraph::opset1::Constant::create(element::i64, {1}, {-1}); - - output_shape = - make_shared(OutputVector{first_part_dims_length, remaining_part_length}, 0); - } - return make_shared(value, output_shape, true); -} - -shared_ptr builder::opset1::expand_dims(const Output& value, size_t axis) { - Shape output_shape(value.get_shape()); - // Add empty axis at specified position. - auto empty_axis_it = begin(output_shape); - advance(empty_axis_it, axis); - output_shape.insert(empty_axis_it, 1); - return builder::opset1::reshape(value, output_shape); -} - -shared_ptr builder::opset1::squeeze(const Output& value, vector axes) { - if (axes.empty()) { - return value.get_node_shared_ptr(); - } - - Shape in_shape{value.get_shape()}; - for (size_t idx = 0; idx < axes.size(); ++idx) { - in_shape.at(axes.at(idx)) = 0; - } - Shape output_shape; - for (auto axis : in_shape) { - if (axis != 0) { - output_shape.push_back(axis); - } - } - return builder::opset1::reshape(value, output_shape); -} - -shared_ptr builder::opset1::collapse(const Output& value, const size_t start_axis, const size_t end_axis) { - if (start_axis == end_axis) { - return value.get_node_shared_ptr(); - } - - if (value.get_partial_shape().is_static()) { - auto shape = value.get_shape(); - // Multiply all elements of shape from start_axis to end_axis inclusive - size_t collapsed_axis_size = accumulate(next(begin(shape), start_axis), - next(begin(shape), end_axis + 1), - size_t{1}, - multiplies()); - Shape output_shape{}; - output_shape.insert(begin(output_shape), begin(shape), next(begin(shape), start_axis)); - output_shape.insert(end(output_shape), collapsed_axis_size); - output_shape.insert(end(output_shape), next(begin(shape), end_axis + 1), end(shape)); - return builder::opset1::reshape(value, output_shape); - } - - const auto shape = make_shared(value); - const auto rank = make_shared(shape); - - // Split lengths used in VariadicSplit - const auto start_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {start_axis}); - const auto end_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {end_axis + 1}); - const auto collapsed_axis = make_shared(end_axis_node, start_axis_node); - const auto post_axis = make_shared(rank, end_axis_node); - - const auto split_lengths = - make_shared(OutputVector{start_axis_node, collapsed_axis, post_axis}, 0); - const auto split_axis = ngraph::opset1::Constant::create(element::i64, {}, {0}); - const auto split_node = make_shared(shape, split_axis, split_lengths); - - const auto reduced_axis = ngraph::opset1::Constant::create(element::i64, {1}, {0}); - const auto collapsed_axis_size = make_shared(split_node->output(1), reduced_axis, true); - - 
const auto collapsed_shape = make_shared( - OutputVector{split_node->output(0), collapsed_axis_size, split_node->output(2)}, - 0); - - return make_shared(value, collapsed_shape, false); -} diff --git a/src/core/builder/src/builder/split.cpp b/src/core/builder/src/builder/split.cpp deleted file mode 100644 index 95216e53e1d84e..00000000000000 --- a/src/core/builder/src/builder/split.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "ngraph/builder/split.hpp" - -#include "ngraph/opsets/opset1.hpp" - -using namespace ngraph; - -OutputVector builder::opset1::split(const Output& value, - const std::vector& split_lengths, - int64_t axis) { - const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis}); - const auto split_lengths_node = - ngraph::opset1::Constant::create(element::i64, Shape{split_lengths.size()}, split_lengths); - const auto variadic_split = std::make_shared(value, axis_node, split_lengths_node); - - return variadic_split->outputs(); -} - -OutputVector builder::opset1::split(const Output& value, int64_t num_splits, int64_t axis) { - const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis}); - const auto split = std::make_shared(value, axis_node, num_splits); - - return split->outputs(); -} diff --git a/src/core/builder/src/precomp.hpp b/src/core/builder/src/precomp.hpp deleted file mode 100644 index ca7d02c911509e..00000000000000 --- a/src/core/builder/src/precomp.hpp +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include - -#include diff --git a/src/core/tests/build_graph.cpp b/src/core/tests/build_graph.cpp index 7fdebc1799c2df..c6714fcb715358 100644 --- a/src/core/tests/build_graph.cpp +++ b/src/core/tests/build_graph.cpp @@ -9,7 +9,6 @@ #include "common_test_utils/graph_comparator.hpp" #include "common_test_utils/test_tools.hpp" #include "common_test_utils/type_prop.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include "ngraph/graph_util.hpp" #include "openvino/core/except.hpp" #include "openvino/op/abs.hpp" @@ -26,6 +25,7 @@ #include "openvino/op/split.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/op/util/variable.hpp" +#include "ov_models/ov_builders/broadcast.hpp" using namespace std; using namespace ov; @@ -36,8 +36,8 @@ TEST(build_graph, build_simple) { auto arg1 = make_shared(element::f32, Shape{3}); auto arg2 = make_shared(element::f32, Shape{32, 7}); auto arg3 = make_shared(element::f32, Shape{32, 7}); - auto broadcast_1 = ngraph::builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); - auto b1 = ngraph::builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto broadcast_1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto b1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); ASSERT_EQ(dot->input_value(0).get_node_shared_ptr(), arg2); ASSERT_EQ(dot->input_value(1).get_node_shared_ptr(), arg0); @@ -92,8 +92,8 @@ TEST(build_graph, function_undeclared_parameters) { auto arg1 = make_shared(element::f32, Shape{3}); auto arg2 = make_shared(element::f32, Shape{32, 7}); auto arg3 = make_shared(element::f32, Shape{32, 7}); - auto broadcast_1 = ngraph::builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); - auto b1 = 
ngraph::builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto broadcast_1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto b1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); ASSERT_EQ(dot->input_values()[0].get_node_shared_ptr(), arg2); ASSERT_EQ(dot->input_values()[1].get_node_shared_ptr(), arg0); @@ -438,8 +438,8 @@ TEST(build_graph, build_graph_parameters_autodetection) { auto arg1 = make_shared(element::f32, Shape{3}); auto arg2 = make_shared(element::f32, Shape{32, 7}); auto arg3 = make_shared(element::f32, Shape{32, 7}); - auto broadcast_1 = ngraph::builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); - auto b1 = ngraph::builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto broadcast_1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); + auto b1 = ov::op::util::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); auto f = make_shared(OutputVector{dot}); diff --git a/src/core/tests/type_prop/tensor_iterator.cpp b/src/core/tests/type_prop/tensor_iterator.cpp index bab7c4265c671d..64bd9df242860f 100644 --- a/src/core/tests/type_prop/tensor_iterator.cpp +++ b/src/core/tests/type_prop/tensor_iterator.cpp @@ -7,9 +7,9 @@ #include #include "common_test_utils/type_prop.hpp" -#include "ngraph/builder/reshape.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset5.hpp" +#include "ov_models/ov_builders/reshape.hpp" using namespace std; using namespace ov; @@ -34,14 +34,14 @@ TEST(type_prop, tensor_iterator_lstm) { auto X = make_shared(element::f32, Shape{N, 1, I}); auto W_body = make_shared(element::f32, Shape{4 * H, I}); auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(ngraph::builder::opset1::reshape(X, Shape{N, I}), - ngraph::builder::opset1::reshape(H_t, Shape{N, H}), - ngraph::builder::opset1::reshape(C_t, Shape{N, H}), + auto LSTM_cell = make_shared(ov::op::util::reshape(X, Shape{N, I}), + ov::op::util::reshape(H_t, Shape{N, H}), + ov::op::util::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = ngraph::builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = ngraph::builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto H_o = ov::op::util::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ov::op::util::reshape(LSTM_cell->output(1), Shape{N, 1, H}); auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); auto tensor_iterator = make_shared(); @@ -197,14 +197,14 @@ TEST(type_prop, tensor_iterator_with_dynamic_reshape) { auto X = make_shared(element::f32, Shape{N, 1, I}); auto W_body = make_shared(element::f32, Shape{4 * H, I}); auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(ngraph::builder::opset1::reshape(X, Shape{N, I}), - ngraph::builder::opset1::reshape(H_t, Shape{N, H}), - ngraph::builder::opset1::reshape(C_t, Shape{N, H}), + auto LSTM_cell = make_shared(ov::op::util::reshape(X, Shape{N, I}), + ov::op::util::reshape(H_t, Shape{N, H}), + ov::op::util::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = ngraph::builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = ngraph::builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto H_o = ov::op::util::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ov::op::util::reshape(LSTM_cell->output(1), Shape{N, 1, 
H}); auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); auto tensor_iterator = make_shared(); diff --git a/src/core/tests/visitors/op/tensor_iterator.cpp b/src/core/tests/visitors/op/tensor_iterator.cpp index 8d2c7baf6396e5..5b7a836ab6d841 100644 --- a/src/core/tests/visitors/op/tensor_iterator.cpp +++ b/src/core/tests/visitors/op/tensor_iterator.cpp @@ -6,10 +6,10 @@ #include -#include "ngraph/builder/reshape.hpp" #include "openvino/op/add.hpp" #include "openvino/op/lstm_cell.hpp" #include "openvino/op/multiply.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "visitors/visitors.hpp" using namespace std; @@ -39,14 +39,14 @@ TEST(attributes, tensor_iterator_lstm) { auto X = make_shared(element::f32, Shape{N, 1, I}); auto W_body = make_shared(element::f32, Shape{4 * H, I}); auto R_body = make_shared(element::f32, Shape{4 * H, H}); - auto LSTM_cell = make_shared(ngraph::builder::opset1::reshape(X, Shape{N, I}), - ngraph::builder::opset1::reshape(H_t, Shape{N, H}), - ngraph::builder::opset1::reshape(C_t, Shape{N, H}), + auto LSTM_cell = make_shared(ov::op::util::reshape(X, Shape{N, I}), + ov::op::util::reshape(H_t, Shape{N, H}), + ov::op::util::reshape(C_t, Shape{N, H}), W_body, R_body, H); - auto H_o = ngraph::builder::opset1::reshape(LSTM_cell->output(0), Shape{N, 1, H}); - auto C_o = ngraph::builder::opset1::reshape(LSTM_cell->output(1), Shape{N, 1, H}); + auto H_o = ov::op::util::reshape(LSTM_cell->output(0), Shape{N, 1, H}); + auto C_o = ov::op::util::reshape(LSTM_cell->output(1), Shape{N, 1, H}); auto body = make_shared(OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body}); auto tensor_iterator = make_shared(); diff --git a/src/frontends/common/src/random_normal_helper.cpp b/src/frontends/common/src/random_normal_helper.cpp index 5e789a9f72f2f5..7d989a377debec 100644 --- a/src/frontends/common/src/random_normal_helper.cpp +++ b/src/frontends/common/src/random_normal_helper.cpp @@ -2,15 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // +#define _USE_MATH_DEFINES #include "openvino/frontend/common/random_normal_helper.hpp" +#include + #include "ngraph/output_vector.hpp" #include "openvino/op/constant.hpp" #include "openvino/opsets/opset12.hpp" #include "openvino/pass/graph_rewrite.hpp" #include "transformations/rt_info/disable_fp16_compression.hpp" -#define _USE_MATH_DEFINES -#include namespace ov { namespace frontend { diff --git a/src/frontends/onnx/frontend/src/frontend.cpp b/src/frontends/onnx/frontend/src/frontend.cpp index 0cbc4a0a2fa318..0a2cbc60c194cf 100644 --- a/src/frontends/onnx/frontend/src/frontend.cpp +++ b/src/frontends/onnx/frontend/src/frontend.cpp @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -31,6 +30,7 @@ #include "ops_bridge.hpp" #include "transformations/resolve_names_collisions.hpp" #include "utils/common.hpp" +#include "utils/legacy_conversion_extension.hpp" using namespace ov; using namespace ov::frontend::onnx; @@ -195,6 +195,13 @@ bool FrontEnd::supported_impl(const std::vector& variants) const { return false; } +namespace { +const auto legacy_conversion_extension = std::make_shared(); +const ngraph::onnx_import::LegacyConversionExtension::Ptr get_legacy_conversion_extension() { + return legacy_conversion_extension; +} +} // namespace + void FrontEnd::add_extension(const std::shared_ptr& extension) { if (auto telemetry = std::dynamic_pointer_cast(extension)) { m_extensions.telemetry = telemetry; @@ -212,7 +219,7 @@ void FrontEnd::add_extension(const 
std::shared_ptr& extension) { } else if (const auto& legacy_ext = std::dynamic_pointer_cast(extension)) { m_other_extensions.push_back(legacy_ext); std::call_once(has_legacy_extension, [this] { - m_extensions.conversions.push_back(ngraph::onnx_import::detail::get_legacy_conversion_extension()); + m_extensions.conversions.push_back(get_legacy_conversion_extension()); }); } } diff --git a/src/frontends/onnx/frontend/src/op/add.cpp b/src/frontends/onnx/frontend/src/op/add.cpp index 453b41fa80e5c8..61ac900e731775 100644 --- a/src/frontends/onnx/frontend/src/op/add.cpp +++ b/src/frontends/onnx/frontend/src/op/add.cpp @@ -6,7 +6,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include "ngraph/shape.hpp" #include "utils/common.hpp" diff --git a/src/frontends/onnx/frontend/src/op/affine.cpp b/src/frontends/onnx/frontend/src/op/affine.cpp index 1c7a2feaded83b..443a353ff853b1 100644 --- a/src/frontends/onnx/frontend/src/op/affine.cpp +++ b/src/frontends/onnx/frontend/src/op/affine.cpp @@ -6,7 +6,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include "ngraph/shape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/clip.cpp b/src/frontends/onnx/frontend/src/op/clip.cpp index cf469b4b366168..ca3cde2036da39 100644 --- a/src/frontends/onnx/frontend/src/op/clip.cpp +++ b/src/frontends/onnx/frontend/src/op/clip.cpp @@ -8,7 +8,6 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/make_constant.hpp" #include "ngraph/validation_util.hpp" #include "onnx_import/core/null_node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp index df075686196eeb..bc9e9b256e810e 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/attention.cpp @@ -5,8 +5,8 @@ #include "op/com.microsoft/attention.hpp" #include "default_opset.hpp" -#include "ngraph/builder/split.hpp" #include "onnx_import/core/null_node.hpp" +#include "ov_models/ov_builders/split.hpp" namespace ngraph { namespace onnx_import { @@ -122,7 +122,7 @@ NodeVector split_to_QKV(const std::shared_ptr& node, // head_size = hidden_size / num_heads head_size = std::make_shared(hidden_size, num_heads_node); // split the node into 3 even parts Q, K, V with shape (batch_size, sequence_len, hidden_size) - split = ngraph::builder::opset1::split(node, 3, 2); + split = ov::op::util::split(node, 3, 2); // and reshape each part to new shape (batch_size, sequence_len, num_heads, head_size) auto new_shape = std::make_shared(NodeVector{batch_size_seq_len, num_heads_node, head_size}, 0); @@ -141,7 +141,7 @@ NodeVector split_to_QKV(const std::shared_ptr& node, // Q: (batch_size, sequence_len, qkv_hidden_sizes[0]) // K: (batch_size, sequence_len, qkv_hidden_sizes[1]) // V: (batch_size, sequence_len, qkv_hidden_sizes[2]) - split = ngraph::builder::opset1::split(node, qkv_hidden_sizes, 2); + split = ov::op::util::split(node, qkv_hidden_sizes, 2); // and reshape each part to new shape (batch_size, sequence_len, num_heads, head_size) for (size_t i = 0; i < split.size(); i++) { auto new_shape = std::make_shared( @@ -455,7 +455,7 @@ std::shared_ptr attention_softmax(const OutputVector& op_inputs, // (2, batch_size, num_heads, past_sequence_length + sequence_length, head_size) // so we need to split it into two parts, remove first dimension from each part and 
concatenate first part // with current K and second part with current V - const auto split = ngraph::builder::opset1::split(past, 2, 0); + const auto split = ov::op::util::split(past, 2, 0); const auto past_K = std::make_shared(split[0], zero); K = std::make_shared(NodeVector{past_K, K}, 2); const auto past_V = std::make_shared(split[1], zero); diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp index b7d3300bad5539..75c12141d0ae78 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/fusedgemm.cpp @@ -7,7 +7,6 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/matmul.hpp" diff --git a/src/frontends/onnx/frontend/src/op/compress.cpp b/src/frontends/onnx/frontend/src/op/compress.cpp index 7960d4e16e4739..d1d31f02a192fd 100644 --- a/src/frontends/onnx/frontend/src/op/compress.cpp +++ b/src/frontends/onnx/frontend/src/op/compress.cpp @@ -7,7 +7,7 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" +#include "ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -22,7 +22,7 @@ OutputVector compress(const Node& node) { if (node.has_attribute("axis")) { axis = node.get_attribute_value("axis"); } else { - data = std::make_shared(ngraph::builder::opset1::flatten(data, static_cast(axis))); + data = std::make_shared(ov::op::util::flatten(data, static_cast(axis))); } auto axis_node = default_opset::Constant::create(element::i64, Shape{}, {axis}); auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0}); diff --git a/src/frontends/onnx/frontend/src/op/conv.cpp b/src/frontends/onnx/frontend/src/op/conv.cpp index 48b1007a4a4ad2..e5efdbabb9faea 100644 --- a/src/frontends/onnx/frontend/src/op/conv.cpp +++ b/src/frontends/onnx/frontend/src/op/conv.cpp @@ -10,10 +10,10 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/op/group_conv.hpp" #include "ngraph/op/util/attr_types.hpp" #include "onnx_import/core/null_node.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" diff --git a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp index 09f59bc2394603..0fcd58d900f310 100644 --- a/src/frontends/onnx/frontend/src/op/conv_transpose.cpp +++ b/src/frontends/onnx/frontend/src/op/conv_transpose.cpp @@ -13,14 +13,13 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/util/attr_types.hpp" #include "ngraph/output_vector.hpp" #include "ngraph/partial_shape.hpp" #include "ngraph/shape.hpp" #include "ngraph/validation_util.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "utils/convpool.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/crop.cpp b/src/frontends/onnx/frontend/src/op/crop.cpp index e7aeda12f4f665..175b18bf218680 100644 --- a/src/frontends/onnx/frontend/src/op/crop.cpp +++ b/src/frontends/onnx/frontend/src/op/crop.cpp @@ -6,7 +6,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include 
"ngraph/shape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp index 0647e1f6adb0cc..84e885f35a0cad 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp @@ -9,7 +9,6 @@ #include "default_opset.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/builder/make_constant.hpp" #include "ngraph/op/convert.hpp" #include "ngraph/shape.hpp" #include "ngraph/validation_util.hpp" diff --git a/src/frontends/onnx/frontend/src/op/div.hpp b/src/frontends/onnx/frontend/src/op/div.hpp index 1da6ae3bc4dc32..8d37bae67a81f9 100644 --- a/src/frontends/onnx/frontend/src/op/div.hpp +++ b/src/frontends/onnx/frontend/src/op/div.hpp @@ -10,7 +10,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include #include "default_opset.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include "ngraph/node.hpp" #include "ngraph/shape.hpp" #include "onnx_import/core/node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp index e147f825ee0141..74a33816db2419 100644 --- a/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dynamic_quantize_linear.cpp @@ -9,7 +9,6 @@ #include "default_opset.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/builder/make_constant.hpp" #include "ngraph/op/convert.hpp" #include "ngraph/shape.hpp" #include "ngraph/validation_util.hpp" diff --git a/src/frontends/onnx/frontend/src/op/flatten.cpp b/src/frontends/onnx/frontend/src/op/flatten.cpp index 2a8d445be1f8cd..ebf4499a279d4a 100644 --- a/src/frontends/onnx/frontend/src/op/flatten.cpp +++ b/src/frontends/onnx/frontend/src/op/flatten.cpp @@ -7,8 +7,8 @@ #include #include "exceptions.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/validation_util.hpp" +#include "ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -28,7 +28,7 @@ OutputVector flatten(const Node& node) { axis = ngraph::normalize_axis(node.get_description(), axis, data_rank_value, -data_rank_value, data_rank_value); OPENVINO_SUPPRESS_DEPRECATED_END } - return {ngraph::builder::opset1::flatten(data, static_cast(axis))}; + return {ov::op::util::flatten(data, static_cast(axis))}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/gemm.cpp b/src/frontends/onnx/frontend/src/op/gemm.cpp index c05f1963b99060..7807539f72f4cf 100644 --- a/src/frontends/onnx/frontend/src/op/gemm.cpp +++ b/src/frontends/onnx/frontend/src/op/gemm.cpp @@ -7,11 +7,11 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/matmul.hpp" #include "ngraph/op/multiply.hpp" +#include "ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -37,15 +37,15 @@ OutputVector gemm(const Node& node) { const bool trans_b = node.get_attribute_value("transB", 0); if (trans_a) { - input_a = ngraph::builder::opset1::transpose(input_a); + input_a = ov::op::util::transpose(input_a); } if (trans_b) { - input_b = ngraph::builder::opset1::transpose(input_b); + input_b = ov::op::util::transpose(input_b); } - input_a = ngraph::builder::opset1::flatten(input_a, 1); - input_b = ngraph::builder::opset1::flatten(input_b, 1); + input_a = ov::op::util::flatten(input_a, 1); + input_b = 
ov::op::util::flatten(input_b, 1); std::shared_ptr matmul_node = std::make_shared(input_a, input_b); diff --git a/src/frontends/onnx/frontend/src/op/gru.cpp b/src/frontends/onnx/frontend/src/op/gru.cpp index 62453ffe293d44..d21c32aa8c763c 100644 --- a/src/frontends/onnx/frontend/src/op/gru.cpp +++ b/src/frontends/onnx/frontend/src/op/gru.cpp @@ -8,10 +8,10 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/builder/split.hpp" #include "ngraph/shape.hpp" #include "onnx_import/core/null_node.hpp" +#include "ov_models/ov_builders/reshape.hpp" +#include "ov_models/ov_builders/split.hpp" #include "utils/recurrent.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -33,7 +33,7 @@ struct GRUInputMap : public recurrent::OpInputMap { auto bias = ng_inputs.at(3); // gates_count * 2 since B is: [Wb, Rb] const int split_parts = 2 * 3; - const auto split_bias = builder::opset1::split(bias, split_parts, 1); + const auto split_bias = ov::op::util::split(bias, split_parts, 1); const auto wr_z_bias = std::make_shared(split_bias.at(0), split_bias.at(3)); const auto wr_r_bias = std::make_shared(split_bias.at(1), split_bias.at(4)); // The result has shape: [num_directions, 4 * hidden_size] @@ -98,7 +98,7 @@ OutputVector gru(const Node& node) { const auto Y = gru_sequence->output(0); const auto Y_h = gru_sequence->output(1); - return {builder::opset1::reorder_axes(Y, {2, 1, 0, 3}), builder::opset1::reorder_axes(Y_h, {1, 0, 2})}; + return {ov::op::util::reorder_axes(Y, {2, 1, 0, 3}), ov::op::util::reorder_axes(Y_h, {1, 0, 2})}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/hardmax.cpp b/src/frontends/onnx/frontend/src/op/hardmax.cpp index 8200ffbc2db0c6..8079e8cf1fa659 100644 --- a/src/frontends/onnx/frontend/src/op/hardmax.cpp +++ b/src/frontends/onnx/frontend/src/op/hardmax.cpp @@ -5,10 +5,10 @@ #include "op/hardmax.hpp" #include "exceptions.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/op/one_hot.hpp" #include "ngraph/op/topk.hpp" #include "ngraph/validation_util.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "utils/common.hpp" #include "utils/reshape.hpp" @@ -29,7 +29,7 @@ OutputVector hardmax(const Node& node) { } // reshape to 2D - "batch size" x "input feature dimensions" (NxD) - const auto coerced_tensor = ngraph::builder::opset1::flatten(input, static_cast(axis)); + const auto coerced_tensor = ov::op::util::flatten(input, static_cast(axis)); const auto coerced_tensor_shape = std::make_shared(coerced_tensor); Output row_size = diff --git a/src/frontends/onnx/frontend/src/op/instance_norm.cpp b/src/frontends/onnx/frontend/src/op/instance_norm.cpp index e738940d659b82..a082aed15cd869 100644 --- a/src/frontends/onnx/frontend/src/op/instance_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/instance_norm.cpp @@ -10,8 +10,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/builder/reduce_ops.hpp" #include "ngraph/op/add.hpp" #include "ngraph/op/divide.hpp" #include "ngraph/op/multiply.hpp" diff --git a/src/frontends/onnx/frontend/src/op/log_softmax.cpp b/src/frontends/onnx/frontend/src/op/log_softmax.cpp index e1f4ef9167c3e7..76d0c1c4a8e080 100644 --- a/src/frontends/onnx/frontend/src/op/log_softmax.cpp +++ b/src/frontends/onnx/frontend/src/op/log_softmax.cpp @@ -7,15 +7,15 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/validation_util.hpp" +#include 
"ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace { std::shared_ptr onnx_logsoftmax(const Output data, const int64_t axis) { - const auto coerced_data = ngraph::builder::opset1::flatten(data, static_cast(axis)); + const auto coerced_data = ov::op::util::flatten(data, static_cast(axis)); const auto result = std::make_shared(coerced_data, 1); const auto data_shape = std::make_shared(data); return std::make_shared(result, data_shape, false); diff --git a/src/frontends/onnx/frontend/src/op/lp_norm.cpp b/src/frontends/onnx/frontend/src/op/lp_norm.cpp index 62d86dd216989a..76b899304c5598 100644 --- a/src/frontends/onnx/frontend/src/op/lp_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/lp_norm.cpp @@ -13,9 +13,9 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/builder/norm.hpp" #include "ngraph/op/divide.hpp" #include "ngraph/validation_util.hpp" +#include "ov_models/ov_builders/norm.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -42,7 +42,7 @@ OutputVector lp_norm(const Node& node) { const auto normalize_axis_const = default_opset::Constant::create(element::i64, {}, {normalize_axis}); std::shared_ptr norm = - ngraph::builder::opset1::lp_norm(data, normalize_axis_const, static_cast(p_norm), 0.0f, true); + ov::op::util::lp_norm(data, normalize_axis_const, static_cast(p_norm), 0.0f, true); return {std::make_shared(data, norm)}; } diff --git a/src/frontends/onnx/frontend/src/op/lp_pool.cpp b/src/frontends/onnx/frontend/src/op/lp_pool.cpp index b8dbf6a974d49c..16d92ac9d01635 100644 --- a/src/frontends/onnx/frontend/src/op/lp_pool.cpp +++ b/src/frontends/onnx/frontend/src/op/lp_pool.cpp @@ -11,9 +11,9 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/builder/norm.hpp" -#include "ngraph/builder/split.hpp" #include "ngraph/util.hpp" +#include "ov_models/ov_builders/norm.hpp" +#include "ov_models/ov_builders/split.hpp" #include "utils/common.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -36,13 +36,13 @@ OutputVector global_lp_pool(const Node& node) { CHECK_VALID_NODE(node, p_norm >= 0, "Only positive (including zero) values are supported for 'p' attribute."); - OutputVector slices = ngraph::builder::opset1::split(data, channels_count, channel_axis); + OutputVector slices = ov::op::util::split(data, channels_count, channel_axis); for (auto& slice : slices) { // all dimensions except spatial/feature const auto reduction_axes = common::get_monotonic_range_along_node_rank(data, 2); - slice = ngraph::builder::opset1::lp_norm(slice, reduction_axes, static_cast(p_norm)); + slice = ov::op::util::lp_norm(slice, reduction_axes, static_cast(p_norm)); // output shape is all ones except N channel Shape output_shape(data_shape.rank().get_length(), 1); diff --git a/src/frontends/onnx/frontend/src/op/lstm.cpp b/src/frontends/onnx/frontend/src/op/lstm.cpp index 93bfcae63632fa..5f4cceaeba80a8 100644 --- a/src/frontends/onnx/frontend/src/op/lstm.cpp +++ b/src/frontends/onnx/frontend/src/op/lstm.cpp @@ -13,8 +13,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/builder/split.hpp" #include "ngraph/enum_names.hpp" #include "ngraph/log.hpp" #include "ngraph/op/add.hpp" @@ -25,6 +23,8 @@ #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "onnx_import/core/null_node.hpp" +#include "ov_models/ov_builders/reshape.hpp" +#include 
"ov_models/ov_builders/split.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -55,7 +55,7 @@ struct LSTMNgInputMap { // Packed input sequences. // ONNX Shape: [seq_length, batch_size, input_size] // OpenVino Shape: [batch_size, seq_length, input_size] - m_input_map[LSTMInput::LSTM_INPUT_X] = builder::opset1::reorder_axes(ng_inputs.at(0), {1, 0, 2}); + m_input_map[LSTMInput::LSTM_INPUT_X] = ov::op::util::reorder_axes(ng_inputs.at(0), {1, 0, 2}); // Weight tensor for the gates. // Shape: [num_directions, 4*hidden_size, input_size] @@ -101,7 +101,7 @@ struct LSTMNgInputMap { // OpenVino Shape: [num_directions, 4*hidden_size] if (ng_inputs.size() > 3 && !ngraph::op::is_null(ng_inputs.at(3))) { auto bias = ng_inputs.at(3); - auto split_bias = builder::opset1::split(bias, 2, 1); + auto split_bias = ov::op::util::split(bias, 2, 1); m_input_map[LSTMInput::LSTM_INPUT_B] = std::make_shared(split_bias.at(0), split_bias.at(1)); m_input_map[LSTMInput::LSTM_INPUT_B] = @@ -132,7 +132,7 @@ struct LSTMNgInputMap { // ONNX Shape: [num_directions, batch_size, hidden_size] // OpenVino Shape: [batch_size, num_directions, hidden_size] if (ng_inputs.size() > 5 && !ngraph::op::is_null(ng_inputs.at(5))) { - m_input_map[LSTMInput::LSTM_INPUT_INIT_H] = builder::opset1::reorder_axes(ng_inputs.at(5), {1, 0, 2}); + m_input_map[LSTMInput::LSTM_INPUT_INIT_H] = ov::op::util::reorder_axes(ng_inputs.at(5), {1, 0, 2}); } else { auto init_h_shape = std::make_shared( OutputVector{batch_size_node, num_directions_node, hidden_size_node}, @@ -145,7 +145,7 @@ struct LSTMNgInputMap { // ONNX Shape: [num_directions, batch_size, hidden_size] // OpenVino Shape: [batch_size, num_directions, hidden_size] if (ng_inputs.size() > 6 && !ngraph::op::is_null(ng_inputs.at(6))) { - m_input_map[LSTMInput::LSTM_INPUT_INIT_C] = builder::opset1::reorder_axes(ng_inputs.at(6), {1, 0, 2}); + m_input_map[LSTMInput::LSTM_INPUT_INIT_C] = ov::op::util::reorder_axes(ng_inputs.at(6), {1, 0, 2}); } else { auto init_c_shape = std::make_shared( OutputVector{batch_size_node, num_directions_node, hidden_size_node}, @@ -258,9 +258,9 @@ OutputVector lstm(const Node& node) { const auto Y_h = lstm_sequence->output(1); const auto Y_c = lstm_sequence->output(2); - return {builder::opset1::reorder_axes(Y, {2, 1, 0, 3}), - builder::opset1::reorder_axes(Y_h, {1, 0, 2}), - builder::opset1::reorder_axes(Y_c, {1, 0, 2})}; + return {ov::op::util::reorder_axes(Y, {2, 1, 0, 3}), + ov::op::util::reorder_axes(Y_h, {1, 0, 2}), + ov::op::util::reorder_axes(Y_c, {1, 0, 2})}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/mul.hpp b/src/frontends/onnx/frontend/src/op/mul.hpp index 51b4d8cb4cbabd..f9a1d01a236472 100644 --- a/src/frontends/onnx/frontend/src/op/mul.hpp +++ b/src/frontends/onnx/frontend/src/op/mul.hpp @@ -10,7 +10,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include #include "default_opset.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include "ngraph/node.hpp" #include "ngraph/op/broadcast.hpp" #include "ngraph/op/multiply.hpp" diff --git a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp index ea0110c78376db..9fa72a999a4a73 100644 --- a/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp +++ b/src/frontends/onnx/frontend/src/op/org.openvinotoolkit/group_norm.cpp @@ -5,8 +5,6 @@ #include "op/org.openvinotoolkit/group_norm.hpp" #include "default_opset.hpp" -#include "ngraph/builder/reduce_ops.hpp" -#include 
"ngraph/builder/split.hpp" #include "ngraph/node.hpp" #include "ngraph/opsets/opset5.hpp" #include "onnx_import/core/node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/pad.cpp b/src/frontends/onnx/frontend/src/op/pad.cpp index 326d652aa3c16a..54d2f979ad42c0 100644 --- a/src/frontends/onnx/frontend/src/op/pad.cpp +++ b/src/frontends/onnx/frontend/src/op/pad.cpp @@ -8,7 +8,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/split.hpp" #include "ngraph/coordinate_diff.hpp" #include "ngraph/op/constant.hpp" #include "ngraph/op/convert.hpp" @@ -16,6 +15,7 @@ #include "ngraph/shape.hpp" #include "onnx_import/core/null_node.hpp" #include "op/pad.hpp" +#include "ov_models/ov_builders/split.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" @@ -91,7 +91,7 @@ OutputVector pad(const Node& node) { padding_begin = default_opset::Constant::create(element::i64, ngraph::Shape{half_size}, padding_begin_values); padding_end = default_opset::Constant::create(element::i64, ngraph::Shape{half_size}, padding_end_values); } else { - OutputVector padding = builder::opset1::split(pads, 2, 0); + OutputVector padding = ov::op::util::split(pads, 2, 0); padding_begin = padding.at(0); padding_end = padding.at(1); diff --git a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp index 9468a579e169f1..22d5e83217319a 100644 --- a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp @@ -12,10 +12,10 @@ #include "default_opset.hpp" #include "exceptions.hpp" #include "ngraph/axis_set.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/shape.hpp" #include "ngraph/type/element_type.hpp" #include "ngraph/validation_util.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "utils/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -196,7 +196,7 @@ OutputVector quantize_linear(Output x, Shape target_shape(x_shape.rank().get_length(), 1); target_shape[axis] = static_cast(x_shape[axis].get_length()); - y_scale = builder::opset1::reshape(y_scale, target_shape); + y_scale = ov::op::util::reshape(y_scale, target_shape); } if (y_zero_point_shape.rank().is_static() && y_zero_point_shape.rank().get_length() == 1 && @@ -211,7 +211,7 @@ OutputVector quantize_linear(Output x, Shape target_shape(x_shape.rank().get_length(), 1); target_shape[axis] = static_cast(x_shape[axis].get_length()); - y_zero_point = builder::opset1::reshape(y_zero_point, target_shape); + y_zero_point = ov::op::util::reshape(y_zero_point, target_shape); } return {detail::make_fake_quantize(y_scale, y_zero_point, x)}; diff --git a/src/frontends/onnx/frontend/src/op/range.cpp b/src/frontends/onnx/frontend/src/op/range.cpp index cd01426f89060e..14767584c1532b 100644 --- a/src/frontends/onnx/frontend/src/op/range.cpp +++ b/src/frontends/onnx/frontend/src/op/range.cpp @@ -7,6 +7,7 @@ #include #include "default_opset.hpp" +#include "exceptions.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -14,9 +15,29 @@ namespace onnx_import { namespace op { namespace set_1 { OutputVector range(const Node& node) { - const Output start{node.get_ng_inputs().at(0)}; - const Output stop{node.get_ng_inputs().at(1)}; - const Output step{node.get_ng_inputs().at(2)}; + const auto inputs = node.get_ng_inputs(); + CHECK_VALID_NODE(node, inputs.size() >= 3, "Minimum 3 inputs are required. 
Got: ", inputs.size()); + + Output start{inputs[0]}; + Output stop{inputs[1]}; + Output step{inputs[2]}; + + auto axes = + std::make_shared(ngraph::element::i64, ngraph::Shape{}, std::vector{0}); + + // Check if step is a tensor with a single value + if (start.get_shape().size() == 1 && start.get_shape()[0] == 1) { + start = std::make_shared(start, axes); + } + + if (stop.get_shape().size() == 1 && stop.get_shape()[0] == 1) { + stop = std::make_shared(stop, axes); + } + + if (step.get_shape().size() == 1 && step.get_shape()[0] == 1) { + step = std::make_shared(step, axes); + } + return {std::make_shared(start, stop, step, start.get_element_type())}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/op/reduce.cpp b/src/frontends/onnx/frontend/src/op/reduce.cpp index 9e70e09f4bdeb4..39b1d430f2c796 100644 --- a/src/frontends/onnx/frontend/src/op/reduce.cpp +++ b/src/frontends/onnx/frontend/src/op/reduce.cpp @@ -9,7 +9,6 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/norm.hpp" #include "ngraph/node.hpp" #include "op/identity.hpp" #include "utils/common.hpp" diff --git a/src/frontends/onnx/frontend/src/op/rnn.cpp b/src/frontends/onnx/frontend/src/op/rnn.cpp index b1c43a45a20193..26f8349223b8f7 100644 --- a/src/frontends/onnx/frontend/src/op/rnn.cpp +++ b/src/frontends/onnx/frontend/src/op/rnn.cpp @@ -7,7 +7,7 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "utils/recurrent.hpp" OPENVINO_SUPPRESS_DEPRECATED_START @@ -50,7 +50,7 @@ OutputVector rnn(const Node& node) { const auto Y = rnn_sequence->output(0); const auto Y_h = rnn_sequence->output(1); - return {builder::opset1::reorder_axes(Y, {2, 1, 0, 3}), builder::opset1::reorder_axes(Y_h, {1, 0, 2})}; + return {ov::op::util::reorder_axes(Y, {2, 1, 0, 3}), ov::op::util::reorder_axes(Y_h, {1, 0, 2})}; } } // namespace set_1 } // namespace op diff --git a/src/frontends/onnx/frontend/src/op/softmax.cpp b/src/frontends/onnx/frontend/src/op/softmax.cpp index df8769f7181f20..9b0289500848ba 100644 --- a/src/frontends/onnx/frontend/src/op/softmax.cpp +++ b/src/frontends/onnx/frontend/src/op/softmax.cpp @@ -7,15 +7,15 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/validation_util.hpp" +#include "ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { namespace onnx_import { namespace { std::shared_ptr onnx_softmax(const Output data, const int64_t axis) { - const auto coerced_data = ngraph::builder::opset1::flatten(data, static_cast(axis)); + const auto coerced_data = ov::op::util::flatten(data, static_cast(axis)); const auto result = std::make_shared(coerced_data, 1); const auto data_shape = std::make_shared(data); const bool special_zero = false; diff --git a/src/frontends/onnx/frontend/src/op/split.cpp b/src/frontends/onnx/frontend/src/op/split.cpp index 8da89e4f06fe42..8a90f5a318b547 100644 --- a/src/frontends/onnx/frontend/src/op/split.cpp +++ b/src/frontends/onnx/frontend/src/op/split.cpp @@ -2,12 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/builder/split.hpp" +#include "op/split.hpp" #include #include "default_opset.hpp" -#include "op/split.hpp" +#include "ov_models/ov_builders/split.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -20,10 +20,10 @@ OutputVector split(const Node& node) { if (node.has_attribute("split")) { const auto splits = node.get_attribute_value>("split"); - return 
ngraph::builder::opset1::split(input, splits, axis); + return ov::op::util::split(input, splits, axis); } else { const auto outputs_number = node.get_output_names().size(); - return ngraph::builder::opset1::split(input, outputs_number, axis); + return ov::op::util::split(input, outputs_number, axis); } } @@ -36,7 +36,7 @@ OutputVector split(const Node& node) { if (inputs.size() < 2) { const auto outputs_number = node.get_output_names().size(); - return ngraph::builder::opset1::split(inputs.at(0), outputs_number, axis); + return ov::op::util::split(inputs.at(0), outputs_number, axis); } else { const auto axis_node = default_opset::Constant::create(element::Type_t::i64, Shape{}, {axis}); return {std::make_shared(inputs.at(0), axis_node, inputs.at(1))->outputs()}; @@ -49,4 +49,4 @@ OutputVector split(const Node& node) { } // namespace onnx_import } // namespace ngraph -OPENVINO_SUPPRESS_DEPRECATED_END +OPENVINO_SUPPRESS_DEPRECATED_END \ No newline at end of file diff --git a/src/frontends/onnx/frontend/src/op/sub.hpp b/src/frontends/onnx/frontend/src/op/sub.hpp index 0e0ac4e2b73077..b625bdd70e4913 100644 --- a/src/frontends/onnx/frontend/src/op/sub.hpp +++ b/src/frontends/onnx/frontend/src/op/sub.hpp @@ -8,7 +8,6 @@ OPENVINO_SUPPRESS_DEPRECATED_START #include "default_opset.hpp" -#include "ngraph/builder/autobroadcast.hpp" #include "ngraph/node.hpp" #include "onnx_import/core/node.hpp" diff --git a/src/frontends/onnx/frontend/src/op/transpose.cpp b/src/frontends/onnx/frontend/src/op/transpose.cpp index 74272483f7e8d9..5c3eb212ca289d 100644 --- a/src/frontends/onnx/frontend/src/op/transpose.cpp +++ b/src/frontends/onnx/frontend/src/op/transpose.cpp @@ -7,8 +7,8 @@ #include #include -#include "ngraph/builder/reshape.hpp" #include "ngraph/node.hpp" +#include "ov_models/ov_builders/reshape.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -20,8 +20,7 @@ OutputVector transpose(const Node& node) { auto permute_axes = node.get_attribute_value>("perm", {}); - return {(permute_axes.empty()) ? ngraph::builder::opset1::transpose(data) - : ngraph::builder::opset1::reorder_axes(data, permute_axes)}; + return {(permute_axes.empty()) ? 
ov::op::util::transpose(data) : ov::op::util::reorder_axes(data, permute_axes)}; } } // namespace set_1 diff --git a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp index 3a4214638823d6..617ae6d3eab1a9 100644 --- a/src/frontends/onnx/frontend/src/utils/conv_factory.cpp +++ b/src/frontends/onnx/frontend/src/utils/conv_factory.cpp @@ -6,10 +6,10 @@ #include "default_opset.hpp" #include "exceptions.hpp" -#include "ngraph/builder/reshape.hpp" #include "ngraph/op/group_conv.hpp" #include "ngraph/op/util/attr_types.hpp" #include "onnx_import/core/null_node.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include "utils/conv_factory.hpp" #include "utils/convpool.hpp" #include "utils/reshape.hpp" diff --git a/src/frontends/onnx/frontend/src/utils/recurrent.cpp b/src/frontends/onnx/frontend/src/utils/recurrent.cpp index 14a414dfa2aee6..889d6f87b0ecbf 100644 --- a/src/frontends/onnx/frontend/src/utils/recurrent.cpp +++ b/src/frontends/onnx/frontend/src/utils/recurrent.cpp @@ -9,12 +9,11 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/autobroadcast.hpp" -#include "ngraph/builder/reshape.hpp" -#include "ngraph/builder/split.hpp" #include "ngraph/check.hpp" #include "ngraph/enum_names.hpp" #include "onnx_import/core/null_node.hpp" +#include "ov_models/ov_builders/reshape.hpp" +#include "ov_models/ov_builders/split.hpp" OPENVINO_SUPPRESS_DEPRECATED_START namespace ngraph { @@ -23,7 +22,7 @@ namespace recurrent { OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { const auto& ng_inputs = node.get_ng_inputs(); - m_map[OpInput::X] = builder::opset1::reorder_axes(ng_inputs.at(0), {1, 0, 2}); + m_map[OpInput::X] = ov::op::util::reorder_axes(ng_inputs.at(0), {1, 0, 2}); m_map[OpInput::W] = ng_inputs.at(1); m_map[OpInput::R] = ng_inputs.at(2); @@ -56,7 +55,7 @@ OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { // ------ Optional inputs ------ if (ng_inputs.size() > 3 && !ngraph::op::is_null(ng_inputs.at(3))) { auto bias = ng_inputs.at(3); - auto split_bias = builder::opset1::split(bias, 2, 1); + auto split_bias = ov::op::util::split(bias, 2, 1); m_map[OpInput::B] = std::make_shared(split_bias.at(0), split_bias.at(1)); } else { auto b_shape = std::make_shared( @@ -76,7 +75,7 @@ OpInputMap::OpInputMap(const onnx_import::Node& node, std::size_t gates_count) { } // The initial value of the hidden. 
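
The ov::op::util::reorder_axes calls replacing builder::opset1::reorder_axes throughout this file are, in effect, a Transpose around a constant permutation. A minimal sketch under that assumption (reorder_axes_sketch is a hypothetical name, not the real helper):

#include <memory>
#include <vector>

#include "openvino/opsets/opset1.hpp"

// Illustrative only: permute tensor axes, e.g. {1, 0, 2} swaps the first two
// axes of a 3D tensor, which is how the ONNX [seq, batch, input] layout is
// turned into OpenVINO's [batch, seq, input] layout in this file.
std::shared_ptr<ov::Node> reorder_axes_sketch(const ov::Output<ov::Node>& value,
                                              const std::vector<int64_t>& order) {
    const auto order_const = ov::opset1::Constant::create(ov::element::i64, ov::Shape{order.size()}, order);
    return std::make_shared<ov::opset1::Transpose>(value, order_const);
}
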
if (ng_inputs.size() > 5 && !ngraph::op::is_null(ng_inputs.at(5))) { - m_map[OpInput::INIT_H] = builder::opset1::reorder_axes(ng_inputs.at(5), {1, 0, 2}); + m_map[OpInput::INIT_H] = ov::op::util::reorder_axes(ng_inputs.at(5), {1, 0, 2}); } else { auto init_h_shape = std::make_shared( OutputVector{batch_size_node, num_directions_node, hidden_size_node}, diff --git a/src/frontends/onnx/frontend/src/utils/reshape.cpp b/src/frontends/onnx/frontend/src/utils/reshape.cpp index d4ab7a4cc7af22..d1fad12c8fade9 100644 --- a/src/frontends/onnx/frontend/src/utils/reshape.cpp +++ b/src/frontends/onnx/frontend/src/utils/reshape.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/builder/reshape.hpp" +#include "ov_models/ov_builders/reshape.hpp" #include #include @@ -10,7 +10,6 @@ #include #include "default_opset.hpp" -#include "ngraph/builder/make_constant.hpp" #include "ngraph/op/util/op_types.hpp" #include "ngraph/shape.hpp" #include "utils/reshape.hpp" @@ -79,7 +78,7 @@ Output interpret_as_scalar(const Output& node) { return std::make_shared(node.get_element_type(), ngraph::Shape{}, value); } - return builder::opset1::reshape(node, Shape{}); + return ov::op::util::reshape(node, Shape{}); } Output reshape_channel_shaped_node_to_nchw(const Output& node, diff --git a/src/frontends/onnx/tests/CMakeLists.txt b/src/frontends/onnx/tests/CMakeLists.txt index 461e4019222bc3..0a9c36c711d151 100644 --- a/src/frontends/onnx/tests/CMakeLists.txt +++ b/src/frontends/onnx/tests/CMakeLists.txt @@ -45,9 +45,8 @@ endif() # backend specific test files must meet the following requirements: # 1) The must be named .in.cpp # 2) They must be in the `frontends/tests/onnx` directory -# 3) Include "util/test_control.hpp" in your cpp file -# 4) add the line `static string s_manifest = "${MANIFEST}";` to your cpp file -# 5) Use the `NGRAPH_TEST` macro in place of `TEST`. +# 3) Include "util/test_control.hpp" and "onnx_utils.hpp" in your cpp file +# 4) add the line `static string s_manifest = onnx_backend_manifest("${MANIFEST}");` to your cpp file # All such files are configured via cmake which replaces all instances of cmake variables # such as ${BACKEND_NAME} with their values, such as CPU, GPU, or INTERPRETER. diff --git a/src/frontends/onnx/tests/__init__.py b/src/frontends/onnx/tests/__init__.py index 4bc643580a3b00..e47f08323e0a48 100644 --- a/src/frontends/onnx/tests/__init__.py +++ b/src/frontends/onnx/tests/__init__.py @@ -107,8 +107,7 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): xfail_issue_36534 = xfail_test(reason="RuntimeError: node input index is out of range") xfail_issue_36536 = xfail_test(reason="RuntimeError: can't protect") xfail_issue_36538 = xfail_test(reason="RuntimeError: Check 'PartialShape::broadcast_merge_into( pshape, " - "node->get_input_partial_shape(i), autob)' failed at " - "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:") + "node->get_input_partial_shape(i), autob)' failed at ") skip_issue_39658 = pytest.mark.skip(reason="RuntimeError: Tile operation has a form that is not supported." 
" z should be converted to TileIE operation.") diff --git a/src/frontends/onnx/tests/ci_utils/onnxruntime/skip_tests b/src/frontends/onnx/tests/ci_utils/onnxruntime/skip_tests index 475b79b4acac66..01c751928881c8 100644 --- a/src/frontends/onnx/tests/ci_utils/onnxruntime/skip_tests +++ b/src/frontends/onnx/tests/ci_utils/onnxruntime/skip_tests @@ -4,3 +4,19 @@ TransposeOpTest.TwoDim_int16 GatherOpTest.Gather_axis1_indices2d_int16 SoftmaxOperator.ThreeDimsAxis1 SoftmaxOperator.ThreeDimsAxis0 +UpsampleOpTest.NhwcUpsampleOpNearestTest +UpsampleOpTest.NhwcUpsampleOpNearestTest_int32 +UpsampleOpTest.NhwcUpsampleOpNearestTest_uint8 +UpsampleOpTest.NhwcUpsampleOpNearest2XTest +UpsampleOpTest.UpsampleOpNearest222XTest +UpsampleOpTest.NhwcUpsampleOpNearest222XTest +UpsampleOpTest.NhwcUpsampleOpNearest15XTest +UpsampleOpTest.NhwcUpsampleOpNearest2XTest_int32 +UpsampleOpTest.NhwcUpsampleOp4D1CBilinearTest +UpsampleOpTest.NhwcUpsampleOp4DBilinearTest +UpsampleOpTest.UpsampleOp4DBilinearTest_int32 +UpsampleOpTest.NhwcUpsampleOp4DBilinearTest_int32 +UpsampleOpTest.NhwcUpsampleOpNearest2XTest_opset9 +TensorOpTest.ReshapeWithEmptyDim +TensorOpTest.Unsqueeze_scalar +TensorOpTest.Unsqueeze_scalar_2 diff --git a/src/frontends/onnx/tests/ci_utils/onnxruntime/version b/src/frontends/onnx/tests/ci_utils/onnxruntime/version index ce4c889e94a34b..d2452e2d7b872b 100644 --- a/src/frontends/onnx/tests/ci_utils/onnxruntime/version +++ b/src/frontends/onnx/tests/ci_utils/onnxruntime/version @@ -1 +1,2 @@ -rel-1.16.0 +rel-1.16.2 + diff --git a/src/frontends/onnx/tests/model_support_tests.cpp b/src/frontends/onnx/tests/model_support_tests.cpp index e76eb8fac61220..1efdffc7e825d0 100644 --- a/src/frontends/onnx/tests/model_support_tests.cpp +++ b/src/frontends/onnx/tests/model_support_tests.cpp @@ -19,7 +19,7 @@ std::string model_path(const char* model) { } // namespace TEST(ONNXReader_ModelSupported, basic_model) { - // this model is a basic ONNX model taken from ngraph's unit test (add_abc.onnx) + // this model is a basic ONNX model taken from OpenVINO's unit test (add_abc.onnx) // it contains the minimum number of fields required to accept this file as a valid model EXPECT_NO_THROW(ov::Core{}.read_model(model_path("supported/basic.onnx"))); } diff --git a/src/frontends/onnx/tests/onnx_editor.cpp b/src/frontends/onnx/tests/onnx_editor.cpp index 1881c6d7f130c3..b3c8038165c4d8 100644 --- a/src/frontends/onnx/tests/onnx_editor.cpp +++ b/src/frontends/onnx/tests/onnx_editor.cpp @@ -8,10 +8,8 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "default_opset.hpp" #include "editor.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_test_util.hpp" #include "onnx_utils.hpp" #include "openvino/op/constant.hpp" diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index a15ebb3ba14ddc..5269cf3b0ce355 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -4365,7 +4365,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_constant_sparse_tensor_float16_3x4) { auto model = convert_model("constant_sparse_tensor_float16_3x4.onnx"); auto test_case = ov::test::TestCase(model, s_device); - test_case.add_expected_output(Shape{3, 4}, {1, 0, 0, 8, 0, 0, 0, 0, 0, 0, 3, 0}); + test_case.add_expected_output(Shape{3, 4}, {1, 0, 0, 8, 0, 0, 0, 0, 0, 0, 3, 0}); test_case.run(); } @@ -4429,7 +4429,7 @@ OPENVINO_TEST(${BACKEND_NAME}, 
onnx_constant_sparse_tensor_bfloat16_3x4) { auto model = convert_model("constant_sparse_tensor_bfloat16_3x4.onnx"); auto test_case = ov::test::TestCase(model, s_device); - test_case.add_expected_output(Shape{3, 4}, {1, 0, 0, 8, 0, 0, 0, 0, 0, 0, 3, 0}); + test_case.add_expected_output(Shape{3, 4}, {1, 0, 0, 8, 0, 0, 0, 0, 0, 0, 3, 0}); test_case.run(); } @@ -4495,7 +4495,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_float16_tensor_as_int32) { auto test_case = ov::test::TestCase(model, s_device); // clang-format off - test_case.add_input(Shape{1, 1, 4, 4}, + test_case.add_input(Shape{1, 1, 4, 4}, { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, @@ -4504,7 +4504,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_float16_tensor_as_int32) { [[[[0.25, 0.5, 0.25], [0.5, 1.0, 0.5], [0.25, 0.5, 0.25]]]] */ - test_case.add_expected_output(Shape{1, 1, 2, 2}, + test_case.add_expected_output(Shape{1, 1, 2, 2}, { 20, 24, 36, 40 }); // clang-format on @@ -5651,7 +5651,7 @@ OPENVINO_TEST(${BACKEND_NAME}, castlike_float16_to_uint32) { auto test_case = ov::test::TestCase(model, s_device); - test_case.add_input(Shape{1, 1, 2, 2}, std::vector{1.5f, 2.3f, 3.f, 4.f}); + test_case.add_input(Shape{1, 1, 2, 2}, std::vector{1.5f, 2.3f, 3.f, 4.f}); test_case.add_input(Shape{4}, {1, 2, 3, 4}); test_case.add_expected_output(std::vector{1, 2, 3, 4}); @@ -5663,7 +5663,7 @@ OPENVINO_TEST(${BACKEND_NAME}, castlike_float16_to_int64) { auto test_case = ov::test::TestCase(model, s_device); - test_case.add_input(Shape{1, 1, 2, 2}, std::vector{1.5f, 2.3f, 3.f, 4.f}); + test_case.add_input(Shape{1, 1, 2, 2}, std::vector{1.5f, 2.3f, 3.f, 4.f}); test_case.add_input(Shape{4}, {1, 2, 3, 4}); test_case.add_expected_output(std::vector{1, 2, 3, 4}); @@ -5700,8 +5700,8 @@ OPENVINO_TEST(${BACKEND_NAME}, castlike_int8_to_float16) { auto test_case = ov::test::TestCase(model, s_device); test_case.add_input(Shape{1, 1, 2, 2}, std::vector{-127, -2, 3, 4}); - test_case.add_input(Shape{4}, {1, 2, 3, 4}); - test_case.add_expected_output(std::vector{-127.0, -2.0, 3.0, 4.0}); + test_case.add_input(Shape{4}, {1, 2, 3, 4}); + test_case.add_expected_output(std::vector{-127.0, -2.0, 3.0, 4.0}); test_case.run(); } diff --git a/src/frontends/onnx/tests/onnx_import_com_microsoft.in.cpp b/src/frontends/onnx/tests/onnx_import_com_microsoft.in.cpp index b45bcd342bb519..94e5ec9d596fef 100644 --- a/src/frontends/onnx/tests/onnx_import_com_microsoft.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_com_microsoft.in.cpp @@ -16,8 +16,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "default_opset.hpp" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_import_const_folding.in.cpp b/src/frontends/onnx/tests/onnx_import_const_folding.in.cpp index 24e5c45271e6d6..7ef1daba717877 100644 --- a/src/frontends/onnx/tests/onnx_import_const_folding.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_const_folding.in.cpp @@ -20,14 +20,14 @@ static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); namespace { template -void test_constant_folding(std::shared_ptr ng_function, +void test_constant_folding(std::shared_ptr ov_model, const std::vector expected_output, const PartialShape expected_shape = PartialShape::dynamic()) { pass::Manager pass_manager; pass_manager.register_pass(); - pass_manager.run_passes(ng_function); + pass_manager.run_passes(ov_model); - for (auto ng_node : ng_function->get_ordered_ops()) { + for 
(auto ng_node : ov_model->get_ordered_ops()) { if (ov::is_type(ng_node)) { const auto folded_node = ov::as_type_ptr(ng_node); const auto output_values = folded_node->cast_vector(); diff --git a/src/frontends/onnx/tests/onnx_import_controlflow.in.cpp b/src/frontends/onnx/tests/onnx_import_controlflow.in.cpp index 165cf755e85782..94d77c678619b7 100644 --- a/src/frontends/onnx/tests/onnx_import_controlflow.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_controlflow.in.cpp @@ -7,7 +7,6 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "common_test_utils/type_prop.hpp" -#include "default_opset.hpp" #include "gtest/gtest.h" #include "onnx_utils.hpp" #include "openvino/op/loop.hpp" diff --git a/src/frontends/onnx/tests/onnx_import_convpool.in.cpp b/src/frontends/onnx/tests/onnx_import_convpool.in.cpp index 464ee806755bd5..f77dff5748968a 100644 --- a/src/frontends/onnx/tests/onnx_import_convpool.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_convpool.in.cpp @@ -20,7 +20,6 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" #include "openvino/op/max_pool.hpp" diff --git a/src/frontends/onnx/tests/onnx_import_deprecated.in.cpp b/src/frontends/onnx/tests/onnx_import_deprecated.in.cpp index 79fefe8d80a612..f95925e0b442f4 100644 --- a/src/frontends/onnx/tests/onnx_import_deprecated.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_deprecated.in.cpp @@ -20,7 +20,6 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_import_exceptions.cpp b/src/frontends/onnx/tests/onnx_import_exceptions.cpp index 3af1c53c9cc55d..09446f39738db7 100644 --- a/src/frontends/onnx/tests/onnx_import_exceptions.cpp +++ b/src/frontends/onnx/tests/onnx_import_exceptions.cpp @@ -8,7 +8,6 @@ #include "common_test_utils/type_prop.hpp" #include "exceptions.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_import_library.cpp b/src/frontends/onnx/tests/onnx_import_library.cpp index f1523bc971e78b..7da8c44a67d244 100644 --- a/src/frontends/onnx/tests/onnx_import_library.cpp +++ b/src/frontends/onnx/tests/onnx_import_library.cpp @@ -7,12 +7,14 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_control.hpp" #include "gtest/gtest.h" -#include "ngraph/file_util.hpp" +#include "onnx_utils.hpp" -static std::string s_manifest = ngraph::file_util::path_join(ov::test::utils::getExecutableDirectory(), "${MANIFEST}"); +using namespace ov::frontend::onnx::tests; + +static std::string s_manifest = onnx_backend_manifest(MANIFEST); OPENVINO_TEST(onnx, check_ir_version_support) { - // It appears you've changed the ONNX library version used by nGraph. Please update the value + // It appears you've changed the ONNX library version used by OpenVINO. 
Please update the value // tested below to make sure it equals the current IR_VERSION enum value defined in ONNX headers // // You should also check the onnx_common/src/onnx_model_validator.cpp file and make sure that diff --git a/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp b/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp index 411bd4b448dbee..eb7edeccbb8198 100644 --- a/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_org_openvino.in.cpp @@ -29,10 +29,8 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "common_test_utils/type_prop.hpp" -#include "default_opset.hpp" #include "gtest/gtest.h" #include "onnx_import/core/null_node.hpp" -#include "onnx_import/onnx.hpp" #include "onnx_import/onnx_utils.hpp" #include "onnx_utils.hpp" diff --git a/src/frontends/onnx/tests/onnx_import_org_pytorch.in.cpp b/src/frontends/onnx/tests/onnx_import_org_pytorch.in.cpp index 0e055717a5cdeb..608fc3d050d5f2 100644 --- a/src/frontends/onnx/tests/onnx_import_org_pytorch.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_org_pytorch.in.cpp @@ -16,8 +16,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "default_opset.hpp" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_import_quant.in.cpp b/src/frontends/onnx/tests/onnx_import_quant.in.cpp index 643f41bafff2fa..4ff11a4cf9b780 100644 --- a/src/frontends/onnx/tests/onnx_import_quant.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_quant.in.cpp @@ -21,7 +21,6 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/fake_quantize.hpp" diff --git a/src/frontends/onnx/tests/onnx_import_reshape.in.cpp b/src/frontends/onnx/tests/onnx_import_reshape.in.cpp index 8e9f032009c7ff..f3e8afc3aabe90 100644 --- a/src/frontends/onnx/tests/onnx_import_reshape.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_reshape.in.cpp @@ -19,7 +19,6 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_import_rnn.in.cpp b/src/frontends/onnx/tests/onnx_import_rnn.in.cpp index f2434565cb7ae7..b6eeb60000cc3a 100644 --- a/src/frontends/onnx/tests/onnx_import_rnn.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_rnn.in.cpp @@ -21,7 +21,6 @@ #include "common_test_utils/test_control.hpp" #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" #include "openvino/op/gru_sequence.hpp" #include "openvino/op/lstm_sequence.hpp" @@ -33,7 +32,7 @@ using namespace ov::frontend::onnx::tests; static std::string s_manifest = onnx_backend_manifest("${MANIFEST}"); static std::string s_device = backend_name_to_device("${BACKEND_NAME}"); -// ONNX LSTM tests (implemented by nGraph LSTMCell and LSTMSequence) +// ONNX LSTM tests (implemented by OpenVINO LSTMCell and LSTMSequence) OPENVINO_TEST(${BACKEND_NAME}, onnx_model_lstm_fwd_default_const) { auto model = convert_model("lstm_fwd_default_const.onnx"); diff --git a/src/frontends/onnx/tests/onnx_import_signal.in.cpp 
b/src/frontends/onnx/tests/onnx_import_signal.in.cpp index b4dd623db867a6..14cf03b1dfb886 100644 --- a/src/frontends/onnx/tests/onnx_import_signal.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_signal.in.cpp @@ -16,7 +16,6 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" -#include "onnx_import/onnx.hpp" #include "onnx_utils.hpp" using namespace ov; diff --git a/src/frontends/onnx/tests/onnx_tensor_names.cpp b/src/frontends/onnx/tests/onnx_tensor_names.cpp index 12a4d567d0edeb..fcfef4c386f1bf 100644 --- a/src/frontends/onnx/tests/onnx_tensor_names.cpp +++ b/src/frontends/onnx/tests/onnx_tensor_names.cpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_case.hpp" #include "common_test_utils/test_control.hpp" #include "gtest/gtest.h" -#include "onnx_import/onnx.hpp" #include "onnx_import/onnx_utils.hpp" #include "onnx_utils.hpp" #include "openvino/op/abs.hpp" diff --git a/src/frontends/onnx/tests/op_extension.cpp b/src/frontends/onnx/tests/op_extension.cpp index 12ffbfdb807746..81b6b8db6590fb 100644 --- a/src/frontends/onnx/tests/op_extension.cpp +++ b/src/frontends/onnx/tests/op_extension.cpp @@ -176,38 +176,3 @@ TEST(ONNXOpExtensionViaCommonConstructor, onnx_op_extension_via_ov_type_name_wit std::shared_ptr model; EXPECT_NO_THROW(fe->convert(input_model)); } - -OPENVINO_SUPPRESS_DEPRECATED_START -// Old API test - -namespace { -class OldApiNode : public InferenceEngine::IExtension { - void GetVersion(const InferenceEngine::Version*& versionInfo) const noexcept override { - static InferenceEngine::Version ext_desc = {{1, 0}, "1.0", "old_api_node"}; - versionInfo = &ext_desc; - } - std::map getOpSets() override { - static std::map opsets; - if (opsets.empty()) { - ngraph::OpSet opset; - opset.insert(); - opsets["util"] = opset; - } - return opsets; - } - void Unload() noexcept override{}; -}; -} // namespace - -TEST(ONNXOpExtensionViaCommonConstructor, onnx_op_extension_mixed_legacy_and_new_api) { - const auto input_model_path = ov::test::utils::getModelFromTestModelZoo( - ov::util::path_join({TEST_ONNX_MODELS_DIRNAME, "relu_custom_domain.onnx"})); - ov::Core core; - core.add_extension(std::make_shared()); - const auto new_api_ext = - std::make_shared>("CustomRelu", "my_custom_domain"); - core.add_extension(new_api_ext); - EXPECT_NO_THROW(core.read_model(input_model_path)); -} - -OPENVINO_SUPPRESS_DEPRECATED_END diff --git a/src/frontends/onnx/tests/runtime/ie/unit_test.manifest b/src/frontends/onnx/tests/runtime/ie/unit_test.manifest index dabc8d7bfe95d4..b2e87a2588ec65 100644 --- a/src/frontends/onnx/tests/runtime/ie/unit_test.manifest +++ b/src/frontends/onnx/tests/runtime/ie/unit_test.manifest @@ -1,12 +1,12 @@ #------------------------------------------------------------------------------- # -# Inference Engine all plugins excludes +# OpenVINO all plugins excludes # #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- # -# nGraph ONNX C++ Importer unit tests +# OpenVINO ONNX C++ FrontEnd unit tests # #------------------------------------------------------------------------------- @@ -25,7 +25,7 @@ onnx_model_tile onnx_model_tile_static onnx_model_logsoftmax_0D -# nGraph function's output number 0 was not found in the CNNNetwork built from it. +# Model's output number 0 was not found in the ov::Model. 
onnx_model_split_equal_parts_2d
onnx_model_split_variable_parts_2d
onnx_top_k_opset_10_const_k
@@ -167,7 +167,7 @@ IE_CPU.onnx_roi_align_f32
#-------------------------------------------------------------------------------
#
-# Inference Engine CPU plugin excludes
+# OpenVINO CPU plugin excludes
#
#-------------------------------------------------------------------------------
@@ -175,7 +175,7 @@ IE_CPU.onnx_roi_align_f32
IE_CPU.onnx_model_reverse_sequence_1_batch_0
IE_CPU.onnx_model_reverse_sequence_0_batch_1
-# nGraph Interpolate operation with name: Y cannot be converted to Interpolate layer with name:
+# OpenVINO Interpolate operation with name: Y cannot be converted to Interpolate layer with name:
# Y because output with index 0 contains dynamic shapes: {?,?,?,?}
IE_CPU.onnx_resize11_scales_nearest_asymmetric_floor_dynamic_sizes
IE_CPU.onnx_resize11_up_sizes_cubic_half_pixel_dynamic_sizes
@@ -193,7 +193,7 @@ IE_CPU.onnx_model_reduce_sum_dynamic_rank_input
# Axes has zero dimension which is not allowed
IE_CPU.onnx_model_reduce_sum_13_axes_as_0_dim_input
-# nGraph test infrastructure doesn't support (b)float16 input data in IE_CPU-tests
+# OpenVINO test infrastructure doesn't support (b)float16 input data in IE_CPU-tests
IE_CPU.onnx_constant_sparse_tensor_float16_3x4
IE_CPU.onnx_float16_tensor_as_int32
@@ -210,7 +210,7 @@ onnx_model_mm_nms_rotated
#-------------------------------------------------------------------------------
#
-# Inference Engine GPU plugin excludes
+# OpenVINO GPU plugin excludes
#
#-------------------------------------------------------------------------------
diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py
index da2f79d7fb6c0a..29103b45819241 100644
--- a/src/frontends/onnx/tests/tests_python/test_backend.py
+++ b/src/frontends/onnx/tests/tests_python/test_backend.py
@@ -698,13 +698,9 @@ def expect_fail(test_case_path, xfail):  # type: (str) -> None
    (
        xfail_issue_125485,
        "OnnxBackendNodeModelTest.test_affine_grid_2d_align_corners_cpu",
-        "OnnxBackendNodeModelTest.test_affine_grid_2d_align_corners_expanded_cpu",
        "OnnxBackendNodeModelTest.test_affine_grid_2d_cpu",
-        "OnnxBackendNodeModelTest.test_affine_grid_2d_expanded_cpu",
        "OnnxBackendNodeModelTest.test_affine_grid_3d_align_corners_cpu",
-        "OnnxBackendNodeModelTest.test_affine_grid_3d_align_corners_expanded_cpu",
        "OnnxBackendNodeModelTest.test_affine_grid_3d_cpu",
-        "OnnxBackendNodeModelTest.test_affine_grid_3d_expanded_cpu",
    ),
    (
        xfail_issue_125486,
diff --git a/src/frontends/pytorch/README.md b/src/frontends/pytorch/README.md
index b44014e7b9a1ae..54ae5e5f254e50 100644
--- a/src/frontends/pytorch/README.md
+++ b/src/frontends/pytorch/README.md
@@ -135,6 +135,148 @@ To test the entire suite of the PyTorch operation set support, run the following
python -m pytest layer_tests/pytorch_tests
```
+### Investigation of accuracy issues
+
+Accuracy issues can be caused by an incorrect graph returned by `torch.jit.trace`
+that does not generalize to other inputs or shapes. Such issues can be solved by
+using `torch.jit.script` to obtain the full graph, or by scripting only the
+data-dependent parts of the model and tracing the rest. How to do this is
+described in the
+[pytorch documentation](https://pytorch.org/docs/stable/jit.html#mixing-tracing-and-scripting).
+
+Accuracy problems can also be caused by the incorrect conversion of specific
+operations. To identify such an operation, it is usually helpful to obtain the
+original `TorchScript` graph.
That can be done using tracing,
+scripting, or by manually creating a `TorchScriptPythonDecoder`:
+
+```python
+import torch
+from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder
+
+model = SomeModel()
+model.eval()
+example = <...>  # use the valid model inputs
+
+# get traced model graph
+traced_model = torch.jit.trace(model, example)
+print(traced_model.inlined_graph)
+
+# get scripted model graph
+scripted_model = torch.jit.script(model)
+print(scripted_model.inlined_graph)
+
+# get graph directly from TorchScriptPythonDecoder
+decoder = TorchScriptPythonDecoder(model, example_input=example)
+print(decoder.graph_element)
+```
+
+Another frequent reason for accuracy issues is incorrect input data: a model can
+be sensitive to its inputs, so it is recommended to provide real images (audio,
+text) from a dataset for accuracy validation. How do you find which operation
+causes the problem? Let's consider the following model:
+
+```python
+import torch
+
+class example_model(torch.nn.Module):
+    def some_work(self, x):
+        return torch.randn_like(x)
+
+    def forward(self, x):
+        y = x * x
+        z = self.some_work(x)
+        res = x + y + z
+        return res
+```
+
+It has the following inlined graph:
+
+```
+graph(%self : __torch__.example_model,
+      %x : Float(1, 3, 100, 100, strides=[30000, 10000, 100, 1], requires_grad=0, device=cpu)):
+  %y : Float(1, 3, 100, 100, strides=[30000, 10000, 100, 1], requires_grad=0, device=cpu) = aten::mul(%x, %x) # /home/user/example.py:9:0
+  %3 : NoneType = prim::Constant()
+  %4 : NoneType = prim::Constant()
+  %5 : NoneType = prim::Constant()
+  %6 : bool = prim::Constant[value=0]() # /home/user/example.py:6:0
+  %7 : NoneType = prim::Constant()
+  %z : Float(1, 3, 100, 100, strides=[30000, 10000, 100, 1], requires_grad=0, device=cpu) = aten::randn_like(%x, %3, %4, %5, %6, %7) # /home/user/example.py:6:0
+  %9 : int = prim::Constant[value=1]() # /home/user/example.py:11:0
+  %10 : Float(1, 3, 100, 100, strides=[30000, 10000, 100, 1], requires_grad=0, device=cpu) = aten::add(%x, %y, %9) # /home/user/example.py:11:0
+  %11 : int = prim::Constant[value=1]() # /home/user/example.py:11:0
+  %12 : Float(1, 3, 100, 100, strides=[30000, 10000, 100, 1], requires_grad=0, device=cpu) = aten::add(%10, %z, %11) # /home/user/example.py:11:0
+  return (%12)
+```
+
+This model contains a random operation to demonstrate an accuracy issue: by its
+nature it cannot produce the same results in OpenVINO and PyTorch, because
+random numbers are generated differently. To compare the numbers obtained in an
+`FP32` inference scenario, it is recommended to use `1e-4` as both the absolute
+and the relative threshold; if `FP16` is used or the model is quantized, the
+thresholds can be increased. To check our model, let's run the following code:
+
+```python
+import openvino as ov
+import numpy as np
+
+example = (torch.randn(1, 3, 100, 100),)
+model = example_model()
+ov_model = ov.convert_model(model, example_input=example)
+core = ov.Core()
+compiled = core.compile_model(ov_model, "CPU")
+
+pt_res = model(*example)
+ov_res = compiled(example)
+np.testing.assert_allclose(pt_res.detach().numpy(), ov_res[0], atol=1e-4, rtol=1e-4)
+```
+
+It produces the following output:
+
+```
+AssertionError:
+Not equal to tolerance rtol=0.0001, atol=0.0001
+
+Mismatched elements: 29996 / 30000 (100%)
+Max absolute difference: 6.0375447
+Max relative difference: 16586.805
+ x: array([[[[ 1.124452e+00,  6.839355e-01, -1.321532e+00, ...,
+          -4.090581e-01,  1.400993e+00,  2.823834e+00],
+         [-8.246053e-01,  2.376951e-01,  2.846813e+00, ...,...
+ y: array([[[[-3.556393e+00,  6.779741e-01,  6.177414e-01, ...,
+          -1.879819e+00, -3.007278e-01,  3.827740e+00],
+         [-1.255121e+00,  8.543051e-01,  3.162248e+00, ...,...
+```
+
+Here the mismatch is caused by the random operation, but it can also be a
+different issue. One possible way to find the offending operation is to create
+additional outputs from the graph. We can do that by changing the `forward`
+function to also return the `y` and `z` values. That shows that `y` is returned
+with good accuracy while `z` has accuracy issues, and the inlined graph shows
+that `z` is produced by line 6 of our code:
+
+```
+ %z : Float(1, 3, 100, 100, strides=[30000, 10000, 100, 1], requires_grad=0, device=cpu) = aten::randn_like(%x, %3, %4, %5, %6, %7) # /home/user/example.py:6:0
+```
+
+and we will see the `torch.randn_like` function call on that line.
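+
+A minimal sketch of such a change to the `example_model` above (the
+`example_model_debug` name is ours; the extra outputs are added purely for
+debugging and should be removed afterwards):
+
+```python
+import torch
+
+class example_model_debug(torch.nn.Module):
+    def some_work(self, x):
+        return torch.randn_like(x)
+
+    def forward(self, x):
+        y = x * x
+        z = self.some_work(x)
+        res = x + y + z
+        # expose the intermediate tensors as extra outputs so each of them
+        # can be compared against PyTorch separately
+        return res, y, z
+```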
+
+#### Possible problems in existing operation translators
+
+Some operations can be translated incorrectly. For example, PyTorch allows
+passing different data types to an operation, while OpenVINO usually requires
+the same type for all inputs of an operation (more information about which
+types an OpenVINO operation accepts can be found in the
+[documentation](https://docs.openvino.ai/2023.2/openvino_docs_operations_specifications.html)).
+PyTorch has a fixed set of type alignment rules; to reproduce them, the PyTorch
+Frontend has the `align_eltwise_input_types` helper function, which aligns the
+types of two inputs. If this function is not used when needed, or is used
+incorrectly, the operation is converted incorrectly (see the example at the end
+of this section).
+
+Another common problem is in-place (mutating) operations. PyTorch operations
+can modify their input tensors, which is not directly supported by OpenVINO.
+To work around this, `NodeContext` has a special function `mutate_input`, which
+creates a new tensor with the same name as the input tensor. However, if
+`mutate_input` is omitted in a translation function, the unchanged tensor is
+returned after the operation executes, so it is very important to pay attention
+to this.
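+
+Both pitfalls can be reproduced from the user side. The module below is purely
+illustrative and is not part of the frontend:
+
+```python
+import torch
+
+class pitfalls_model(torch.nn.Module):
+    def forward(self, x):
+        # implicit type promotion: an int32 tensor is added to a float input,
+        # so the converted graph has to align the element types explicitly
+        y = x + torch.arange(4, dtype=torch.int32)
+        # in-place mutation: relu_ overwrites x, so every later use of x
+        # must observe the updated tensor (which is what mutate_input ensures)
+        x.relu_()
+        return y + x
+```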
+ ## See Also * [OpenVINO README](../../../README.md) * [OpenVINO Core Components](../../README.md) diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp index 864619b57c0c63..062d29cf8b06ac 100644 --- a/src/frontends/tensorflow/src/op_table.cpp +++ b/src/frontends/tensorflow/src/op_table.cpp @@ -157,6 +157,7 @@ const std::map get_supported_ops() { {"ClipByValue", CreatorFunction(translate_clip_by_value_op)}, {"Complex", CreatorFunction(translate_complex_op)}, {"ComplexAbs", CreatorFunction(translate_complex_abs_op)}, + {"ConjugateTranspose", CreatorFunction(translate_conj_transpose_op)}, {"Concat", CreatorFunction(translate_concat_op)}, {"ConcatV2", CreatorFunction(translate_concat_op)}, {"Const", CreatorFunction(translate_const_op)}, diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index 95e79f001ca28d..c2f756b4aecc62 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -50,6 +50,7 @@ OP_CONVERTER(translate_clip_by_value_op); OP_CONVERTER(translate_complex_op); OP_CONVERTER(translate_complex_abs_op); OP_CONVERTER(translate_concat_op); +OP_CONVERTER(translate_conj_transpose_op); OP_CONVERTER(translate_const_op); OP_CONVERTER(translate_conv_2d_op); OP_CONVERTER(translate_conv_2d_backprop_input_op); diff --git a/src/frontends/tensorflow_common/src/op/conj_transpose.cpp b/src/frontends/tensorflow_common/src/op/conj_transpose.cpp new file mode 100644 index 00000000000000..8b4a51bdcbc828 --- /dev/null +++ b/src/frontends/tensorflow_common/src/op/conj_transpose.cpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_op_table.hpp" +#include "helper_ops/complex_type_mark.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/negative.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/transpose.hpp" + +using namespace std; +using namespace ov::op; + +namespace ov { +namespace frontend { +namespace tensorflow { +namespace op { + +OutputVector translate_conj_transpose_op(const NodeContext& node) { + default_op_checks(node, 2, {"ConjugateTranspose"}, true); + + auto x = node.get_input(0); + auto perm = node.get_input(1); + + auto complex_type_mark = as_type_ptr(x.get_node_shared_ptr()); + if (complex_type_mark) { + element::Type complex_part_type = complex_type_mark->get_complex_part_type(); + auto x = complex_type_mark->input_value(0); + + auto real_index = make_shared(element::i32, Shape{1}, 0); + auto imag_index = make_shared(element::i32, Shape{1}, 1); + + auto gather_axis = make_shared(element::i32, Shape{1}, -1); + + auto real = make_shared(x, real_index, gather_axis)->output(0); + auto imag = make_shared(x, imag_index, gather_axis)->output(0); + + imag = make_shared(imag); + + auto conj_tensor = make_shared(OutputVector{real, imag}, -1)->output(0); + + OutputVector concat_inputs; + concat_inputs.push_back(perm); + concat_inputs.push_back(make_shared(perm, perm.get_element_type())); + + auto concat = make_shared(concat_inputs, 0); + auto conj_transpose = make_shared(conj_tensor, concat); + + set_node_name(node.get_name(), conj_transpose); + auto complex_transpose = make_shared(conj_transpose, complex_part_type); + return {complex_transpose->output(0)}; + } + + auto conj_transpose = make_shared(x, perm); + 
set_node_name(node.get_name(), conj_transpose); + return {conj_transpose}; +} + +} // namespace op +} // namespace tensorflow +} // namespace frontend +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 85caa7eccaf217..ba0df9d418ba84 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -108,15 +108,9 @@ bool FullyConnected::isSupportedOperation(const std::shared_ptr& errorMessage = "Only Constant operation on 'bias' input is supported"; return false; } - const auto inRank = fc->get_input_partial_shape(DATA_ID).size(); const auto weightRank = fc->get_input_partial_shape(WEIGHTS_ID).size(); - if (!one_of(inRank, 2u, 3u, 4u)) { - errorMessage = "Doesn't support 'data' input with rank: " + std::to_string(inRank); - return false; - } - if ((one_of(inRank, 2u, 3u) && weightRank != 2) || (inRank == 4 && weightRank != 4)) { - errorMessage = "Doesn't support 'data' input with rank: " + std::to_string(inRank) + - " and 'weight' input with rank: " + std::to_string(weightRank); + if (weightRank != 2) { + errorMessage = "Doesn't support 'weight' input with rank: " + std::to_string(weightRank); return false; } } catch (...) { @@ -160,16 +154,9 @@ VectorDims FullyConnected::makeDummyInputDims() const { auto inMinDims = inShape.getMinDims(); auto inMaxDims = inShape.getMaxDims(); + inMinDims.back() = weightDims.back(); + inMaxDims.back() = weightDims.back(); - if (inMinDims.size() == 3) { - inMinDims.back() = weightDims.back(); - inMaxDims.back() = weightDims.back(); - } else { - for (size_t i = 1; i < inMinDims.size(); i++) { - inMinDims[i] = weightDims[i]; - inMaxDims[i] = weightDims[i]; - } - } return MemoryDescUtils::makeDummyShape(Shape(inMinDims, inMaxDims)).getStaticDims(); } @@ -394,6 +381,11 @@ createDescriptorInternalForConv(DnnlMemoryDescCPtr inputDescPtr, } } +template +static std::vector normalizeDims(const std::vector& dims) { + return {std::accumulate(dims.begin(), dims.end() - 1, (T)1, std::multiplies()), dims[dims.size() - 1]}; +} + static dnnl::primitive_desc createPrimitiveDesc(const FCKey& key, const dnnl::engine& engine) { // use conv1x1 primitive for computation if (key.useConv1x1) { @@ -407,17 +399,18 @@ static dnnl::primitive_desc createPrimitiveDesc(const FCKey& key, const dnnl::en // fallback to normal inner product primitive auto inDesc = key.inp0->getDnnlDesc(); const auto& inDims = inDesc.get_dims(); // @TODO query + copy might be slow - if (inDims.size() == 3) { - auto normalizedInDims = {inDims[0] * inDims[1], inDims[2]}; + if (inDims.size() > 2) { + dnnl::memory::dims normalizedInDims = normalizeDims(inDims); inDesc = inDesc.reshape(normalizedInDims); } + auto outDesc = key.out->getDnnlDesc(); const auto& outDims = outDesc.get_dims(); // @TODO query + copy might be slow - - if (outDims.size() == 3) { - auto normalizedOutDims = { outDims[0] * outDims[1], outDims[2] }; + if (outDims.size() > 2) { + dnnl::memory::dims normalizedOutDims = normalizeDims(outDims); outDesc = outDesc.reshape(normalizedOutDims); } + dnnl::memory::desc weiDesc; if (key.useSparseWeights) { weiDesc = key.inp1->getDnnlDesc(); @@ -673,10 +666,10 @@ void FullyConnected::execute(dnnl::stream strm) { auto updateMemoryPtr = [this](int argType) { auto param = primArgs.find(argType); if (param != primArgs.end()) { - if (argType == DNNL_ARG_SRC && (getInputShapeAtPort(DATA_ID).getRank() == 3 || useConv1x1)) { + if (argType == DNNL_ARG_SRC && 
(getInputShapeAtPort(DATA_ID).getRank() > 2 || useConv1x1)) { primArgs.at(argType).set_data_handle(getParentEdgesAtPort(0)[0]->getMemoryPtr()->getData()); } - if (argType == DNNL_ARG_DST && (getOutputShapeAtPort(0).getRank() == 3 || useConv1x1)) { + if (argType == DNNL_ARG_DST && (getOutputShapeAtPort(0).getRank() > 2 || useConv1x1)) { primArgs.at(argType).set_data_handle(getChildEdgesAtPort(0)[0]->getMemoryPtr()->getData()); } } @@ -708,17 +701,7 @@ void FullyConnected::setPostOps(dnnl::primitive_attr& attr, const VectorDims& di // 2D: [X,Y] [Y,Z] => [X,Z] with N=X,IC=Y,OC=Z // 3D: [B,X,Y] [Y,Z] => [B,X,Z] with N=B*X,IC=Y,OC=Z - VectorDims dims; - if (dims_ext.size() == 2) { - // 2D - dims = dims_ext; - } else if (dims_ext.size() == 3) { - // 3D - dims.push_back(dims_ext[0] * dims_ext[1]); - dims.push_back(dims_ext[2]); - } else { - OPENVINO_THROW("Unexpected rank(", dims_ext.size(), ") for output tensor of node: ", getName()); - } + VectorDims dims = normalizeDims(dims_ext); DnnlPostOpsComposer dnnlpoc(getEngine(), attr, ops, postOpsArgs, dims, dims.size() - 1, canBeExecutedInInt8(), 1 << 0, getDQScales(), withBiases); @@ -802,11 +785,11 @@ const std::vector& FullyConnected::getDefaultImplPriority() { void FullyConnected::createDescriptorInternal(const dnnl::memory::desc &inputDesc, const dnnl::memory::desc &outputDesc) { auto create2Dcandidate = [](const dnnl::memory::desc &desc) { - if (desc.get_dims().size() != 3) // already 2D + if (desc.get_dims().size() == 2) // already 2D return desc; auto inDims = desc.get_dims(); - auto normalizedInDims = {inDims[0] * inDims[1], inDims[2]}; + dnnl::memory::dims normalizedInDims = normalizeDims(inDims); return dnnl::memory::desc(normalizedInDims, desc.get_data_type(), DnnlExtensionUtils::GetPlainFormatByRank(normalizedInDims.size())); @@ -967,7 +950,7 @@ void FullyConnected::initSupportedPrimitiveDescriptors() { std::shared_ptr FullyConnected::getSrcMemDesc(const dnnl::primitive_desc &prim_desc, size_t idx) const { auto desc = idx > 0 ? 
prim_desc.weights_desc(idx - 1) : prim_desc.src_desc(idx); - if (getInputShapeAtPort(idx).getRank() == 3 + if (getInputShapeAtPort(idx).getRank() != 2 // report original plain layout for weight since it needs to be reordered dynamically at runtime || (idx == 1 && !useSparseWeights)) { return std::make_shared( @@ -984,7 +967,7 @@ std::shared_ptr FullyConnected::getSrcMemDesc(const dnnl::primitive_ std::shared_ptr FullyConnected::getDstMemDesc(const dnnl::primitive_desc &prim_desc, size_t idx) const { auto desc = prim_desc.dst_desc(idx); - if (getOutputShapeAtPort(idx).getRank() == 3) { + if (getOutputShapeAtPort(idx).getRank() != 2) { return std::make_shared( DnnlExtensionUtils::DataTypeToElementType(desc.get_data_type()), getOutputShapeAtPort(idx)); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp index 8079461b73a803..b4fdda2c4cde40 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp @@ -53,8 +53,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { auto rank_b = shape_b.rank().get_length(); // Transformation to FC is not supported for 1D inputs - if (rank_a == 1 || rank_b == 1 || - rank_a > 3 || rank_b > 3) { + if (rank_a == 1 || rank_b == 1) { return false; } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/reshape_fc_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/reshape_fc_fusion.cpp deleted file mode 100644 index 727764a7fb4ce0..00000000000000 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/reshape_fc_fusion.cpp +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "reshape_fc_fusion.hpp" -#include "transformations/cpu_opset/common/op/fully_connected.hpp" -#include -#include "openvino/opsets/opset1.hpp" -#include "openvino/core/rt_info.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" -#include "openvino/pass/pattern/op/or.hpp" - -#include "itt.hpp" - -ov::intel_cpu::ReshapeFullyConnectedFusion::ReshapeFullyConnectedFusion() { - MATCHER_SCOPE(ReshapeFullyConnectedFusion); - auto m_reshape = ov::pass::pattern::wrap_type({ov::pass::pattern::any_input(ov::pass::pattern::has_static_shape()), - ov::pass::pattern::any_input()}, - ov::pass::pattern::has_static_shape()); - ov::OutputVector fcInputs = {m_reshape, ov::pass::pattern::any_input()}; - auto fc = ov::pass::pattern::wrap_type(fcInputs, ov::pass::pattern::has_static_shape()); - - ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher &m) { - auto fc = std::dynamic_pointer_cast(m.get_match_root()); - if (!fc) - return false; - auto reshape = std::dynamic_pointer_cast(fc->get_input_node_shared_ptr(0)); - if (!reshape) - return false; - - // Check that Reshape reshapes 4D tensor to 2D or input shape = output shape - auto shape_in = reshape->input_value(0).get_shape(); - auto shape_out = reshape->get_shape(); - if (!((shape_in.size() == 4 && reshape->get_shape().size() == 2) || (shape_in == shape_out && !shape_in.empty()))) { - return false; - } - - // Check that Weights[O, C*H*W] consistent with Input[N, C, H, W] - auto shape_w = fc->input_value(1).get_shape(); - if (shape_in[0] != shape_out[0] || std::accumulate(shape_in.begin() + 1, shape_in.end(), size_t{1}, 
std::multiplies()) != shape_w[1]) { - return false; - } - - ov::NodeVector new_ops; - auto weightInput = fc->input(1).get_source_output(); - ov::Shape newWeightsShape; - const auto outShape = fc->get_shape(); - if (shape_in.size() == 3) { - newWeightsShape = ov::Shape({outShape[2], shape_in[2]}); - } else { - newWeightsShape.push_back(outShape[1]); - for (size_t i = 1; i < shape_in.size(); i++) - newWeightsShape.push_back(shape_in[i]); - } - - if (newWeightsShape != weightInput.get_shape()) { - auto newShape = std::make_shared(ov::element::i64, ov::Shape{newWeightsShape.size()}, newWeightsShape); - weightInput = std::make_shared(weightInput, newShape, true); - new_ops.push_back(weightInput.get_node_shared_ptr()); - } - - std::shared_ptr new_fc = std::make_shared( - reshape->input_value(0), - weightInput, - ov::Rank(outShape.size()), - fc->output(0).get_element_type()); - new_ops.push_back(new_fc); - new_fc->set_friendly_name(fc->get_friendly_name()); - ov::copy_runtime_info({reshape, fc}, new_ops); - ov::replace_node(fc, new_fc); - return true; - }; - - auto m = std::make_shared(fc, matcher_name); - register_matcher(m, callback); -} diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/reshape_fc_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/reshape_fc_fusion.hpp deleted file mode 100644 index 8bf7026ab19816..00000000000000 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/reshape_fc_fusion.hpp +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/pass/graph_rewrite.hpp" - -namespace ov { -namespace intel_cpu { - -class ReshapeFullyConnectedFusion : public ov::pass::MatcherPass { -public: - OPENVINO_RTTI("ReshapeFullyConnectedFusion", "0"); - ReshapeFullyConnectedFusion(); -}; - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp index f0f5c3e44d0dd0..e334d11babe0ee 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp @@ -5,7 +5,6 @@ #include "openvino/pass/constant_folding.hpp" #include "openvino/op/fake_quantize.hpp" #include "openvino/pass/manager.hpp" -#include "common/pass/reshape_fc_fusion.hpp" #include "common/pass/align_matmul_input_ranks.hpp" #include "transformations/common_optimizations/reshape_prelu.hpp" #include "common/pass/convert_broadcast_to_tiles.hpp" @@ -42,9 +41,6 @@ inline void ConvertToCPUSpecificOpset(std::shared_ptr &nGraphFunc) { CPU_REGISTER_PASS_COMMON(manager, ConvertToLeakyRelu); CPU_REGISTER_PASS_COMMON(manager, ConvertToSwishCPU); CPU_REGISTER_PASS_COMMON(manager, OptimizeSequenceTransposes); - if (!ov::op::util::has_op_with_type(nGraphFunc)) { - CPU_REGISTER_PASS_COMMON(manager, ReshapeFullyConnectedFusion); - } // after transformation "MoveEltwiseUpThroughDataMov" there can be reshaped sequences that should be eliminated or fused CPU_REGISTER_PASS_COMMON(manager, ov::pass::ReshapeSequenceFusion); CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConstantFolding); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp 
b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp index 552631fc33242e..2aa4269a850d08 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/mat_mul_with_constant_transformation.cpp @@ -51,7 +51,7 @@ std::vector testValues = { { std::vector(4 * 2, 2.f), ngraph::element::f32, ngraph::Shape{ 2, 4 } }, { 256ul, {{1}, {1}, {2, 1}, {2, 1}}, {-128.f}, {127.f}, {-128.f, -12.8f}, {127.f, 12.7f} }, { {}, {}, {} }, - "MatMul", + "FullyConnected", "u8" }, // 4D with Dq on weights @@ -61,7 +61,7 @@ std::vector testValues = { { std::vector(4 * 2, 2.f), ngraph::element::i8, ngraph::Shape{ 2, 4 } }, {}, { ngraph::element::f32, {}, {{0.1f, 0.01f}, ngraph::element::f32, ngraph::Shape{ 2, 1 }} }, - "MatMul", + "FullyConnected", "u8" }, // 3D with the same values diff --git a/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp b/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp index b1edcce5ae4fd7..cace5696980a1b 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp @@ -249,6 +249,96 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest14) { } } +TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_1) { + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{ 6, 5 }, { 1 }); + auto matmul = std::make_shared(input1, input2, false, true); + + model = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{ 6, 5 }, { 1 }); + auto fc = std::make_shared(input1, input2, ov::Rank(4), ov::element::f32); + + model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); + } +} + +TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_2) { + { + auto input1 = std::make_shared(ov::element::f32, ov::PartialShape{-1, -1, 1, 5}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{1, 10, 5}, {1}); + auto fc = std::make_shared(input1, input2, false, true); + + model = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f32, ov::PartialShape{-1, -1, 1, 5}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{10, 5}, {1}); + auto fc = std::make_shared(input1, input2, ov::Rank(4)); + + model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); + } +} + +TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_3) { + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{2, 4}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{1, 1, 5, 4}, { 1 }); + auto matmul = std::make_shared(input1, input2, false, true); + + model = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{2, 4}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{5, 4}, { 1 }); + auto fc = std::make_shared(input1, 
input2, ov::Rank(4), ov::element::f32); + + model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); + } +} + +TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_4) { + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{3, 2, 4}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{1, 1, 5, 4}, { 1 }); + auto matmul = std::make_shared(input1, input2, false, true); + + model = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{3, 2, 4}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{5, 4}, { 1 }); + auto fc = std::make_shared(input1, input2, ov::Rank(4), ov::element::f32); + + model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); + } +} + +TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_5) { + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{2, 3, 2, 4}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{1, 1, 5, 4}, { 1 }); + auto matmul = std::make_shared(input1, input2, false, true); + + model = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); + manager.register_pass(); + } + { + auto input1 = std::make_shared(ov::element::f32, ov::Shape{2, 3, 2, 4}); + auto input2 = ov::opset1::Constant::create(ov::element::f32, ov::Shape{5, 4}, { 1 }); + auto fc = std::make_shared(input1, input2, ov::Rank(4), ov::element::f32); + + model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); + } +} + TEST_F(TransformationTestsF, ConvertMatMulToFCTest_second_input_rank_adj_1) { { auto input1 = std::make_shared(ov::element::f32, ov::Shape{5, 2, 3}); @@ -385,4 +475,4 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_compressed_u8_weights) { model_ref = std::make_shared(ov::NodeVector{ matmul }, ov::ParameterVector{ data }); } -} \ No newline at end of file +} diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp index 0e8b92e5d63d85..94f56e8b926d39 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp @@ -103,6 +103,7 @@ inline void ForceExit() { std::_Exit(-1); } +void convert_and_copy(const ov::ITensor* src, cldnn::memory::ptr dst, cldnn::stream& stream); void convert_and_copy(const cldnn::memory::ptr src, ov::ITensor const* dst, const cldnn::stream& stream); void convert_and_copy(const ov::ITensor* src, ov::ITensor const* dst, const cldnn::stream& stream); diff --git a/src/plugins/intel_gpu/src/plugin/common_utils.cpp b/src/plugins/intel_gpu/src/plugin/common_utils.cpp index 0375aa495f2de8..bf6bb5d79cd01a 100644 --- a/src/plugins/intel_gpu/src/plugin/common_utils.cpp +++ b/src/plugins/intel_gpu/src/plugin/common_utils.cpp @@ -94,6 +94,27 @@ void convert_and_copy(const void* src_ptr, ov::element::Type src_et, void* dst_p namespace ov { namespace intel_gpu { +void convert_and_copy(const ov::ITensor* src, cldnn::memory::ptr dst, cldnn::stream& stream) { + const bool blocking = true; + auto src_et = src->get_element_type(); + auto dst_et = dst->get_layout().data_type; + + if (dst_et == src_et) { + if (auto remote = dynamic_cast(src)) { + auto mem = remote->get_original_memory(); + dst->copy_from(stream, *mem, blocking); + } else { + dst->copy_from(stream, src->data(), blocking); + return; + } + } + + size_t size = 
ov::shape_size(src->get_shape());
+    ov::Tensor tmp_tensor(dst_et, src->get_shape());
+    ::convert_and_copy(src->data(), src_et, tmp_tensor.data(), dst_et, size, cldnn::layout({}, ov::element::undefined, cldnn::format::bfyx, cldnn::padding()));
+    dst->copy_from(stream, tmp_tensor.data(), blocking);
+}
+
void convert_and_copy(const cldnn::memory::ptr src, ov::ITensor const* dst, const cldnn::stream& stream) {
    auto src_et = src->get_layout().data_type;
    auto dst_et = dst->get_element_type();
diff --git a/src/plugins/intel_gpu/src/plugin/variable_state.cpp b/src/plugins/intel_gpu/src/plugin/variable_state.cpp
index c5fc0df49872cd..1d89991826b94a 100644
--- a/src/plugins/intel_gpu/src/plugin/variable_state.cpp
+++ b/src/plugins/intel_gpu/src/plugin/variable_state.cpp
@@ -54,17 +54,9 @@ void VariableState::set_layout(const cldnn::layout& new_layout) {
}
void VariableState::set_state(const ov::SoPtr& state) {
-    const bool blocking = true;
-    auto remote_ptr = std::dynamic_pointer_cast(state._ptr);
    m_layout.set_partial_shape(state->get_shape());
    update_device_buffer();
-    if (remote_ptr != nullptr) {
-        auto user_memory = remote_ptr->get_memory();
-        m_memory->copy_from(m_context->get_engine().get_service_stream(), *user_memory, blocking);
-    } else {
-        auto data = state->data();
-        m_memory->copy_from(m_context->get_engine().get_service_stream(), data, blocking);
-    }
+    convert_and_copy(state._ptr.get(), m_memory, m_context->get_engine().get_service_stream());
    set();
}
diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp
index 07be7cc8c6ee4c..72c54e42b2d250 100644
--- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp
+++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp
@@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
+#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/test_common.hpp"
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/node_builders/activation.hpp"
@@ -238,4 +239,39 @@ TEST(VariablesTest, smoke_canSetStateTensor) {
    ASSERT_NO_THROW(request.infer());
}
-} // namespace
\ No newline at end of file
+
+TEST(VariablesTest, smoke_set_get_state_with_convert) {
+    auto build_model = [](ov::element::Type type, const ov::PartialShape& shape) {
+        auto param = std::make_shared(type, shape);
+        const ov::op::util::VariableInfo variable_info { shape, type, "v0" };
+        auto variable = std::make_shared(variable_info);
+        auto read_value = std::make_shared(param, variable);
+        auto add = std::make_shared(read_value, param);
+        auto assign = std::make_shared(add, variable);
+        auto res = std::make_shared(add);
+        return std::make_shared(ov::ResultVector { res }, ov::SinkVector { assign }, ov::ParameterVector{param}, "StateTestModel");
+    };
+
+    auto ov = ov::Core();
+    const ov::Shape variable_shape = {1, 3, 2, 4};
+    const ov::Shape input_shape = {1, 3, 2, 4};
+    const ov::element::Type et = ov::element::f32;
+    auto model = build_model(et, input_shape);
+    auto compiled_model = ov.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f16));
+    auto request = compiled_model.create_infer_request();
+
+    auto variables = request.query_state();
+    ASSERT_EQ(variables.size(), 1);
+    auto variable = variables.front();
+    ASSERT_EQ(variable.get_name(), "v0");
+    auto state_tensor = variable.get_state();
+    ASSERT_EQ(state_tensor.get_shape(), variable_shape);
+    ASSERT_EQ(state_tensor.get_element_type(), et);
+
auto tensor_to_set = ov::test::utils::create_and_fill_tensor(et, state_tensor.get_shape()); + variable.set_state(tensor_to_set); + state_tensor = variable.get_state(); + + ov::test::utils::compare(tensor_to_set, state_tensor, 1e-5f, 1e-5f); +} +} // namespace diff --git a/src/plugins/template/backend/CMakeLists.txt b/src/plugins/template/backend/CMakeLists.txt index 0dc03242b554af..4af6c8664fcb2b 100644 --- a/src/plugins/template/backend/CMakeLists.txt +++ b/src/plugins/template/backend/CMakeLists.txt @@ -13,8 +13,8 @@ set (SRC backend.hpp executable.cpp executable.hpp - int_backend.cpp - int_executable.cpp + int_backend.cpp + int_executable.cpp evaluates_map.cpp ) @@ -39,7 +39,7 @@ target_compile_definitions(${TARGET_NAME} SHARED_LIB_PREFIX="${CMAKE_SHARED_LIBRARY_PREFIX}" SHARED_LIB_SUFFIX="${OV_BUILD_POSTFIX}${CMAKE_SHARED_LIBRARY_SUFFIX}" ) -target_link_libraries(${TARGET_NAME} PRIVATE openvino::builders openvino::reference openvino::util openvino::runtime::dev openvino::shape_inference) +target_link_libraries(${TARGET_NAME} PRIVATE openvino::reference openvino::util openvino::runtime::dev openvino::shape_inference) target_include_directories(${TARGET_NAME} PUBLIC $ $ diff --git a/src/tests/ov_helpers/ov_models/CMakeLists.txt b/src/tests/ov_helpers/ov_models/CMakeLists.txt index 69631bd82ba2a0..8203855b2bde43 100644 --- a/src/tests/ov_helpers/ov_models/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_models/CMakeLists.txt @@ -23,6 +23,8 @@ ov_add_target( openvino::runtime::dev common_test_utils ADD_CLANG_FORMAT + EXCLUDED_SOURCE_PATHS + "${CMAKE_CURRENT_SOURCE_DIR}/ov_builders" ) ov_build_target_faster(${TARGET_NAME} @@ -34,3 +36,5 @@ ov_build_target_faster(${TARGET_NAME} ov_developer_package_export_targets(TARGET ${TARGET_NAME} INSTALL_INCLUDE_DIRECTORIES "${PUBLIC_HEADERS_DIR}/") + +add_subdirectory(ov_builders) \ No newline at end of file diff --git a/src/core/builder/CMakeLists.txt b/src/tests/ov_helpers/ov_models/ov_builders/CMakeLists.txt similarity index 76% rename from src/core/builder/CMakeLists.txt rename to src/tests/ov_helpers/ov_models/ov_builders/CMakeLists.txt index ee87ece0365d60..fa65f42d554a15 100644 --- a/src/core/builder/CMakeLists.txt +++ b/src/tests/ov_helpers/ov_models/ov_builders/CMakeLists.txt @@ -1,8 +1,8 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME "openvino_builders") +set(TARGET_NAME ov_builders) set(BUILDER_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include/) @@ -21,9 +21,7 @@ add_library(${TARGET_NAME} STATIC ${LIBRARY_SRC} ${PUBLIC_HEADERS}) add_library(openvino::builders ALIAS ${TARGET_NAME}) set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME builders) -ov_build_target_faster(${TARGET_NAME} - UNITY - PCH PRIVATE "src/precomp.hpp") +ov_build_target_faster(${TARGET_NAME} UNITY) target_include_directories(${TARGET_NAME} PUBLIC $ @@ -33,11 +31,13 @@ if(NOT BUILD_SHARED_LIBS) target_compile_definitions(${TARGET_NAME} PUBLIC OPENVINO_STATIC_LIBRARY) endif() +target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime) + ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}) # install & export -ov_install_static_lib(openvino_builders ${OV_CPACK_COMP_CORE}) +ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) -ov_developer_package_export_targets(TARGET openvino::builders - INSTALL_INCLUDE_DIRECTORIES "${BUILDER_INCLUDE_DIR}/") +ov_developer_package_export_targets(TARGET ${TARGET_NAME} + INSTALL_INCLUDE_DIRECTORIES 
"${BUILDER_INCLUDE_DIR}/") \ No newline at end of file diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/broadcast.hpp b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/broadcast.hpp new file mode 100644 index 00000000000000..364198387fc982 --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/broadcast.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/node.hpp" + +namespace ov { +namespace op { +namespace util { +Output make_broadcast(const Output& node, const Shape& target_shape, const AxisSet& broadcast_axes); + +Output make_broadcast(const Output& node, const Shape& target_shape, std::size_t start_match_axis); +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/norm.hpp b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/norm.hpp new file mode 100644 index 00000000000000..964becc2f0db04 --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/norm.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include + +#include "openvino/core/node.hpp" + +namespace ov { +namespace op { +namespace util { +/// \brief Creates node which calculates L-p norm on input tensor. +/// +/// \param[in] value The input tensor. +/// \param[in] reduction_axes The axes along which we calculate norm. +/// \param[in] p_norm The p norm to calculate. +/// \param[in] bias The bias added to the calculated sum. +/// \param[in] keep_dims The flag indicates if axes will be removed or kept. +/// +/// \return L-p norm of value. The output sub-graph is composed of v1 ops. +/// +std::shared_ptr lp_norm(const Output& value, + const Output& reduction_axes, + std::size_t p_norm = 2, + float bias = 0.f, + bool keep_dims = false); +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/reshape.hpp b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/reshape.hpp new file mode 100644 index 00000000000000..0e1be4a7c761df --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/reshape.hpp @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/core/node.hpp" + +namespace ov { +namespace op { +namespace util { +/// \brief Change shape of a value +/// +/// \param[in] value The value to be reshaped. +/// \param[in] shape The new shape. +/// +/// \return Reshape:v1 op. +std::shared_ptr reshape(const Output& value, const Shape& shape); + +/// \brief Permute axes according to specified axes_order parameter. +/// +/// \param The vlaue whose axes we want to permute. +/// \param axes_order The permutation of axes. +/// +/// \return Transpose:v1 op. +std::shared_ptr reorder_axes(const Output& value, std::vector axes_order = {}); + +/// \brief Return transposed value (with axes in reversed order). +/// +/// \param Value to transpose. +/// +/// \return Transpose:v1 op. +std::shared_ptr transpose(const Output& value); + +/// \brief Flatten a value into a 2D matrix, with a static dividing axis. +/// +/// \param The tensor to be flattened. +/// \param The axis dividing shape. 
+/// +/// \return The new value will be a 2D matrix representing the flattened input +/// node. +std::shared_ptr flatten(const Output& value, int axis); +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/builder/include/ngraph/builder/split.hpp b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/split.hpp similarity index 53% rename from src/core/builder/include/ngraph/builder/split.hpp rename to src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/split.hpp index 7bf1ebd18b1fb3..086cf635d78f2f 100644 --- a/src/core/builder/include/ngraph/builder/split.hpp +++ b/src/tests/ov_helpers/ov_models/ov_builders/include/ov_models/ov_builders/split.hpp @@ -1,43 +1,13 @@ // Copyright (C) 2018-2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // - #pragma once -#include - -#include "ngraph/node.hpp" - -namespace ngraph { -namespace builder { -/// \brief Split value on specified axis into multiple parts. -/// -/// \param value The value to be split. -/// \param length_parts The vector defining the lengths of each split part. -/// \param axis The axis we split input node on. Default value is zero axis. -/// -/// \return The vector containing multiple nodes we split input node into. -/// -NGRAPH_DEPRECATED("This builder was deprecated.") -OutputVector split(const Output& value, const std::vector& length_parts, int64_t axis = 0); -/// \brief Split node on specified axis into multiple parts. -/// -/// \param value The value to split. -/// \param split_parts The number of parts we want to split output at given -/// axis. The length of the axis to split must be divisible by -/// this value. -/// \param axis The axis we split input node on. Default value is zero axis. -/// -/// \note This implementation supports negative `axis` values (similar to NumPy -/// indexing). This means that the axis to split on will be counted from -/// the back of the tensor (negative values are subtracted from its rank). -/// -/// \return The vector containing multiple outputs we split input node into. -/// -NGRAPH_DEPRECATED("This builder was deprecated.") -OutputVector split(const Output& value, int64_t split_parts, int axis = 0); +#include "openvino/core/node.hpp" -namespace opset1 { +namespace ov { +namespace op { +namespace util { /// \brief Split value on specified axis into multiple parts. /// /// \param value The value to be split. 
@@ -70,6 +40,6 @@ OutputVector split(const Output& value, const std::vector& split_ /// The vector is output of VariadicSplit:v1 op /// OutputVector split(const Output& value, int64_t num_splits, int64_t axis = 0); -} // namespace opset1 -} // namespace builder -} // namespace ngraph +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp b/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp new file mode 100644 index 00000000000000..3b6e6c8be448ed --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/src/broadcast.cpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/ov_builders/broadcast.hpp" + +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "ov_models/ov_builders/reshape.hpp" + +namespace ov { +namespace op { +namespace util { +namespace { +/// +/// \brief Reconstructs axes mapping vector for Broadcast:v1 operation. +/// +/// \param[in] output_shape The output shape of Broadcast operation. +/// \param[in] broadcast_axes The broadcast axes used for Broadcast:v0 operator. +/// +/// \return The vector with axes indexes mapping . +/// +std::vector get_axes_mapping(const Shape& output_shape, const AxisSet& broadcast_axes) { + NGRAPH_CHECK((broadcast_axes.size() <= output_shape.size())); + std::vector axes_mapping(output_shape.size()); + iota(axes_mapping.begin(), axes_mapping.end(), 0); + for (auto i = broadcast_axes.rbegin(); i != broadcast_axes.rend(); ++i) { + axes_mapping.erase(axes_mapping.begin() + *i); + } + return axes_mapping; +} + +/// +/// \brief Creates Node returning the axes mapping for Broadcast:v1 operation. +/// +/// \param[in] output_shape The output shape of Broadcast operation. +/// \param[in] broadcast_axes The broadcast axes used for Broadcast:v0 operator. +/// +/// \return The Output object with Node returning axes mapping. 
+/// +Output get_axes_mapping_output(const Shape& output_shape, const AxisSet& broadcast_axes) { + std::vector axes_mapping{get_axes_mapping(output_shape, broadcast_axes)}; + return ov::op::v0::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); +} + +static Output get_axes_mapping_output(const PartialShape& output_shape, + const Output& input_shape, + std::size_t start_match_axis) { + const auto one_node = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); + const auto zero_node = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); + const auto start_match_axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {start_match_axis}); + const auto target_shape_rank_node = + ov::op::util::reshape(std::make_shared(input_shape), Shape{}); + + const auto range_node = + std::make_shared(zero_node, target_shape_rank_node, one_node, element::i64); + + // workaround for GPU plugin type incompatibility + const auto range_node_converted = + std::make_shared(range_node, start_match_axis_node->get_element_type()); + // end of workaround + + const auto result = std::make_shared(range_node_converted, start_match_axis_node); + return result; +} +} // namespace + +Output make_broadcast(const Output& node, const Shape& target_shape, const AxisSet& broadcast_axes) { + return std::make_shared( + node, + ov::op::v0::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + get_axes_mapping_output(target_shape, broadcast_axes)); +} + +Output make_broadcast(const Output& node, const Shape& target_shape, size_t start_match_axis) { + const auto node_shape = std::make_shared(node); + return std::make_shared( + node, + ov::op::v0::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + get_axes_mapping_output(target_shape, node_shape, start_match_axis)); +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/norm.cpp b/src/tests/ov_helpers/ov_models/ov_builders/src/norm.cpp new file mode 100644 index 00000000000000..7719443e4a8570 --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/src/norm.cpp @@ -0,0 +1,164 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/ov_builders/norm.hpp" + +#include "openvino/op/abs.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/power.hpp" +#include "openvino/op/reduce_sum.hpp" +#include "openvino/op/sqrt.hpp" + +namespace ov { +namespace op { +namespace util { +namespace { +/// \brief Specifies method of bias application to avoid numerical problems +enum class BiasMode { + // Add bias to intermediate result + ADD, + // Calculate max of intermediate result and bias + MAX +}; + +std::shared_ptr lp_norm(const Output& value, + size_t p_norm, + const Output& reduction_axes, + float bias, + bool keep_dims) { + // In general "entrywise" lp-norm for matrix `A` is defined as following double + // sum: + // ||A||_p = ||vec(A)||_p = [sum_{i=1}^m sum_{j=1}^n abs(a_{i,j})^p]^{1/p} + std::shared_ptr abs_values{std::make_shared(value)}; + std::shared_ptr p_node = ov::op::v0::Constant::create(value.get_element_type(), Shape{}, {p_norm}); + + // Get inner part of equation: abs_values^p_node, then sum over reduction_axes. 
+ std::shared_ptr values{std::make_shared(abs_values, p_node)}; + values = std::make_shared(values, reduction_axes, keep_dims); + + std::shared_ptr bias_node{ov::op::v0::Constant::create(values->get_element_type(), Shape{}, {bias})}; + + values = std::make_shared(values, bias_node); + + // Get outer part of equation: raise values to 1/p_norm exponent. + std::shared_ptr inv_p_node = + ov::op::v0::Constant::create(values->get_element_type(), Shape{}, {1.f / p_norm}); + + return {std::make_shared(values, inv_p_node)}; +} + +/// \brief Calculates L-0 norm of input tensor. +/// +/// \note The L-0 norm represents the cardinality of elements different +/// from zero. This actually is not a "true" norm. +/// +/// \param[in] value The input tensor. +/// \param[in] reduction_axes The axes along which we calculate norm. +/// \param[in] keep_dims The flag indicates if axes will be removed or kept. +/// +/// \return L-0 norm of value. The output sub-graph is composed of v1 ops. +/// +std::shared_ptr l0_norm(const Output& value, const Output& reduction_axes, bool keep_dims) { + // L0 norm returns number of elements different from zero. + const std::shared_ptr zero_node{ov::op::v0::Constant::create(value.get_element_type(), Shape{}, {0.f})}; + + // Convert bool values to input node data type. + const std::shared_ptr non_zero_values = + std::make_shared(std::make_shared(value, zero_node), + value.get_element_type()); + + return std::make_shared(non_zero_values, reduction_axes, keep_dims); +} + +/// \brief Calculates L-1 norm of a value. +/// +/// \note The L-1 norm represents the sum of absolute values. +/// +/// \param[in] value The input tensor. +/// \param[in] reduction_axes The axes along which we calculate norm. +/// \param[in] bias The bias added to the calculated sum. +/// \param[in] keep_dims The flag indicates if axes will be removed or kept. +/// +/// \return L-1 norm of value. The output sub-graph is composed of v1 ops. +/// +std::shared_ptr l1_norm(const Output& value, + const Output& reduction_axes, + float bias, + bool keep_dims) { + const std::shared_ptr values{ + std::make_shared(std::make_shared(value), reduction_axes, keep_dims)}; + + const std::shared_ptr bias_node{ov::op::v0::Constant::create(values->get_element_type(), Shape{}, {bias})}; + + return std::make_shared(values, bias_node); +} + +/// \brief Calculates L-2 norm of input tensor. +/// +/// \note The L-2 norm represents the square root of sum of squares of each +/// individual element. +/// +/// \param[in] value The input tensor. +/// \param[in] reduction_axes The axes along which we calculate norm. +/// \param[in] bias The bias combined with calculated sum. +/// \param[in] bias_mode The method of bias application. +/// \param[in] keep_dims The flag indicates if axes will be removed or kept. +/// +/// \return L-2 norm of value. The output sub-graph is composed of v1 ops. 
+/// +std::shared_ptr l2_norm(const Output& value, + const Output& reduction_axes, + float bias, + BiasMode bias_mode, + bool keep_dims) { + std::shared_ptr pow = std::make_shared( + value, + std::make_shared(value.get_element_type(), Shape{}, 2)); + std::shared_ptr values{std::make_shared(pow, reduction_axes, keep_dims)}; + + std::shared_ptr bias_node{ov::op::v0::Constant::create(values->get_element_type(), Shape{}, {bias})}; + std::shared_ptr result; + switch (bias_mode) { + case BiasMode::MAX: { + result = std::make_shared(std::make_shared(values, bias_node)); + break; + } + case BiasMode::ADD: + default: + result = std::make_shared(std::make_shared(values, bias_node)); + } + return result; +} +} // namespace + +std::shared_ptr lp_norm(const Output& value, + const Output& reduction_axes, + size_t p_norm, + float bias, + bool keep_dims) { + // The number of non-zero elements + if (p_norm == 0) { + return l0_norm(value, reduction_axes, keep_dims); + } + // sum of absolute values. + else if (p_norm == 1) { + return l1_norm(value, reduction_axes, bias, keep_dims); + } + // sqrt of sum of squares - Euclidean norm + else if (p_norm == 2) { + return l2_norm(value, reduction_axes, bias, BiasMode::ADD, keep_dims); + } + // generic case + else { + return lp_norm(value, p_norm, reduction_axes, bias, keep_dims); + } +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/reshape.cpp b/src/tests/ov_helpers/ov_models/ov_builders/src/reshape.cpp new file mode 100644 index 00000000000000..ef3471bf819043 --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/src/reshape.cpp @@ -0,0 +1,126 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/ov_builders/reshape.hpp" + +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/strided_slice.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/variadic_split.hpp" + +namespace ov { +namespace op { +namespace util { +std::shared_ptr reshape(const Output& value, const Shape& shape) { + if (value.get_partial_shape().same_scheme(shape)) { + return value.get_node_shared_ptr(); + } else if (is_scalar(shape)) { + auto value_rank = value.get_shape().size(); + AxisVector axes_vector(value_rank); + std::iota(axes_vector.begin(), axes_vector.end(), 0); + auto axes = ov::op::v0::Constant::create(element::i64, Shape{value_rank}, axes_vector); + return std::make_shared(value, axes); + } else { + auto out_pattern = ov::op::v0::Constant::create(element::i64, + Shape{shape.size()}, + std::vector(shape.begin(), shape.end())); + + return std::make_shared(value, out_pattern, false); + } +} + +std::shared_ptr reorder_axes(const Output& value, std::vector axes_order) { + const auto axes_order_const = + ov::op::v0::Constant::create(element::i64, + Shape{axes_order.size()}, + std::vector(axes_order.begin(), axes_order.end())); + return std::make_shared(value, axes_order_const); +} + +std::shared_ptr transpose(const Output& value) { + // This part is left to preserve backward compatibility and ensure passing ONNX tests. 
+ if (value.get_partial_shape().is_static()) { + std::vector axes_order(value.get_shape().size()); + std::iota(begin(axes_order), end(axes_order), 0); + std::reverse(begin(axes_order), end(axes_order)); + return reorder_axes(value, axes_order); + } + + const auto input_rank = std::make_shared(std::make_shared(value)); + const auto neg_one = ov::op::v0::Constant::create(element::i64, Shape{}, {-1}); + const auto start_node = std::make_shared(input_rank, neg_one); + const auto reverse_axes_order = std::make_shared(reshape(start_node, Shape{}), // start + neg_one, // stop (exclusive) + neg_one); // step + return std::make_shared(value, reverse_axes_order); +} + +namespace { +/// +/// \brief Return the node representing normalized axis with respect to +/// provided rank. +/// +/// \param[in] node_rank The node representing rank used for normalization. +/// \param[in] axis The axis value to be normalized. +/// +/// \return The new Constant node representing normalized axis value. +/// +std::shared_ptr get_normalized_axis_node(const std::shared_ptr node_rank, int64_t axis) { + auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{1}, {axis}); + // shortcut for already positive value + if (axis >= 0) { + return axis_node; + } + + // TODO: What if axis value is beyond acceptable values? [-node_rank, + // node_rank-1] + return std::make_shared(node_rank, axis_node); +} +} // namespace + +std::shared_ptr flatten(const Output& value, int axis) { + // First dimension of output tensor is the product of [d_0, ... d_{axis-1}] dimensions of + // input tensor. The last dimension is the product of the rest of input tensor dimensions: + // [d_{axis}, ..., d_n] + std::shared_ptr output_shape; + if (axis == 0) { + output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {1, -1}); + } else if (axis == 1) { + output_shape = ov::op::v0::Constant::create(element::i64, Shape{2}, {0, -1}); + } else { + const auto value_shape = std::make_shared(value); + const auto value_rank = std::make_shared(value_shape); + const auto axis_node = get_normalized_axis_node(value_rank, axis); + + const auto first_part_dims = + std::make_shared(value_shape, + ov::op::v0::Constant::create(element::i64, {1}, {0}), + axis_node, + std::vector{0}, + std::vector{0}); + const auto first_part_dims_length = + std::make_shared(first_part_dims, + ov::op::v0::Constant::create(element::i64, {}, {0}), + true); + + const auto remaining_part_length = ov::op::v0::Constant::create(element::i64, {1}, {-1}); + + output_shape = + std::make_shared(OutputVector{first_part_dims_length, remaining_part_length}, 0); + } + return std::make_shared(value, output_shape, true); +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/ov_builders/src/split.cpp b/src/tests/ov_helpers/ov_models/ov_builders/src/split.cpp new file mode 100644 index 00000000000000..af87cf8781f715 --- /dev/null +++ b/src/tests/ov_helpers/ov_models/ov_builders/src/split.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/ov_builders/split.hpp" + +#include "openvino/op/constant.hpp" +#include "openvino/op/split.hpp" +#include "openvino/op/variadic_split.hpp" + +namespace ov { +namespace op { +namespace util { +OutputVector split(const Output& value, const std::vector& split_lengths, int64_t axis) { + const auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {axis}); + const auto split_lengths_node = + 
ov::op::v0::Constant::create(element::i64, Shape{split_lengths.size()}, split_lengths); + const auto variadic_split = std::make_shared(value, axis_node, split_lengths_node); + + return variadic_split->outputs(); +} + +OutputVector split(const Output& value, int64_t num_splits, int64_t axis) { + const auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {axis}); + const auto split = std::make_shared(value, axis_node, num_splits); + + return split->outputs(); +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/src/ov_builder/broadcast.cpp b/src/tests/ov_helpers/ov_models/src/ov_builder/broadcast.cpp new file mode 100644 index 00000000000000..3b6e6c8be448ed --- /dev/null +++ b/src/tests/ov_helpers/ov_models/src/ov_builder/broadcast.cpp @@ -0,0 +1,89 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/ov_builders/broadcast.hpp" + +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "ov_models/ov_builders/reshape.hpp" + +namespace ov { +namespace op { +namespace util { +namespace { +/// +/// \brief Reconstructs axes mapping vector for Broadcast:v1 operation. +/// +/// \param[in] output_shape The output shape of Broadcast operation. +/// \param[in] broadcast_axes The broadcast axes used for Broadcast:v0 operator. +/// +/// \return The vector with axes indexes mapping . +/// +std::vector get_axes_mapping(const Shape& output_shape, const AxisSet& broadcast_axes) { + NGRAPH_CHECK((broadcast_axes.size() <= output_shape.size())); + std::vector axes_mapping(output_shape.size()); + iota(axes_mapping.begin(), axes_mapping.end(), 0); + for (auto i = broadcast_axes.rbegin(); i != broadcast_axes.rend(); ++i) { + axes_mapping.erase(axes_mapping.begin() + *i); + } + return axes_mapping; +} + +/// +/// \brief Creates Node returning the axes mapping for Broadcast:v1 operation. +/// +/// \param[in] output_shape The output shape of Broadcast operation. +/// \param[in] broadcast_axes The broadcast axes used for Broadcast:v0 operator. +/// +/// \return The Output object with Node returning axes mapping. 
+/// +Output get_axes_mapping_output(const Shape& output_shape, const AxisSet& broadcast_axes) { + std::vector axes_mapping{get_axes_mapping(output_shape, broadcast_axes)}; + return ov::op::v0::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); +} + +static Output get_axes_mapping_output(const PartialShape& output_shape, + const Output& input_shape, + std::size_t start_match_axis) { + const auto one_node = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); + const auto zero_node = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); + const auto start_match_axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {start_match_axis}); + const auto target_shape_rank_node = + ov::op::util::reshape(std::make_shared(input_shape), Shape{}); + + const auto range_node = + std::make_shared(zero_node, target_shape_rank_node, one_node, element::i64); + + // workaround for GPU plugin type incompatibility + const auto range_node_converted = + std::make_shared(range_node, start_match_axis_node->get_element_type()); + // end of workaround + + const auto result = std::make_shared(range_node_converted, start_match_axis_node); + return result; +} +} // namespace + +Output make_broadcast(const Output& node, const Shape& target_shape, const AxisSet& broadcast_axes) { + return std::make_shared( + node, + ov::op::v0::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + get_axes_mapping_output(target_shape, broadcast_axes)); +} + +Output make_broadcast(const Output& node, const Shape& target_shape, size_t start_match_axis) { + const auto node_shape = std::make_shared(node); + return std::make_shared( + node, + ov::op::v0::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + get_axes_mapping_output(target_shape, node_shape, start_match_axis)); +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/ov_helpers/ov_models/src/ov_builder/split.cpp b/src/tests/ov_helpers/ov_models/src/ov_builder/split.cpp new file mode 100644 index 00000000000000..af87cf8781f715 --- /dev/null +++ b/src/tests/ov_helpers/ov_models/src/ov_builder/split.cpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_models/ov_builders/split.hpp" + +#include "openvino/op/constant.hpp" +#include "openvino/op/split.hpp" +#include "openvino/op/variadic_split.hpp" + +namespace ov { +namespace op { +namespace util { +OutputVector split(const Output& value, const std::vector& split_lengths, int64_t axis) { + const auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {axis}); + const auto split_lengths_node = + ov::op::v0::Constant::create(element::i64, Shape{split_lengths.size()}, split_lengths); + const auto variadic_split = std::make_shared(value, axis_node, split_lengths_node); + + return variadic_split->outputs(); +} + +OutputVector split(const Output& value, int64_t num_splits, int64_t axis) { + const auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {axis}); + const auto split = std::make_shared(value, axis_node, num_splits); + + return split->outputs(); +} +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/CMakeLists.txt b/src/tests/test_utils/common_test_utils/CMakeLists.txt index 1112ccd08558af..7514aa476adae3 100644 --- a/src/tests/test_utils/common_test_utils/CMakeLists.txt +++ b/src/tests/test_utils/common_test_utils/CMakeLists.txt @@ -31,6 +31,7 @@ 
function(add_common_utils ADD_TARGET_NAME) ov_models openvino::runtime openvino::runtime::dev + openvino::builders PRIVATE openvino::util openvino::shape_inference diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py b/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py new file mode 100644 index 00000000000000..a0f78096c02d50 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_ConjugateTranspose.py @@ -0,0 +1,122 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import numpy as np +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + +# Testing operation ConjugateTranspose +# Documentation: https://www.tensorflow.org/api_docs/python/tf/raw_ops/ConjugateTranspose + + +class TestComplexConjugateTranspose(CommonTFLayerTest): + + def _prepare_input(self, inputs_info): + + rng = np.random.default_rng() + assert 'real_part' in inputs_info + real_part_shape = inputs_info['real_part'] + assert 'imag_part' in inputs_info + imag_part_shape = inputs_info['imag_part'] + + inputs_data = {} + inputs_data['real_part'] = 4 * rng.random(real_part_shape).astype(np.float32) - 2 + inputs_data['imag_part'] = 4 * rng.random(imag_part_shape).astype(np.float32) - 2 + + return inputs_data + + def create_complex_conjugate_transpose_net(self, input_shape, perm): + """ + TensorFlow net IR net + + Placeholder->ConjugateTranspose => Placeholder->Transpose->Conjugate->Transpose + """ + + tf.compat.v1.reset_default_graph() + + # Create the graph and model + with tf.compat.v1.Session() as sess: + real_part = tf.compat.v1.placeholder(np.float32, input_shape, 'real_part') + imag_part = tf.compat.v1.placeholder(np.float32, input_shape, 'imag_part') + + complex_input = tf.raw_ops.Complex(real=real_part, imag=imag_part) + + conj_tranpose = tf.raw_ops.ConjugateTranspose(x=complex_input, perm=perm, name = "Operation") + real = tf.raw_ops.Real(input=conj_tranpose) + img = tf.raw_ops.Imag(input=conj_tranpose) + + tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + ref_net = None + + return tf_net, ref_net + + + test_data = [ + (dict(input_shape=[1, 2], perm=[1, 0])), + (dict(input_shape=[1, 2, 3], perm=[2, 1, 0])), + (dict(input_shape=[1, 2, 3, 4], perm=[0, 3, 2, 1])), + (dict(input_shape=[1, 2, 3, 4, 5, 6], perm=[0, 2, 1, 3, 4, 5])), + ] + + @pytest.mark.parametrize("params", test_data) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_conjugate_transpose(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + self._test(*self.create_complex_conjugate_transpose_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) + + +class TestConjugateTranspose(CommonTFLayerTest): + + def _prepare_input(self, inputs_info): + + assert 'input' in inputs_info + input_shape = inputs_info['input'] + + inputs_data = {} + inputs_data['input'] = np.random.default_rng().random(input_shape).astype(np.float32) + + return inputs_data + + def create_conjugate_transpose_net(self, input_shape, perm): + """ + TensorFlow net IR net + + Placeholder->ConjugateTranspose => Placeholder->Transpose->Conjugate->Transpose + """ + + tf.compat.v1.reset_default_graph() + + # Create the graph and model + with tf.compat.v1.Session() as sess: + input = tf.compat.v1.placeholder(np.float32, input_shape, 'input') + + tf.raw_ops.ConjugateTranspose(x=input, perm=perm, name = "Operation") + + 
tf.compat.v1.global_variables_initializer() + tf_net = sess.graph_def + + ref_net = None + + return tf_net, ref_net + + test_data = [ + (dict(input_shape=[1, 2], perm=[1, 0])), + (dict(input_shape=[1, 2, 3], perm=[2, 1, 0])), + (dict(input_shape=[1, 2, 3, 4], perm=[0, 3, 2, 1])), + (dict(input_shape=[1, 2, 3, 4, 5, 6], perm=[0, 2, 1, 3, 4, 5])), + ] + + @pytest.mark.parametrize("params", test_data) + @pytest.mark.precommit_tf_fe + @pytest.mark.nightly + def test_conjugate_transpose(self, params, ie_device, precision, ir_version, temp_dir, + use_new_frontend, use_old_api): + self._test(*self.create_conjugate_transpose_net(**params), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_new_frontend=use_new_frontend, use_old_api=use_old_api) \ No newline at end of file diff --git a/tests/model_hub_tests/torch_tests/test_hf_transformers.py b/tests/model_hub_tests/torch_tests/test_hf_transformers.py index 3bd3d525d3c2d7..a47730d5ba1aab 100644 --- a/tests/model_hub_tests/torch_tests/test_hf_transformers.py +++ b/tests/model_hub_tests/torch_tests/test_hf_transformers.py @@ -113,6 +113,8 @@ def load_model(self, name, type): if is_gptq: self.cuda_available, self.gptq_postinit = patch_gptq() model_kwargs["torch_dtype"] = torch.float32 + if "bart" in mi.tags: + model_kwargs["attn_implementation"] = "eager" try: auto_model = mi.transformersInfo['auto_model'] if "processor" in mi.transformersInfo: @@ -509,7 +511,7 @@ def teardown_method(self): ("google/tapas-large-finetuned-wtq", "tapas"), ("gpt2", "gpt2"), ("openai/clip-vit-large-patch14", "clip"), - ("OpenVINO/opt-125m-gptq", 'opt') + ("OpenVINO/opt-125m-gptq", "opt") ]) @pytest.mark.precommit def test_convert_model_precommit(self, name, type, ie_device): diff --git a/tests/model_hub_tests/torch_tests/test_timm.py b/tests/model_hub_tests/torch_tests/test_timm.py index 7e8d8dcc1e05e1..5805bfb82dd4bf 100644 --- a/tests/model_hub_tests/torch_tests/test_timm.py +++ b/tests/model_hub_tests/torch_tests/test_timm.py @@ -13,23 +13,29 @@ def filter_timm(timm_list: list) -> list: - unique_models = set() + unique_models = dict() filtered_list = [] - ignore_set = {"base", "mini", "small", "xxtiny", "xtiny", "tiny", "lite", "nano", "pico", "medium", "big", - "large", "xlarge", "xxlarge", "huge", "gigantic", "giant", "enormous", "xs", "xxs", "s", "m", "l", - "xl"} + ignore_list = ["base", "xxtiny", "xxs", "pico", "xtiny", "xs", "nano", "tiny", "s", "mini", "small", "lite", + "medium", "m", "big", "large", "l", "xlarge", "xl", "huge", "xxlarge", "gigantic", "giant", "enormous"] + ignore_set = set(ignore_list) for name in sorted(timm_list): # first: remove datasets name_parts = name.split(".") _name = "_".join(name.split(".")[:-1]) if len(name_parts) > 1 else name # second: remove sizes name_set = set([n for n in _name.split("_") if not n.isnumeric()]) + size_set = name_set.intersection(ignore_set) + size_idx = 100 + if len(size_set) > 0: + size_idx = ignore_list.index(list(sorted(size_set))[0]) name_set = name_set.difference(ignore_set) - name_join = "_".join(name_set) + name_join = "_".join(sorted(name_set)) if name_join not in unique_models: - unique_models.add(name_join) + unique_models[name_join] = (size_idx, name) filtered_list.append(name) - return filtered_list + elif unique_models[name_join][0] > size_idx: + unique_models[name_join] = (size_idx, name) + return sorted([v[1] for v in unique_models.values()]) def get_all_models() -> list: diff --git a/tests/model_hub_tests/torch_tests/timm_models b/tests/model_hub_tests/torch_tests/timm_models index 
4b23afec81e77d..8608d8e8ea2d34 100644 --- a/tests/model_hub_tests/torch_tests/timm_models +++ b/tests/model_hub_tests/torch_tests/timm_models @@ -13,11 +13,11 @@ cait_s36_384.fb_dist_in1k,None cait_xs24_384.fb_dist_in1k,None cait_xxs24_224.fb_dist_in1k,None cait_xxs36_224.fb_dist_in1k,None -coat_lite_medium.in1k,None -coatnet_0_rw_224.sw_in1k,None +coat_tiny.in1k,None coatnet_bn_0_rw_224.sw_in1k,None +coatnet_nano_rw_224.sw_in1k,None coatnet_rmlp_1_rw2_224.sw_in12k,None -coatnet_rmlp_1_rw_224.sw_in1k,None +coatnet_rmlp_nano_rw_224.sw_in1k,None coatnext_nano_rw_224.sw_in1k,None convformer_b36.sail_in1k,None convformer_m36.sail_in1k,None @@ -32,15 +32,15 @@ convnext_base.clip_laion2b,None convnext_femto.d1_in1k,None convnext_femto_ols.d1_in1k,None convnext_large_mlp.clip_laion2b_augreg,None -convnext_nano_ols.d1h_in1k,None +convnext_pico_ols.d1_in1k,None convnext_tiny_hnf.a2h_in1k,None convnextv2_atto.fcmae,None convnextv2_base.fcmae,None convnextv2_femto.fcmae,None -crossvit_15_240.in1k,None crossvit_15_dagger_240.in1k,None -cs3darknet_focus_l.c2ns_in1k,None -cs3darknet_l.c2ns_in1k,None +crossvit_base_240.in1k,None +cs3darknet_focus_m.c2ns_in1k,None +cs3darknet_m.c2ns_in1k,None cs3darknet_x.c2ns_in1k,None cs3edgenet_x.c2_in1k,None cs3se_edgenet_x.c2ns_in1k,None @@ -127,7 +127,7 @@ efficientnet_em.ra2_in1k,None efficientnet_es.ra_in1k,None efficientnet_es_pruned.in1k,None efficientnet_lite0.ra_in1k,None -efficientnetv2_rw_m.agc_in1k,None +efficientnetv2_rw_s.ra2_in1k,None efficientnetv2_rw_t.ra2_in1k,None efficientvit_b0.r224_in1k,None efficientvit_b1.r224_in1k,None @@ -146,9 +146,9 @@ ese_vovnet19b_dw.ra_in1k,None ese_vovnet39b.ra_in1k,None eva02_base_patch14_224.mim_in22k,None eva02_base_patch16_clip_224.merged2b,None -eva02_enormous_patch14_clip_224.laion2b,None -eva_giant_patch14_224.clip_ft_in1k,None +eva02_large_patch14_clip_224.merged2b,None eva_giant_patch14_clip_224.laion400m,None +eva_large_patch14_196.in22k_ft_in1k,None fastvit_ma36.apple_dist_in1k,None fastvit_s12.apple_dist_in1k,None fastvit_sa12.apple_dist_in1k,None @@ -163,15 +163,15 @@ fbnetv3_g.ra2_in1k,None flexivit_base.1000ep_in21k,None focalnet_base_lrf.ms_in1k,None focalnet_base_srf.ms_in1k,None -focalnet_huge_fl3.ms_in22k,None -focalnet_huge_fl4.ms_in22k,None +focalnet_large_fl3.ms_in22k,None +focalnet_large_fl4.ms_in22k,None gc_efficientnetv2_rw_t.agc_in1k,None gcresnet33ts.ra2_in1k,None,xfail,Descriptors shape is incompatible with provided dimensions gcresnet50t.ra2_in1k,None,xfail,Descriptors shape is incompatible with provided dimensions gcresnext26ts.ch_in1k,None,xfail,Descriptors shape is incompatible with provided dimensions gcresnext50ts.ch_in1k,None,xfail,Descriptors shape is incompatible with provided dimensions gcvit_base.in1k,None -gernet_l.idstcv_in1k,None +gernet_s.idstcv_in1k,None ghostnet_100.in1k,None ghostnetv2_100.in1k,None gmixer_24_224.ra3_in1k,None @@ -186,7 +186,7 @@ hardcorenas_c.miil_green_in1k,None hardcorenas_d.miil_green_in1k,None hardcorenas_e.miil_green_in1k,None hardcorenas_f.miil_green_in1k,None -hrnet_w18.ms_aug_in1k,None +hrnet_w18_small.gluon_in1k,None hrnet_w18_small_v2.gluon_in1k,None hrnet_w18_ssld.paddle_in1k,None hrnet_w30.ms_in1k,None @@ -227,19 +227,19 @@ maxxvitv2_nano_rw_256.sw_in1k,None maxxvitv2_rmlp_base_rw_224.sw_in12k,None mixer_b16_224.goog_in21k,None mixer_l16_224.goog_in21k,None -mixnet_l.ft_in1k,None -mnasnet_100.rmsp_in1k,None +mixnet_s.ft_in1k,None +mnasnet_small.lamb_in1k,None mobilenetv2_050.lamb_in1k,None mobilenetv2_110d.ra_in1k,None 
mobilenetv2_120d.ra_in1k,None -mobilenetv3_large_100.miil_in21k,None mobilenetv3_rw.rmsp_in1k,None +mobilenetv3_small_050.lamb_in1k,None mobileone_s0.apple_in1k,None mobileone_s1.apple_in1k,None mobileone_s2.apple_in1k,None mobileone_s3.apple_in1k,None mobileone_s4.apple_in1k,None -mobilevit_s.cvnets_in1k,None +mobilevit_xxs.cvnets_in1k,None mobilevitv2_050.cvnets_in1k,None mvitv2_base.fb_in1k,None mvitv2_base_cls.fb_inw21k,None @@ -250,10 +250,10 @@ nf_resnet50.ra2_in1k,None nfnet_l0.ra2_in1k,None pit_b_224.in1k,None pit_b_distilled_224.in1k,None -pit_s_224.in1k,None -pit_s_distilled_224.in1k,None pit_ti_224.in1k,None pit_ti_distilled_224.in1k,None +pit_xs_224.in1k,None +pit_xs_distilled_224.in1k,None pnasnet5large.tf_in1k,None poolformer_m36.sail_in1k,None poolformer_m48.sail_in1k,None @@ -311,7 +311,7 @@ res2net50_26w_8s.in1k,None res2net50_48w_2s.in1k,None res2net50d.in1k,None res2next50.in1k,None -resmlp_12_224.fb_dino,None +resmlp_big_24_224.fb_distilled_in1k,None resnest101e.in1k,None resnest14d.gluon_in1k,None resnest200e.in1k,None @@ -385,7 +385,7 @@ selecsls60.in1k,None selecsls60b.in1k,None semnasnet_075.rmsp_in1k,None senet154.gluon_in1k,None -sequencer2d_l.in1k,None +sequencer2d_s.in1k,None seresnet152d.ra2_in1k,None seresnet33ts.ra2_in1k,None seresnet50.a1_in1k,None @@ -412,7 +412,7 @@ swinv2_base_window12to24_192to384.ms_in22k_ft_in1k,None swinv2_base_window16_256.ms_in1k,None swinv2_base_window8_256.ms_in1k,None swinv2_cr_small_224.sw_in1k,None -swinv2_cr_small_ns_224.sw_in1k,None +swinv2_cr_tiny_ns_224.sw_in1k,None tf_efficientnet_b0.aa_in1k,None tf_efficientnet_b1.aa_in1k,None tf_efficientnet_b2.aa_in1k,None @@ -438,10 +438,10 @@ tf_efficientnetv2_b0.in1k,None tf_efficientnetv2_b1.in1k,None tf_efficientnetv2_b2.in1k,None tf_efficientnetv2_b3.in1k,None -tf_efficientnetv2_l.in1k,None -tf_mixnet_l.in1k,None -tf_mobilenetv3_large_075.in1k,None -tf_mobilenetv3_large_minimal_100.in1k,None +tf_efficientnetv2_s.in1k,None +tf_mixnet_s.in1k,None +tf_mobilenetv3_small_075.in1k,None +tf_mobilenetv3_small_minimal_100.in1k,None tiny_vit_11m_224.dist_in22k,None tiny_vit_21m_224.dist_in22k,None tiny_vit_5m_224.dist_in22k,None @@ -451,7 +451,7 @@ tinynet_c.in1k,None tinynet_d.in1k,None tinynet_e.in1k,None tnt_s_patch16_224,None -tresnet_l.miil_in1k,None +tresnet_m.miil_in1k,None tresnet_v2_l.miil_in21k,None twins_pcpvt_base.in1k,None twins_svt_base.in1k,None @@ -463,7 +463,7 @@ vgg16.tv_in1k,None vgg16_bn.tv_in1k,None vgg19.tv_in1k,None vgg19_bn.tv_in1k,None -visformer_small.in1k,None +visformer_tiny.in1k,None vit_base_patch14_dinov2.lvd142m,None vit_base_patch14_reg4_dinov2.lvd142m,None vit_base_patch16_224.augreg2_in21k_ft_in1k,None @@ -477,12 +477,12 @@ vit_base_patch32_clip_224.datacompxl,None vit_base_patch32_clip_quickgelu_224.metaclip_2pt5b,None vit_base_patch8_224.augreg2_in21k_ft_in1k,None vit_base_r50_s16_224.orig_in21k,None -vit_giant_patch14_clip_224.laion2b,None -vit_giant_patch16_gap_224.in22k_ijepa,None vit_huge_patch14_224.mae,None -vit_huge_patch14_clip_quickgelu_224.dfn5b,None vit_huge_patch14_gap_224.in1k_ijepa,None +vit_large_patch14_clip_224.datacompxl,None +vit_large_patch14_clip_quickgelu_224.dfn2b,None vit_large_r50_s32_224.augreg_in21k,None +vit_medium_patch16_gap_240.sw_in12k,None vit_relpos_base_patch16_224.sw_in1k,None vit_relpos_base_patch16_clsgap_224.sw_in1k,None vit_relpos_base_patch32_plus_rpn_256.sw_in1k,None @@ -490,7 +490,7 @@ vit_relpos_medium_patch16_cls_224.sw_in1k,None vit_relpos_medium_patch16_rpn_224.sw_in1k,None 
vit_small_r26_s32_224.augreg_in21k,None vit_so400m_patch14_siglip_224.webli,None -vit_srelpos_medium_patch16_224.sw_in1k,None +vit_srelpos_small_patch16_224.sw_in1k,None vit_tiny_r_s16_p8_224.augreg_in21k,None volo_d1_224.sail_in1k,None,xfail,Unsupported aten::col2im volo_d2_224.sail_in1k,None,xfail,Unsupported aten::col2im @@ -504,5 +504,5 @@ xception41p.ra3_in1k,None xception65.ra3_in1k,None xception65p.ra3_in1k,None xception71.tf_in1k,None -xcit_large_24_p16_224.fb_dist_in1k,None -xcit_large_24_p8_224.fb_dist_in1k,None +xcit_nano_12_p16_224.fb_dist_in1k,None +xcit_nano_12_p8_224.fb_dist_in1k,None diff --git a/tests/model_hub_tests/torch_tests/torchbench_models b/tests/model_hub_tests/torch_tests/torchbench_models index 8c2a2ee93fdfa0..8634635574b400 100644 --- a/tests/model_hub_tests/torch_tests/torchbench_models +++ b/tests/model_hub_tests/torch_tests/torchbench_models @@ -4,8 +4,8 @@ Background_Matting,None LearningToPaint,None Super_SloMo,None,xfail,Unsupported ops aten::l1_loss aten::mse_loss #alexnet,None - Already tested by torchvision tests -basic_gnn_edgecnn,None,xfail,Accuracy validation failed -basic_gnn_gcn,None,xfail,Unsupported ops aten::pow_ +basic_gnn_edgecnn,None +basic_gnn_gcn,None,xfail,CPU failed to allocate basic_gnn_gin,None basic_gnn_sage,None #cm3leon_generate,None,skip,No install.py is found diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt index cb86b4016ee6bd..0fff4428700d8e 100644 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt +++ b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) target_include_directories(${TARGET_FE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(${TARGET_FE_NAME} PUBLIC openvino::runtime PRIVATE openvino::builders) +target_link_libraries(${TARGET_FE_NAME} PUBLIC openvino::runtime) ov_add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME})