diff --git a/src/frontends/tensorflow_common/src/op/addN.cpp b/src/frontends/tensorflow_common/src/op/addN.cpp
index e7b260b5070cf7..0daf83f29712fd 100644
--- a/src/frontends/tensorflow_common/src/op/addN.cpp
+++ b/src/frontends/tensorflow_common/src/op/addN.cpp
@@ -2,13 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <numeric>
-
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/add.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -16,18 +15,16 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_add_n_op(const NodeContext& node) {
-    OutputVector ng_arg_vec;
-    for (size_t i = 0; i < node.get_input_size(); i++) {
-        ng_arg_vec.push_back(node.get_input(static_cast<int>(i)));
+    default_op_checks(node, 1, {"AddN", "ADD_N"});
+    int num_size = static_cast<int>(node.get_input_size());
+
+    Output<Node> result = node.get_input(0);
+    for (int ind = 1; ind < num_size; ++ind) {
+        result = make_shared<v1::Add>(result, node.get_input(ind));
     }
-    auto res = std::accumulate(std::next(ng_arg_vec.begin()),
-                               ng_arg_vec.end(),
-                               ng_arg_vec.at(0),
-                               [](const Output<Node>& a, const Output<Node>& b) -> shared_ptr<Node> {
-                                   return make_shared<Add>(a, b);
-                               });
-    set_node_name(node.get_name(), res.get_node_shared_ptr());
-    return {res};
+
+    set_node_name(node.get_name(), result.get_node_shared_ptr());
+    return {result};
 }
 }  // namespace op
 }  // namespace tensorflow
diff --git a/src/frontends/tensorflow_common/src/op/arg_min_max.cpp b/src/frontends/tensorflow_common/src/op/arg_min_max.cpp
index 42cfe8867dac53..c9d72018e1e976 100644
--- a/src/frontends/tensorflow_common/src/op/arg_min_max.cpp
+++ b/src/frontends/tensorflow_common/src/op/arg_min_max.cpp
@@ -3,10 +3,12 @@
 //
 
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/squeeze.hpp"
+#include "openvino/op/topk.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,13 +16,14 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_arg_min_max(const NodeContext& node, std::string mode) {
+    default_op_checks(node, 1, {"ArgMax", "ArgMin", "ARG_MAX", "ARG_MIN"});
     auto input = node.get_input(0);
 
     // TensorFlow uses axis with default value equal to zero
     int64_t axis = 0;
     if (node.get_input_size() > 1) {
         TENSORFLOW_OP_VALIDATION(node,
-                                 std::dynamic_pointer_cast<Constant>(node.get_input(1).get_node_shared_ptr()),
+                                 as_type_ptr<v0::Constant>(node.get_input(1).get_node_shared_ptr()),
                                  "ArgMax/ArgMin is not supported with non-constant axis input");
         std::vector<int64_t> axes;
         get_const_input(node, 1, &axes);
@@ -30,12 +33,12 @@ OutputVector translate_arg_min_max(const NodeContext& node, std::string mode) {
     auto output_type = node.get_attribute<element::Type>("output_type", element::i64);
 
     // compute indices of max/min values using TopK
-    auto k = make_shared<Constant>(element::i64, Shape{}, 1);
+    auto k = make_shared<v0::Constant>(element::i64, Shape{}, 1);
     // TODO: define sort attribute for TensorFlow case
-    auto top_k = std::make_shared<TopK>(input, k, axis, mode, "none", output_type);
+    auto top_k = std::make_shared<v3::TopK>(input, k, axis, mode, "none", output_type);
 
-    auto axis_to_remove = make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>({axis}));
-    auto res = make_shared<Squeeze>(top_k->output(1), axis_to_remove);
+    auto axis_to_remove = make_shared<v0::Constant>(element::i64, Shape{1}, vector<int64_t>({axis}));
+    auto res = make_shared<v0::Squeeze>(top_k->output(1), axis_to_remove);
     set_node_name(node.get_name(), res);
     return {res};
 }
diff --git a/src/frontends/tensorflow_common/src/op/binary_op.cpp b/src/frontends/tensorflow_common/src/op/binary_op.cpp
index a1a056f048d066..0ea26fb3cb9946 100644
--- a/src/frontends/tensorflow_common/src/op/binary_op.cpp
+++ b/src/frontends/tensorflow_common/src/op/binary_op.cpp
@@ -15,11 +15,12 @@ namespace op {
 
 OutputVector translate_binary_op(const NodeContext& node,
                                  const std::function<Output<Node>(Output<Node>&, Output<Node>&)>& create_binary_op) {
-    auto ng_lhs = node.get_input(0);
-    auto ng_rhs = node.get_input(1);
-    auto ng_node = create_binary_op(ng_lhs, ng_rhs);
-    set_node_name(node.get_name(), ng_node.get_node_shared_ptr());
-    return {ng_node};
+    default_op_checks(node, 2, {});
+    auto lhs = node.get_input(0);
+    auto rhs = node.get_input(1);
+    auto result = create_binary_op(lhs, rhs);
+    set_node_name(node.get_name(), result.get_node_shared_ptr());
+    return {result};
 }
 
 OutputVector translate_floor_div_op(const NodeContext& node) {
diff --git a/src/frontends/tensorflow_common/src/op/cast.cpp b/src/frontends/tensorflow_common/src/op/cast.cpp
index 7e0b268dc17927..da21bc7396d679 100644
--- a/src/frontends/tensorflow_common/src/op/cast.cpp
+++ b/src/frontends/tensorflow_common/src/op/cast.cpp
@@ -3,10 +3,10 @@
 //
 
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/convert.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,10 +14,12 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_cast_op(const NodeContext& node) {
-    auto ng_input = node.get_input(0);
+    default_op_checks(node, 1, {"Cast", "CAST"});
+    auto x = node.get_input(0);
+
+    auto dst_type = node.get_attribute<element::Type>("DstT");
+    auto res = make_shared<v0::Convert>(x, dst_type);
 
-    auto ng_et = node.get_attribute<element::Type>("DstT");
-    auto res = make_shared<Convert>(ng_input, ng_et);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
diff --git a/src/frontends/tensorflow_common/src/op/const.cpp b/src/frontends/tensorflow_common/src/op/const.cpp
index 1228d42b486264..96d21e721365fc 100644
--- a/src/frontends/tensorflow_common/src/op/const.cpp
+++ b/src/frontends/tensorflow_common/src/op/const.cpp
@@ -5,10 +5,10 @@
 #include "common_op_table.hpp"
 #include "helper_ops/string_constant.hpp"
 #include "helper_ops/unsupported_constant.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/constant.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 using namespace ov;
 
 namespace ov {
@@ -17,6 +17,8 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_const_op(const NodeContext& node) {
+    default_op_checks(node, 0, {"Const"});
+
     auto ov_type = node.get_attribute_as_any("dtype");
     std::shared_ptr<Node> const_node;
     if (!ov_type.is<ov::element::Type>() || ov_type.as<ov::element::Type>() == ov::element::dynamic ||
@@ -28,7 +30,7 @@ OutputVector translate_const_op(const NodeContext& node) {
         }
     } else {
         auto tensor = node.get_attribute<Tensor>("value");
-        const_node = std::make_shared<Constant>(tensor);
+        const_node = std::make_shared<v0::Constant>(tensor);
     }
     set_node_name(node.get_name(), const_node);
     return {const_node};
diff --git a/src/frontends/tensorflow_common/src/op/einsum.cpp b/src/frontends/tensorflow_common/src/op/einsum.cpp
index 591ca5dc5e2fed..431f1c4f51ba41 100644
--- a/src/frontends/tensorflow_common/src/op/einsum.cpp
+++ b/src/frontends/tensorflow_common/src/op/einsum.cpp
@@ -2,20 +2,23 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/einsum.hpp"
+
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
 namespace tensorflow {
 namespace op {
 
 OutputVector translate_einsum_op(const NodeContext& node) {
+    default_op_checks(node, 1, {"Einsum"});
+
     auto op_type = node.get_op_type();
     TENSORFLOW_OP_VALIDATION(node, op_type == "Einsum", "Internal error: incorrect usage of translate_einsum_op.");
-    auto equation = node.get_attribute<std::string>("equation");
+    auto equation = node.get_attribute<string>("equation");
 
     int input_size = static_cast<int>(node.get_input_size());
     OutputVector inputs;
@@ -23,7 +26,7 @@ OutputVector translate_einsum_op(const NodeContext& node) {
         inputs.push_back(node.get_input(input_ind));
     }
 
-    auto einsum = make_shared<Einsum>(inputs, equation);
+    auto einsum = make_shared<v7::Einsum>(inputs, equation);
     set_node_name(node.get_name(), einsum);
     return {einsum};
 }
diff --git a/src/frontends/tensorflow_common/src/op/elu.cpp b/src/frontends/tensorflow_common/src/op/elu.cpp
index a2833000141856..b2a35a99402ee3 100644
--- a/src/frontends/tensorflow_common/src/op/elu.cpp
+++ b/src/frontends/tensorflow_common/src/op/elu.cpp
@@ -2,12 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/elu.hpp"
+
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
 
 using namespace std;
 using namespace ov;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -15,9 +16,11 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_elu_op(const NodeContext& node) {
+    default_op_checks(node, 1, {"Elu", "ELU"});
    auto input = node.get_input(0);
    auto alpha = node.get_attribute<float>("alpha", 1.0);
-    auto res = make_shared<Elu>(input, alpha);
+    auto res = make_shared<v0::Elu>(input, alpha);
+
    set_node_name(node.get_name(), res);
    return res->outputs();
 }
diff --git a/src/frontends/tensorflow_common/src/op/fill.cpp b/src/frontends/tensorflow_common/src/op/fill.cpp
index bce098eb5bd35d..3edfbcf382cade 100644
--- a/src/frontends/tensorflow_common/src/op/fill.cpp
+++ b/src/frontends/tensorflow_common/src/op/fill.cpp
@@ -3,10 +3,10 @@
 //
 
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/broadcast.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,9 +14,11 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_fill_op(const NodeContext& node) {
-    auto ng_dims = node.get_input(0);
-    auto ng_value = node.get_input(1);
-    auto res = make_shared<Broadcast>(ng_value, ng_dims);
+    default_op_checks(node, 2, {"Fill", "FILL"});
+    auto dims = node.get_input(0);
+    auto value = node.get_input(1);
+
+    auto res = make_shared<v3::Broadcast>(value, dims);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
diff --git a/src/frontends/tensorflow_common/src/op/matmul.cpp b/src/frontends/tensorflow_common/src/op/matmul.cpp
index 21a0591d109b69..5b3f57f6f8506a 100644
--- a/src/frontends/tensorflow_common/src/op/matmul.cpp
+++ b/src/frontends/tensorflow_common/src/op/matmul.cpp
@@ -2,11 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/matmul.hpp"
+
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,29 +15,35 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_mat_mul_op(const NodeContext& node) {
+    default_op_checks(node, 2, {"MatMul"});
+
     auto a = node.get_input(0);
     auto b = node.get_input(1);
     auto transpose_a = node.get_attribute<bool>("transpose_a", false);
     auto transpose_b = node.get_attribute<bool>("transpose_b", false);
 
-    auto res = make_shared<MatMul>(a, b, transpose_a, transpose_b);
+    auto res = make_shared<v0::MatMul>(a, b, transpose_a, transpose_b);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
 
 OutputVector translate_batch_mat_mul_op(const NodeContext& node) {
+    default_op_checks(node, 2, {"BatchMatMul", "BatchMatMulV2", "BATCH_MATMUL"});
+
     auto x = node.get_input(0);
     auto y = node.get_input(1);
 
     auto adj_x = node.get_attribute<bool>("adj_x", false);
     auto adj_y = node.get_attribute<bool>("adj_y", false);
 
-    auto result = make_shared<MatMul>(x, y, adj_x, adj_y);
+    auto result = make_shared<v0::MatMul>(x, y, adj_x, adj_y);
     set_node_name(node.get_name(), result);
     return result->outputs();
 }
 
 OutputVector translate_batch_mat_mul_with_type_op(const NodeContext& node) {
+    default_op_checks(node, 2, {"BatchMatMulV3"});
+
     auto x = node.get_input(0);
     auto y = node.get_input(1);
 
@@ -46,10 +53,10 @@ OutputVector translate_batch_mat_mul_with_type_op(const NodeContext& node) {
     auto adj_y = node.get_attribute<bool>("adj_y", false);
     auto t_out = node.get_attribute<element::Type>("Tout", input_type);
 
-    auto result = make_shared<MatMul>(x, y, adj_x, adj_y)->output(0);
+    auto result = make_shared<v0::MatMul>(x, y, adj_x, adj_y)->output(0);
 
     if (t_out != input_type) {
-        result = make_shared<Convert>(result, t_out);
+        result = make_shared<v0::Convert>(result, t_out);
     }
 
     set_node_name(node.get_name(), result.get_node_shared_ptr());
diff --git a/src/frontends/tensorflow_common/src/op/placeholder.cpp b/src/frontends/tensorflow_common/src/op/placeholder.cpp
index 4b5b6aa00d45fd..574e92eff5cf63 100644
--- a/src/frontends/tensorflow_common/src/op/placeholder.cpp
+++ b/src/frontends/tensorflow_common/src/op/placeholder.cpp
@@ -3,10 +3,10 @@
 //
 
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/parameter.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,24 +14,28 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_placeholder_op(const NodeContext& node) {
-    auto dtype = node.get_attribute<ov::element::Type>("dtype");
-    auto shape = node.get_attribute<ov::PartialShape>("shape", ov::PartialShape::dynamic());
+    default_op_checks(node, 0, {});
+
+    auto dtype = node.get_attribute<element::Type>("dtype");
+    auto shape = node.get_attribute<PartialShape>("shape", PartialShape::dynamic());
 
     if (shape.rank().is_static() && shape.rank().get_length() == 0 && node.has_attribute("_output_shapes")) {
         // we know some cases when Placeholder operation has empty scalar `shape` attribute value
         // and non-empty `_output_shapes` attribute value.
         // `_output_shapes` attribute value turns to be correct in this case
-        auto output_shapes = node.get_attribute<std::vector<ov::PartialShape>>("_output_shapes");
+        auto output_shapes = node.get_attribute<vector<PartialShape>>("_output_shapes");
         if (output_shapes.size() == 1 && output_shapes[0].rank().is_static()) {
             shape = output_shapes[0];
         }
     }
 
-    auto res = std::make_shared<Parameter>(dtype, shape);
+    auto res = make_shared<v0::Parameter>(dtype, shape);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
 
 OutputVector translate_placeholder_with_default_op(const NodeContext& node) {
+    default_op_checks(node, 0, {});
+
     // For parity with legacy frontend, it creates a constant node with the default value
     // As a rule, PlaceholderWithDefault is mainly used for is_training variables in the model
     TENSORFLOW_OP_VALIDATION(node,
diff --git a/src/frontends/tensorflow_common/src/op/relu_6.cpp b/src/frontends/tensorflow_common/src/op/relu_6.cpp
index d30339ca4c7ea9..4fd7da65b01932 100644
--- a/src/frontends/tensorflow_common/src/op/relu_6.cpp
+++ b/src/frontends/tensorflow_common/src/op/relu_6.cpp
@@ -3,18 +3,20 @@
 //
 
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/clamp.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
 namespace tensorflow {
 namespace op {
 
-ov::OutputVector translate_relu_6_op(const NodeContext& node) {
+OutputVector translate_relu_6_op(const NodeContext& node) {
+    default_op_checks(node, 1, {});
+
     auto data = node.get_input(0);
-    auto res = std::make_shared<Clamp>(data, 0.0, 6.0f);
+    auto res = make_shared<v0::Clamp>(data, 0.0, 6.0f);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
diff --git a/src/frontends/tensorflow_common/src/op/round.cpp b/src/frontends/tensorflow_common/src/op/round.cpp
index 1e911e68af40c0..d888860bc9900f 100644
--- a/src/frontends/tensorflow_common/src/op/round.cpp
+++ b/src/frontends/tensorflow_common/src/op/round.cpp
@@ -2,11 +2,12 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/round.hpp"
+
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,11 +15,13 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_round_op(const NodeContext& node) {
+    default_op_checks(node, 1, {"Round", "ROUND"});
+
     auto input = node.get_input(0);
     // using default round mode "half_to_even" in openvino,
     // as TF has only that mode
-    auto round_mode = Round::RoundMode::HALF_TO_EVEN;
-    auto res = make_shared<Round>(input, round_mode);
+    auto round_mode = v5::Round::RoundMode::HALF_TO_EVEN;
+    auto res = make_shared<v5::Round>(input, round_mode);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
diff --git a/src/frontends/tensorflow_common/src/op/square.cpp b/src/frontends/tensorflow_common/src/op/square.cpp
index 94d9c4a623d1c7..1aacdf74926455 100644
--- a/src/frontends/tensorflow_common/src/op/square.cpp
+++ b/src/frontends/tensorflow_common/src/op/square.cpp
@@ -3,10 +3,10 @@
 //
 
 #include "common_op_table.hpp"
-#include "openvino/opsets/opset8.hpp"
+#include "openvino/op/multiply.hpp"
 
 using namespace std;
-using namespace ov::opset8;
+using namespace ov::op;
 
 namespace ov {
 namespace frontend {
@@ -14,8 +14,10 @@ namespace tensorflow {
 namespace op {
 
 OutputVector translate_square_op(const NodeContext& node) {
+    default_op_checks(node, 1, {"Square", "SQUARE"});
+
     auto n = node.get_input(0);
-    auto res = make_shared<Multiply>(n, n);
+    auto res = make_shared<v1::Multiply>(n, n);
     set_node_name(node.get_name(), res);
     return res->outputs();
 }
diff --git a/src/frontends/tensorflow_common/src/op/unary_op.cpp b/src/frontends/tensorflow_common/src/op/unary_op.cpp
index 16c279bd63e9d4..588d1863e3dda7 100644
--- a/src/frontends/tensorflow_common/src/op/unary_op.cpp
+++ b/src/frontends/tensorflow_common/src/op/unary_op.cpp
@@ -50,6 +50,8 @@ namespace op {
 
 OutputVector translate_unary_op(const NodeContext& op,
                                 const function<shared_ptr<Node>(Output<Node>)>& create_unary_op) {
+    default_op_checks(op, 1, {});
+
     auto input = op.get_input(0);
     auto res = create_unary_op(input);
     set_node_name(op.get_name(), res);
diff --git a/src/frontends/tensorflow_common/src/utils.cpp b/src/frontends/tensorflow_common/src/utils.cpp
index adf736d3b2cf84..2d5293a095ea70 100644
--- a/src/frontends/tensorflow_common/src/utils.cpp
+++ b/src/frontends/tensorflow_common/src/utils.cpp
@@ -248,12 +248,17 @@ void default_op_checks(const frontend::NodeContext& node,
                        const vector<string>& supported_ops,
                        bool supported_complex) {
     auto op_type = node.get_op_type();
-    TENSORFLOW_OP_VALIDATION(node,
-                             find(supported_ops.begin(), supported_ops.end(), op_type) != supported_ops.end(),
-                             op_type + " is not supported for conversion.");
-    TENSORFLOW_OP_VALIDATION(node,
-                             node.get_input_size() >= min_input_size,
-                             op_type + " must have at least " + to_string(min_input_size) + " inputs.");
+
+    // we can skip these checks if translator wrapper can be used for multiple operations
+    // check only if supported_ops is defined
+    if (supported_ops.size() > 0) {
+        TENSORFLOW_OP_VALIDATION(node,
+                                 find(supported_ops.begin(), supported_ops.end(), op_type) != supported_ops.end(),
+                                 op_type + " is not supported for conversion.");
+        TENSORFLOW_OP_VALIDATION(node,
+                                 node.get_input_size() >= min_input_size,
+                                 op_type + " must have at least " + to_string(min_input_size) + " inputs.");
+    }
 
     // check if it supports complex type in case complex type input
     bool has_input_complex_type = false;