Skip to content

Commit

Permalink
[core] Preserve friendly name and tensor names in PPP (#23713)
Browse files Browse the repository at this point in the history
### Details:
- For models with `version > 10` the node's friendly name and the tensor's
names will not be moved from the original node. If a conversion node is added by
PPP or convert precision, then a new friendly name will be created based on
the previous node. The new node name will have the format
`[previous_node_name].[port_number]`.
- Align the `ConvertPrecision` transformation with PPP
- The issue with lost tensor names when setting them on the model's
inputs/outputs has been solved in #25954
- For models of version 10 the old behavior is preserved as a legacy
compatibility mode

### Tickets:
 - [CVS-127482](https://jira.devtools.intel.com/browse/CVS-127482)

### Depends on:
- #25954 - implements tensor names handling for model outputs (should be
used here)

---------

Signed-off-by: Raasz, Pawel <[email protected]>
Co-authored-by: Michal Lukaszewski <[email protected]>
Co-authored-by: Anastasia Kuporosova <[email protected]>
  • Loading branch information
3 people authored Dec 13, 2024
1 parent c21bbee commit 2796330
Show file tree
Hide file tree
Showing 8 changed files with 173 additions and 55 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ static void regclass_graph_PreProcessSteps(py::module m) {
:param pads_end: Number of elements matches the number of indices in data attribute. Specifies the number of padding elements at the ending of each axis.
:type pads_end: 1D tensor of type T_INT.
:param value: All new elements are populated with this value or with 0 if input not provided. Shouldn’t be set for other pad_mode values.
:type value: scalar tensor of type T.
:type value: scalar tensor of type T.
:param mode: pad_mode specifies the method used to generate new element values.
:type mode: string
:return: Reference to itself, allows chaining of calls in client's code in a builder-like manner.
Expand Down Expand Up @@ -219,7 +219,7 @@ static void regclass_graph_PreProcessSteps(py::module m) {
:param pads_end: Number of elements matches the number of indices in data attribute. Specifies the number of padding elements at the ending of each axis.
:type pads_end: 1D tensor of type T_INT.
:param value: All new elements are populated with this value or with 0 if input not provided. Shouldn’t be set for other pad_mode values.
:type value: scalar tensor of type T.
:type value: scalar tensor of type T.
:param mode: pad_mode specifies the method used to generate new element values.
:type mode: string
:return: Reference to itself, allows chaining of calls in client's code in a builder-like manner.
Expand Down Expand Up @@ -308,7 +308,8 @@ static void regclass_graph_InputTensorInfo(py::module m) {
},
py::arg("layout"),
R"(
Set layout for input tensor info
Set layout for input tensor info
:param layout: layout to be set
:type layout: Union[str, openvino.runtime.Layout]
)");
Expand Down Expand Up @@ -422,7 +423,8 @@ static void regclass_graph_OutputTensorInfo(py::module m) {
},
py::arg("layout"),
R"(
Set layout for output tensor info
Set layout for output tensor info
:param layout: layout to be set
:type layout: Union[str, openvino.runtime.Layout]
)");
Expand Down Expand Up @@ -475,7 +477,8 @@ static void regclass_graph_OutputModelInfo(py::module m) {
},
py::arg("layout"),
R"(
Set layout for output model info
Set layout for output model info
:param layout: layout to be set
:type layout: Union[str, openvino.runtime.Layout]
)");
Expand Down
3 changes: 2 additions & 1 deletion src/bindings/python/tests/test_runtime/test_input_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,8 @@ def test_input_get_source_output(device):
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
name = input_node.get_source_output().get_node().get_friendly_name()
assert name == "relu"
# Expected ReLu node name can be changed if conversion precision applied (new Convert node added)
assert name in ("relu", "relu.0")


def test_input_get_tensor(device):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,8 @@ bool convert_function_precision(const std::shared_ptr<Model>& f,
bool is_changed,
bool is_subgraph,
bool convert_input_output_precision,
bool store_original_precision_as_rt_attribute) {
bool store_original_precision_as_rt_attribute,
bool names_compatibility_mode) {
bool is_output_precision_changed = false;

ov::element::TypeVector orig_result_types;
Expand Down Expand Up @@ -277,7 +278,8 @@ bool convert_function_precision(const std::shared_ptr<Model>& f,
is_changed || is_output_precision_changed,
true,
true,
store_original_precision_as_rt_attribute) ||
store_original_precision_as_rt_attribute,
names_compatibility_mode) ||
is_changed;
}
}
Expand Down Expand Up @@ -325,18 +327,21 @@ bool convert_function_precision(const std::shared_ptr<Model>& f,
if (result->get_input_element_type(0) != orig_result_types[i]) {
auto result_input = result->input_value(0);
const auto convert = std::make_shared<ov::op::v0::Convert>(result_input, orig_result_types[i]);
if (result_input.get_node()->get_output_size() > 1) {
convert->set_friendly_name(result_input.get_node()->get_friendly_name() + "." +
std::to_string(result_input.get_index()));

auto convert_f_name = result_input.get_node()->get_friendly_name();
if (names_compatibility_mode) {
if (result_input.get_node()->get_output_size() > 1) {
convert_f_name += '.' + std::to_string(result_input.get_index());
} else {
result_input.get_node()->set_friendly_name("");
}

convert->get_output_tensor(0).set_names(result_input.get_names());
} else {
convert->set_friendly_name(result_input.get_node()->get_friendly_name());
result_input.get_node()->set_friendly_name("");
convert_f_name += '.' + std::to_string(result_input.get_index());
}
convert->set_friendly_name(convert_f_name);

auto& convert_output_tensor = convert->get_output_tensor(0);
convert_output_tensor.set_names(result_input.get_names());

result_input.set_names({});
result->input(0).replace_source_output(convert->output(0));
result->revalidate_and_infer_types();
}
Expand All @@ -359,6 +364,8 @@ bool convert_precision(ov::pass::PassBase& pass,
// changing precision we need to understand which Constant consumers belongs
// to the current ov::Model
std::unordered_map<const ov::Node*, std::vector<Input<Node>>> const_to_internal_output;

const auto names_compatibility_mode = f->has_rt_info("version") && f->get_rt_info<int64_t>("version") < 11;
return convert_function_precision(f,
type_to_fuse,
type_to_extend,
Expand All @@ -369,7 +376,8 @@ bool convert_precision(ov::pass::PassBase& pass,
false,
false,
convert_input_output_precision,
store_original_precision_as_rt_attribute);
store_original_precision_as_rt_attribute,
names_compatibility_mode);
}

using precisions_set_t = std::unordered_set<ov::element::Type_t, EnumClassHash>;
Expand Down
21 changes: 15 additions & 6 deletions src/common/transformations/tests/utils/convert_precision.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2197,8 +2197,9 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsForParameterAndResult)
auto param_1 = make_shared<opset10::Parameter>(element::f64, Shape{3});
auto converted_param = make_shared<opset10::Convert>(param_1, element::f32);
auto sin = make_shared<opset10::Sin>(converted_param);
sin->get_output_tensor(0).add_names({"sine:0"});
auto converted_sin = make_shared<opset10::Convert>(sin, element::f64);
converted_sin->get_output_tensor(0).add_names({"sine:0"});
converted_sin->set_friendly_name("sine.0");
auto result_sin = make_shared<opset10::Result>(converted_sin);
model_ref = make_shared<Model>(result_sin, ParameterVector{param_1});
}
Expand All @@ -2208,7 +2209,7 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsForParameterAndResult)
ASSERT_TRUE(result.valid) << result.message;

const auto& results = model->get_results();
ASSERT_EQ("sine", results[0]->get_input_node_ptr(0)->get_friendly_name());
ASSERT_EQ("sine.0", results[0]->get_input_node_ptr(0)->get_friendly_name());
}

TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiParam) {
Expand Down Expand Up @@ -2272,8 +2273,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiParam) {
auto converted_mul = make_shared<opset10::Convert>(mul, element::f64);
auto sin = make_shared<opset10::Sin>(convert_1);

converted_add->get_output_tensor(0).add_names({"add:0"});
converted_mul->get_output_tensor(0).add_names({"mul:0"});
add->get_output_tensor(0).add_names({"add:0"});
mul->get_output_tensor(0).add_names({"mul:0"});
sin->get_output_tensor(0).add_names({"sine:0"});

auto result_add = make_shared<opset10::Result>(converted_add);
Expand All @@ -2289,8 +2290,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiParam) {
ASSERT_TRUE(result.valid) << result.message;

const auto& results = model->get_results();
ASSERT_EQ("add", results[0]->get_input_node_ptr(0)->get_friendly_name());
ASSERT_EQ("mul", results[1]->get_input_node_ptr(0)->get_friendly_name());
ASSERT_EQ("add.0", results[0]->get_input_node_ptr(0)->get_friendly_name());
ASSERT_EQ("mul.0", results[1]->get_input_node_ptr(0)->get_friendly_name());
ASSERT_EQ("sine", results[2]->get_input_node_ptr(0)->get_friendly_name());
}

Expand All @@ -2306,6 +2307,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsSingleNodeMultipleOutp
split->get_output_tensor(1).add_names({"split:1"});
split->get_output_tensor(2).add_names({"split:2"});
model = make_shared<Model>(split->outputs(), ParameterVector{param_1});
// set version 10 to use names compatibility mode
model->get_rt_info()["version"] = static_cast<int64_t>(10);

type_to_fuse_map empty_type_to_fuse_map = {};
bool keep_precision_sensitive_in_fp32 = false;
Expand All @@ -2322,6 +2325,9 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsSingleNodeMultipleOutp
auto convert_1 = make_shared<opset10::Convert>(param_1, element::f32);
auto axis = opset10::Constant::create(element::i32, Shape{}, {0});
auto split = make_shared<opset10::Split>(convert_1, axis, 3);
split->get_output_tensor(0).add_names({"split:0"});
split->get_output_tensor(1).add_names({"split:1"});
split->get_output_tensor(2).add_names({"split:2"});

auto convert_split_0 = make_shared<opset10::Convert>(split->output(0), element::f64);
auto convert_split_1 = make_shared<opset10::Convert>(split->output(1), element::f64);
Expand Down Expand Up @@ -2390,6 +2396,8 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiSubgraphs) {
result.get_node()->set_friendly_name("if_result");
result.add_names({"if_result:0"});
model = make_shared<Model>(OutputVector{result}, ParameterVector{cond, param_1, param_2});
// set version 10 to use names compatibility mode
model->get_rt_info()["version"] = static_cast<int64_t>(10);

type_to_fuse_map empty_type_to_fuse_map = {};
bool keep_precision_sensitive_in_fp32 = false;
Expand Down Expand Up @@ -2443,6 +2451,7 @@ TEST(TransformationTests, ConvertPrecisionExplicitConvertsMultiSubgraphs) {
if_op->set_input(convert_1, param_1_then, param_1_else);
if_op->set_input(convert_2, param_2_then, param_2_else);
auto result = if_op->set_output(result_then, result_else);
result.add_names({"if_result:0"});
auto converted_result = make_shared<opset10::Convert>(result, element::f64);
converted_result->get_output_tensor(0).add_names({"if_result:0"});

Expand Down
5 changes: 5 additions & 0 deletions src/core/src/preprocess/pre_post_process.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,10 @@ struct PrePostProcessor::PrePostProcessorImpl {
PrePostProcessorImpl() = default;
explicit PrePostProcessorImpl(const std::shared_ptr<ov::Model>& f) : m_function(f) {
OPENVINO_ASSERT(f, "Model can't be nullptr for PrePostProcessor");

// if IR version < 11, set compatibility mode
const auto names_mode = m_function->has_rt_info("version") && m_function->get_rt_info<int64_t>("version") < 11;

for (size_t i = 0; i < m_function->inputs().size(); ++i) {
auto info = InputInfo();
info.m_impl->m_resolved_param = m_function->get_parameters()[i];
Expand All @@ -64,6 +68,7 @@ struct PrePostProcessor::PrePostProcessorImpl {
for (size_t i = 0; i < m_function->outputs().size(); ++i) {
auto info = OutputInfo();
info.m_impl->m_output_node = m_function->output(i);
info.m_impl->get_tensor_data()->set_names_compatibility_mode(names_mode);
m_outputs.push_back(std::move(info));
}
}
Expand Down
50 changes: 30 additions & 20 deletions src/core/src/preprocess/preprocess_impls.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -370,30 +370,40 @@ void OutputInfo::OutputInfoImpl::build(ov::ResultVector& results) {
}

auto orig_parent = result->get_input_source_output(0).get_node_shared_ptr();
// Move result tensor names from previous input to new
const auto result_input_names = result->get_input_tensor(0).get_names();
result->get_input_tensor(0).set_names({});
node.get_tensor().set_names(result_input_names);

if (!post_processing_applied) {
return;
}

if (orig_parent->get_output_size() == 1) {
node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name());
if (get_tensor_data()->get_names_compatibility_mode()) {
// Move result tensor names from previous input to new
const auto result_input_names = result->get_input_tensor(0).get_names();
result->get_input_tensor(0).set_names({});
node.get_tensor().set_names(result_input_names);

if (!post_processing_applied) {
return;
}

// Reset friendly name of input node to avoid names collision
// when there is at a new node inserted by post-processing steps
// If no new nodes are inserted by post-processing, then we need to preserve friendly name of input
// as it's required for old API correct work
result->get_input_source_output(0).get_node_shared_ptr()->set_friendly_name("");
if (orig_parent->get_output_size() == 1) {
node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name());

// Reset friendly name of input node to avoid names collision
// when there is at a new node inserted by post-processing steps
// If no new nodes are inserted by post-processing, then we need to preserve friendly name of input
// as it's required for old API correct work
result->get_input_source_output(0).get_node_shared_ptr()->set_friendly_name("");
} else if (node.get_node_shared_ptr() != orig_parent) {
// Result node is changed - add ".<idx>" suffix
node.get_node_shared_ptr()->set_friendly_name(
orig_parent->get_friendly_name() + "." +
std::to_string(result->get_input_source_output(0).get_index()));
}
result->input(0).replace_source_output(node);
result->revalidate_and_infer_types();
} else if (node.get_node_shared_ptr() != orig_parent) {
// Result node is changed - add ".<idx>" suffix
node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name() + "." +
std::to_string(result->get_input_source_output(0).get_index()));
const auto suffix = std::string(".") + std::to_string(result->get_input_source_output(0).get_index());
node.get_node_shared_ptr()->set_friendly_name(orig_parent->get_friendly_name() + suffix);

result->input(0).replace_source_output(node);
result->revalidate_and_infer_types();
}
result->input(0).replace_source_output(node);
result->revalidate_and_infer_types();

// Update layout
if (!context.layout().empty()) {
Expand Down
9 changes: 9 additions & 0 deletions src/core/src/preprocess/preprocess_impls.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -122,12 +122,21 @@ class TensorInfoImplBase {
return m_layout;
}

void set_names_compatibility_mode(const bool compatiblity_mode) {
m_names_compatiblity_mode = compatiblity_mode;
}

const bool get_names_compatibility_mode() const {
return m_names_compatiblity_mode;
}

protected:
element::Type m_type = element::dynamic;
bool m_type_set = false;

Layout m_layout = Layout();
bool m_layout_set = false;
bool m_names_compatiblity_mode = false;
};

class OutputTensorInfo::OutputTensorInfoImpl : public TensorInfoImplBase {};
Expand Down
Loading

0 comments on commit 2796330

Please sign in to comment.