diff --git a/modules/custom_operations/user_ie_extensions/tokenizer/python/tests/tokenizers_test.py b/modules/custom_operations/user_ie_extensions/tokenizer/python/tests/tokenizers_test.py
index 7828e5c3f..332a6c5a2 100644
--- a/modules/custom_operations/user_ie_extensions/tokenizer/python/tests/tokenizers_test.py
+++ b/modules/custom_operations/user_ie_extensions/tokenizer/python/tests/tokenizers_test.py
@@ -249,8 +249,6 @@ def test_sentencepiece_model_detokenizer(sentencepice_tokenizers, test_string):
     token_ids = hf_tokenizer(test_string, return_tensors="np").input_ids
     hf_output = hf_tokenizer.batch_decode(token_ids, skip_special_tokens=True)
 
-    print('--- HERE 1 ---')
-    print(token_ids.shape)
     ov_output = unpack_strings(ov_detokenizer(token_ids.astype("int32"))["string_output"])
 
     assert ov_output == hf_output