From c27589fa751e461511bb3d762139456ad1e30e49 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Wed, 17 Jul 2024 17:19:17 +0400 Subject: [PATCH] cleanup --- .../intel_gpu/plugin/program_builder.hpp | 1 - .../graph/graph_optimizer/prepare_padding.cpp | 113 ++++++++---------- .../graph/impls/ocl/kernel_selector_helper.h | 1 - .../intel_gpu/src/plugin/ops/gather tree.cpp | 29 +---- .../intel_gpu/src/plugin/ops/gather.cpp | 25 ---- .../intel_gpu/src/plugin/program_builder.cpp | 31 ----- .../fusions/fully_connected_fusion_test.cpp | 1 - .../tests/unit/passes/kernels_cache_test.cpp | 1 - .../unit/passes/post_optimize_weights.cpp | 1 - .../passes/prepare_buffer_fusing_test.cpp | 4 - .../passes/select_preferred_formats_test.cpp | 1 - .../unit/test_cases/convolution_gpu_test.cpp | 1 - .../test_cases/dynamic_quantize_gpu_test.cpp | 2 - .../unit/test_cases/eltwise_gpu_test.cpp | 5 +- .../test_cases/fully_connected_gpu_test.cpp | 2 - .../unit/test_cases/quantize_gpu_test.cpp | 1 - .../unit/test_cases/reorder_gpu_test.cpp | 1 - 17 files changed, 56 insertions(+), 164 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp index e0b94070249524..2e094fa71f8771 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/program_builder.hpp @@ -138,7 +138,6 @@ class ProgramBuilder final { void add_primitive(const ov::Node& op, std::shared_ptr prim, std::vector aliases = {}); - bool requires_new_shape_infer(const std::shared_ptr& op) const; bool is_inner_program() const { return m_is_inner_program; } bool is_query_mode() { return queryMode; } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp index 0daffb74fdf0b5..6aeac050714a8e 100644 --- 
a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp @@ -13,6 +13,53 @@ using namespace cldnn; using namespace ov::intel_gpu; +namespace { + +template +padding convert_paddings(const padding& current_pad, const T& pad_begin, const T& pad_end, size_t spatial_rank) { + tensor::value_type pb_z = std::max(pad_begin.size() >= 3 ? pad_begin[pad_begin.size() - 3] : 0, 0); + tensor::value_type pb_y = std::max(pad_begin.size() >= 2 ? pad_begin[pad_begin.size() - 2] : 0, 0); + tensor::value_type pb_x = std::max(pad_begin.size() >= 1 ? pad_begin[pad_begin.size() - 1] : 0, 0); + + tensor::value_type pe_z = std::max(pad_end.size() >= 3 ? pad_end[pad_end.size() - 3] : 0, 0); + tensor::value_type pe_y = std::max(pad_end.size() >= 2 ? pad_end[pad_end.size() - 2] : 0, 0); + tensor::value_type pe_x = std::max(pad_end.size() >= 1 ? pad_end[pad_end.size() - 1] : 0, 0); + + const auto& lower_sizes = current_pad._lower_size; + const auto& upper_sizes = current_pad._upper_size; + + std::vector needed_lpad, needed_upad; + needed_lpad.push_back(lower_sizes[0]); + needed_lpad.push_back(lower_sizes[1]); + + needed_upad.push_back(upper_sizes[0]); + needed_upad.push_back(upper_sizes[1]); + if (spatial_rank == 3) { + needed_lpad.push_back(std::max(pb_z, lower_sizes[2])); + needed_lpad.push_back(std::max(pb_y, lower_sizes[3])); + needed_lpad.push_back(std::max(pb_x, lower_sizes[4])); + + needed_upad.push_back(std::max(pe_z, upper_sizes[2])); + needed_upad.push_back(std::max(pe_y, upper_sizes[3])); + needed_upad.push_back(std::max(pe_x, upper_sizes[4])); + } else if (spatial_rank == 2) { + needed_lpad.push_back(std::max(pb_y, lower_sizes[2])); + needed_lpad.push_back(std::max(pb_x, lower_sizes[3])); + + needed_upad.push_back(std::max(pe_y, upper_sizes[2])); + needed_upad.push_back(std::max(pe_x, upper_sizes[3])); + } else { + needed_lpad.push_back(std::max(pb_x, lower_sizes[2])); + 
needed_upad.push_back(std::max(pe_x, upper_sizes[2])); + } + + padding needed_padding(needed_lpad, needed_upad); + + return needed_padding; +} + +} // namespace + void prepare_padding::run(program& p) { if (output_size_handling_enabled) { // Prepare upper padding for primitives that support output_size parameter. @@ -66,43 +113,7 @@ void prepare_padding::run(program& p) { auto padding_begin = prim->padding_begin; auto padding_end = prim->padding_end; - tensor::value_type pb_z = std::max(padding_begin.size() >= 3 ? padding_begin[padding_begin.size() - 3] : 0, 0); - tensor::value_type pb_y = std::max(padding_begin.size() >= 2 ? padding_begin[padding_begin.size() - 2] : 0, 0); - tensor::value_type pb_x = std::max(padding_begin.size() >= 1 ? padding_begin[padding_begin.size() - 1] : 0, 0); - - tensor::value_type pe_z = std::max(padding_end.size() >= 3 ? padding_end[padding_end.size() - 3] : 0, 0); - tensor::value_type pe_y = std::max(padding_end.size() >= 2 ? padding_end[padding_end.size() - 2] : 0, 0); - tensor::value_type pe_x = std::max(padding_end.size() >= 1 ? 
padding_end[padding_end.size() - 1] : 0, 0); - - const auto& lower_sizes = in_layout.data_padding._lower_size; - const auto& upper_sizes = in_layout.data_padding._upper_size; - - std::vector needed_lpad, needed_upad; - needed_lpad.push_back(lower_sizes[0]); - needed_lpad.push_back(lower_sizes[1]); - - needed_upad.push_back(upper_sizes[0]); - needed_upad.push_back(upper_sizes[1]); - if (spatial_rank == 3) { - needed_lpad.push_back(std::max(pb_z, lower_sizes[2])); - needed_lpad.push_back(std::max(pb_y, lower_sizes[3])); - needed_lpad.push_back(std::max(pb_x, lower_sizes[4])); - - needed_upad.push_back(std::max(pe_z, upper_sizes[2])); - needed_upad.push_back(std::max(pe_y, upper_sizes[3])); - needed_upad.push_back(std::max(pe_x, upper_sizes[4])); - } else if (spatial_rank == 2) { - needed_lpad.push_back(std::max(pb_y, lower_sizes[2])); - needed_lpad.push_back(std::max(pb_x, lower_sizes[3])); - - needed_upad.push_back(std::max(pe_y, upper_sizes[2])); - needed_upad.push_back(std::max(pe_x, upper_sizes[3])); - } else { - needed_lpad.push_back(std::max(pb_x, lower_sizes[2])); - needed_upad.push_back(std::max(pb_x, upper_sizes[2])); - } - - padding needed_padding(needed_lpad, needed_upad); + auto needed_padding = convert_paddings(in_layout.data_padding, padding_begin, padding_end, spatial_rank); add_required_padding(prim_node, needed_padding); } else if (node->is_type()) { @@ -131,33 +142,9 @@ void prepare_padding::run(program& p) { auto padding_begin = prim->pads_begin; auto padding_end = prim->pads_end; - tensor::value_type pb_z = std::max(padding_begin.size() >= 3 ? padding_begin[padding_begin.size() - 3] : 0, 0); - tensor::value_type pb_y = std::max(padding_begin.size() >= 2 ? padding_begin[padding_begin.size() - 2] : 0, 0); - tensor::value_type pb_x = std::max(padding_begin.size() >= 1 ? padding_begin[padding_begin.size() - 1] : 0, 0); - - tensor::value_type pe_z = std::max(padding_end.size() >= 3 ? 
padding_end[padding_end.size() - 3] : 0, 0); - tensor::value_type pe_y = std::max(padding_end.size() >= 2 ? padding_end[padding_end.size() - 2] : 0, 0); - tensor::value_type pe_x = std::max(padding_end.size() >= 1 ? padding_end[padding_end.size() - 1] : 0, 0); - - tensor pad_l = tensor(0); - tensor pad_u = tensor(0); - pad_l.spatial[0] = pb_x; - pad_l.spatial[1] = pb_y; - pad_l.spatial[2] = pb_z; - - pad_u.spatial[0] = pe_x; - pad_u.spatial[1] = pe_y; - pad_u.spatial[2] = pe_z; - auto in_layout = prim_node.get_input_layout(); - - const auto& actual_lpad = in_layout.data_padding.lower_size(); - const auto& actual_upad = in_layout.data_padding.upper_size(); - - auto needed_lpad = tensor::max(pad_l, actual_lpad); - auto needed_upad = tensor::max(pad_u, actual_upad); - - padding needed_padding(needed_lpad.sizes(), needed_upad.sizes()); + const auto spatial_rank = in_layout.get_spatial_rank(); + auto needed_padding = convert_paddings(in_layout.data_padding, padding_begin, padding_end, spatial_rank); add_required_padding(prim_node, needed_padding); } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h index 4229cd57128ebb..e63cf59a6f36c8 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kernel_selector_helper.h @@ -117,7 +117,6 @@ kernel_selector::dim_tensor convert_dim_vector(const tensor& t) { static_cast(sizes[5])}; } - inline kernel_selector::DimTensor convert_vec_to_dim_tensor(const std::vector& p, size_t out_rank, int32_t default_value) { auto sizes = p; auto format = cldnn::format::get_default_format(out_rank); diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather tree.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather tree.cpp index 1c81d311975916..2f1e7faf0b9de3 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather tree.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather tree.cpp @@ 
-18,32 +18,11 @@ static void CreateGatherTreeOp(ProgramBuilder& p, const std::shared_ptr reordered_inputs; - reordered_inputs.resize(inputs.size()); - - for (size_t portIndex = 0; portIndex < inputs.size(); portIndex++) { - auto inputDataType = cldnn::element_type_to_data_type(op->get_input_element_type(portIndex)); - if (inputDataType == cldnn::data_types::i64) { - // GPU primitive does not support i64 inputs, - // so we need additional reorders to convert them to i32 - auto reorderPrimName = inputs[portIndex].pid + "_" + op->get_friendly_name() + ProgramBuilder::m_preProcessTag; - auto targetFormat = cldnn::format::get_default_format(op->get_input_shape(portIndex).size()); - auto preprocessPrim = cldnn::reorder(reorderPrimName, - inputs[portIndex], - targetFormat, - cldnn::data_types::i32); - p.add_primitive(*op, preprocessPrim); - reordered_inputs[portIndex] = cldnn::input_info(reorderPrimName); - } else { - reordered_inputs[portIndex] = inputs[portIndex]; - } - } - auto gatherTreePrim = cldnn::gather_tree(layerName, - reordered_inputs[0], - reordered_inputs[1], - reordered_inputs[2], - reordered_inputs[3]); + inputs[0], + inputs[1], + inputs[2], + inputs[3]); p.add_primitive(*op, gatherTreePrim); } diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather.cpp index 31458e54f760c5..01924850e6e07b 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/gather.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather.cpp @@ -39,7 +39,6 @@ void CreateGatherOpBase(ProgramBuilder& p, const std::shared_ptr& op, const i ov::Shape out_shape = is_static ? op->get_output_shape(0) : ov::Shape{}; // Update output_shape in case of scalar indice - bool need_reshape = false; auto out_shape_original = out_shape; // WA for NMS->Gather construction. 
NMS fills part of the output blob by the -1 if these values @@ -55,9 +54,6 @@ void CreateGatherOpBase(ProgramBuilder& p, const std::shared_ptr& op, const i // Set layer name for Gather auto reshapeName = layerName + ""; - if (need_reshape) { - layerName = layerName + "_reshape_output"; - } // Check if Gather could be converted to other primitive const auto input_shape = op->get_input_partial_shape(0); @@ -140,27 +136,6 @@ void CreateGatherOpBase(ProgramBuilder& p, const std::shared_ptr& op, const i p.add_primitive(*op, gatherPrim); } } - - // Add reorder and reshape for scalar indice - if (need_reshape) { - auto input = inputs[0]; - input.pid = layerName; - - auto targetFormat = cldnn::format::get_default_format(out_shape_original.size()); - if (targetFormat.value != cldnn::format::get_default_format(out_shape.size()).value) { - auto reorderName = layerName + "_cldnn_in_reorder"; - auto targetDatatype = cldnn::element_type_to_data_type(op->get_input_element_type(0)); - auto reorderPrim = cldnn::reorder(reorderName, - input, - targetFormat, - targetDatatype); - p.add_primitive(*op, reorderPrim); - input.pid = reorderName; - } - - auto reshapePrim = cldnn::reshape(reshapeName, input, tensor_from_dims(out_shape_original)); - p.add_primitive(*op, reshapePrim); - } } static void CreateGatherOp(ProgramBuilder& p, const std::shared_ptr& op) { diff --git a/src/plugins/intel_gpu/src/plugin/program_builder.cpp b/src/plugins/intel_gpu/src/plugin/program_builder.cpp index 263a7b92c0c771..1a6c1ebcbb4027 100644 --- a/src/plugins/intel_gpu/src/plugin/program_builder.cpp +++ b/src/plugins/intel_gpu/src/plugin/program_builder.cpp @@ -315,37 +315,6 @@ void ProgramBuilder::add_primitive(const ov::Node& op, std::shared_ptradd_primitive(prim); } -bool ProgramBuilder::requires_new_shape_infer(const std::shared_ptr& op) const { - if (op->is_dynamic()) { - return true; - } - - if (ov::is_type(op)) { - const auto body_function = std::static_pointer_cast(op)->get_function(); - if 
(body_function->is_dynamic()) - return true; - } - // When input node has dynamic shape with 4 dimension, this function return false - // because op.is_dynamic() which only checks input shapes return false. - // So, in the case of input data, we need to check output shape. - for (size_t i = 0; i < op->get_output_size(); i++) { - if (op->get_output_partial_shape(i).is_dynamic()) - return true; - } - - for (size_t i = 0; i < op->get_output_size(); i++) { - if (op->get_output_partial_shape(i).size() > 6) - return true; - } - - for (size_t i = 0; i < op->get_input_size(); i++) { - if (op->get_input_partial_shape(i).size() > 6) - return true; - } - - return false; -} - int64_t ProgramBuilder::get_parameter_index(const std::shared_ptr& parameter) const { return m_model->get_parameter_index(parameter); } diff --git a/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp index fecd9ec941c2a2..1adeaa41615eda 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp @@ -614,7 +614,6 @@ TEST_P(fc_fp16_eltwise_prod_unfused_dynamic, basic) { ); bool is_dynamic = true; - cfg_not_fused.set_property(ov::intel_gpu::allow_new_shape_infer(is_dynamic)); tolerance = 1e-2f; execute(p, false, is_dynamic); } diff --git a/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp index 07ef9381c62ab2..1d29dc56ce5665 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/kernels_cache_test.cpp @@ -164,7 +164,6 @@ TEST(kernels_cache, reuse_kernels_property) { reorder("output", input_info("sum"), {{3, 2}, data_types::f16, format::bfyx})); ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); 
config.set_property(ov::intel_gpu::hint::enable_kernels_reuse(true)); auto prog = program::build_program(engine, topology, config, false, false); auto& cache = prog->get_kernels_cache(); diff --git a/src/plugins/intel_gpu/tests/unit/passes/post_optimize_weights.cpp b/src/plugins/intel_gpu/tests/unit/passes/post_optimize_weights.cpp index 25ee021e95d3bc..2b18676421ba92 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/post_optimize_weights.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/post_optimize_weights.cpp @@ -265,7 +265,6 @@ TEST(post_optimize_weights, onednn_group_conv_weights_reorder_test) { ExecutionConfig config = get_test_default_config(engine); config.set_property(ov::intel_gpu::optimize_data(true)); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); if (engine.get_device_info().supports_immad) { ov::intel_gpu::ImplementationDesc conv_impl = { format::b_fs_yx_fsv16, std::string(""), impl_types::onednn }; diff --git a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp index 04f9d330be1ac5..8a244edb339c85 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/prepare_buffer_fusing_test.cpp @@ -834,7 +834,6 @@ TEST(prepare_buffer_fusing, in_place_crop_dynamic_reshape_unsqueeze) { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); network network(engine, topology, config); @@ -904,7 +903,6 @@ TEST(prepare_buffer_fusing, in_place_crop_dynamic_reshape_squeeze_crop_axis) { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); network network(engine, topology, config); @@ -986,7 +984,6 @@ TEST(prepare_buffer_fusing, 
in_place_crop_dynamic_split_lengths) { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); network network(engine, topology, config); @@ -1068,7 +1065,6 @@ TEST(prepare_buffer_fusing, in_place_crop_dynamic_mvn) { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); network network(engine, topology, config); diff --git a/src/plugins/intel_gpu/tests/unit/passes/select_preferred_formats_test.cpp b/src/plugins/intel_gpu/tests/unit/passes/select_preferred_formats_test.cpp index eed75e26234040..bda86a2053f533 100644 --- a/src/plugins/intel_gpu/tests/unit/passes/select_preferred_formats_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/passes/select_preferred_formats_test.cpp @@ -83,7 +83,6 @@ TEST(test_select_preferred_formats, fsv2_fallback_to_byxf) { topology.add(convolution("conv1", input_info("reorder"), "weights", "", 2, {1, 1}, {1, 1}, {2, 1}, {0, 1}, true)); ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); ov::intel_gpu::ImplementationDesc impl = { format::b_fs_yx_fsv16, std::string(""), impl_types::onednn }; config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ {"conv1", impl} })); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 98ce1a4f5291c4..8dc8d9d3488d26 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -4673,7 +4673,6 @@ TEST(convolution_int8_fw_gpu, quantized_convolution_u8s8f32_asymmetric_activatio reorder("out", input_info("conv"), format::bfyx, data_types::f32)); ExecutionConfig config = 
get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); ov::intel_gpu::ImplementationDesc conv_impl; if (engine.get_device_info().supports_immad) { conv_impl = { format::b_fs_yx_fsv16, "", impl_types::onednn }; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp index c1686e359e91a0..6075fabf20158c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp @@ -62,7 +62,6 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); ov::intel_gpu::ImplementationDesc dyn_quan_impl_desc = { format::bfyx, "dynamic_quantize_gpu_ref", impl_types::ocl }; @@ -87,7 +86,6 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); network::ptr network = get_network(engine, topology, config, get_test_stream_ptr(), is_caching_test); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp index d94fe0dd7c05fe..85f971a448e637 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/eltwise_gpu_test.cpp @@ -249,7 +249,7 @@ void generic_eltwise_int_test(cldnn::format test_input_fmt, int input2_max_val) { static_assert(std::is_integral::value, "T must be an integral type"); static_assert(std::is_integral::value, "TOut must be an integral type"); - + tests::random_generator rg(GET_SUITE_NAME); VVVVF input1_rnd = 
rg.generate_random_4d(input_b, input_f, input_y, input_x, input1_min_val, input1_max_val); @@ -299,7 +299,7 @@ void generic_eltwise_int_test(cldnn::format test_input_fmt, bool test_is_correct = true; VF output_cpu_vec = flatten_4d(test_input_fmt, output_cpu); for (size_t i = 0; i < output_cpu_vec.size(); ++i) { - const TOut cpu_val = output_cpu_vec[i]; + const TOut cpu_val = output_cpu_vec[i]; const TOut gpu_val = output_ptr[i]; if (cpu_val != gpu_val) { test_is_correct = false; @@ -1680,7 +1680,6 @@ TEST(eltwise_gpu_f32, dynamic_padding) { 0.5f, 2.5f }); ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); // config.set_property(ov::intel_gpu::optimize_data(true)); network network(engine, topology, config); network.set_input_data("input1", input1); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp index 17a9cacd213376..be9aeeed651d52 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/fully_connected_gpu_test.cpp @@ -2574,7 +2574,6 @@ class fully_connected_gpu_tests: public ::testing::Test { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); ov::intel_gpu::ImplementationDesc fc_impl_desc = { format::bfyx, "fully_connected_gpu_bf_tiled", impl_types::ocl }; config.set_property(ov::intel_gpu::force_implementations(ov::intel_gpu::ImplForcingMap{ {"fc_prim", fc_impl_desc} })); config.set_property(ov::hint::dynamic_quantization_group_size(0)); @@ -2600,7 +2599,6 @@ class fully_connected_gpu_tests: public ::testing::Test { ); auto config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); 
config.set_property(ov::hint::dynamic_quantization_group_size(quantize_group_size)); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp index da396e83d01040..560f4f90812991 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/quantize_gpu_test.cpp @@ -827,7 +827,6 @@ TEST(quantize_gpu, dynamic_fsv16) { ); ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); network network(engine, topology, config); network.set_input_data("input", input); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp index 1400229f67831e..7356e974b10ee0 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/reorder_gpu_test.cpp @@ -1553,7 +1553,6 @@ TEST(reorder_gpu_f32, dynamic_bfyx_to_fsv16) { reorder("output_reorder", input_info("relu"), format::bfyx, data_types::f32)); ExecutionConfig config = get_test_default_config(engine); - config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); network network(engine, topology, config); auto fsv16_reorder_inst = network.get_primitive("reorder");