From 9d16441e4fe4565a3e70522706a59b7085424f1d Mon Sep 17 00:00:00 2001 From: kjohew <33981793+kjohew@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:25:37 +0800 Subject: [PATCH 01/36] Fix the memory usage issue of logits in generate() (#34813) --- src/transformers/generation/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 432b3142873..e3657550d0e 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -3246,7 +3246,7 @@ def _sample( # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration # (the clone itself is always small) - next_token_logits = outputs.logits.clone()[:, -1, :].float() + next_token_logits = outputs.logits[:, -1, :].clone().float() next_token_logits = next_token_logits.to(input_ids.device) # pre-process distribution From 8cadf76e1c72eabbff24099c5c0a2a98edbb00ef Mon Sep 17 00:00:00 2001 From: Phillip Kuznetsov Date: Wed, 20 Nov 2024 02:31:21 -0800 Subject: [PATCH 02/36] fix(DPT,Depth-Anything) `torch.export` (#34103) * Fix torch.export issue in dpt based models Signed-off-by: Phillip Kuznetsov * Simplify the if statements Signed-off-by: Phillip Kuznetsov * Move activation definitions of zoe_depth to init() Signed-off-by: Phillip Kuznetsov * Add test_export for dpt and zoedepth Signed-off-by: Phillip Kuznetsov * add depth anything Signed-off-by: Phillip Kuznetsov * Remove zoedepth non-automated zoedepth changes and zoedepth test Signed-off-by: Phillip Kuznetsov * [run_slow] dpt, depth_anything, zoedepth Signed-off-by: Phillip Kuznetsov --------- Signed-off-by: Phillip Kuznetsov --- .../depth_anything/modeling_depth_anything.py | 16 +++++------ src/transformers/models/dpt/modeling_dpt.py | 13 +++++---- .../models/zoedepth/modeling_zoedepth.py | 13 +++++---- .../test_modeling_depth_anything.py | 28 +++++++++++++++++++ tests/models/dpt/test_modeling_dpt.py | 22 +++++++++++++++ 5 files changed, 72 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py index 59c62878632..4667c413457 100644 --- a/src/transformers/models/depth_anything/modeling_depth_anything.py +++ b/src/transformers/models/depth_anything/modeling_depth_anything.py @@ -224,16 +224,16 @@ def forward(self, hidden_states, size=None): hidden_states = hidden_states[::-1] fused_hidden_states = [] - # first layer only uses the last hidden_state - size = hidden_states[1].shape[2:] - fused_hidden_state = self.layers[0](hidden_states[0], size=size) - fused_hidden_states.append(fused_hidden_state) + fused_hidden_state = None - # looping from the last layer to the second - for idx, (hidden_state, layer) in enumerate(zip(hidden_states[1:], self.layers[1:])): - size = hidden_states[1:][idx + 1].shape[2:] if idx != (len(hidden_states[1:]) - 1) else None + for idx, (hidden_state, layer) in enumerate(zip(hidden_states, self.layers)): + size = hidden_states[idx + 1].shape[2:] if idx != (len(hidden_states) - 1) else None - fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size) + if fused_hidden_state is None: + # first layer only uses the last hidden_state + fused_hidden_state = layer(hidden_state, size=size) + else: + fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size) fused_hidden_states.append(fused_hidden_state) diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index 2d4654a234c..5886d288b88 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -689,12 +689,13 @@ def forward(self, hidden_states): hidden_states = hidden_states[::-1] fused_hidden_states = [] - # first layer only uses the last hidden_state - fused_hidden_state = self.layers[0](hidden_states[0]) - fused_hidden_states.append(fused_hidden_state) - # looping from the last layer to the second - for hidden_state, layer in zip(hidden_states[1:], self.layers[1:]): - fused_hidden_state = layer(fused_hidden_state, hidden_state) + fused_hidden_state = None + for hidden_state, layer in zip(hidden_states, self.layers): + if fused_hidden_state is None: + # first layer only uses the last hidden_state + fused_hidden_state = layer(hidden_state) + else: + fused_hidden_state = layer(fused_hidden_state, hidden_state) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states diff --git a/src/transformers/models/zoedepth/modeling_zoedepth.py b/src/transformers/models/zoedepth/modeling_zoedepth.py index 979b78aba67..5cbbdcdc04b 100644 --- a/src/transformers/models/zoedepth/modeling_zoedepth.py +++ b/src/transformers/models/zoedepth/modeling_zoedepth.py @@ -185,12 +185,13 @@ def forward(self, hidden_states): hidden_states = hidden_states[::-1] fused_hidden_states = [] - # first layer only uses the last hidden_state - fused_hidden_state = self.layers[0](hidden_states[0]) - fused_hidden_states.append(fused_hidden_state) - # looping from the last layer to the second - for hidden_state, layer in zip(hidden_states[1:], self.layers[1:]): - fused_hidden_state = layer(fused_hidden_state, hidden_state) + fused_hidden_state = None + for hidden_state, layer in zip(hidden_states, self.layers): + if fused_hidden_state is None: + # first layer only uses the last hidden_state + fused_hidden_state = layer(hidden_state) + else: + fused_hidden_state = layer(fused_hidden_state, hidden_state) fused_hidden_states.append(fused_hidden_state) return fused_hidden_states diff --git a/tests/models/depth_anything/test_modeling_depth_anything.py b/tests/models/depth_anything/test_modeling_depth_anything.py index 344d949fa4f..6e7b423e9ec 100644 --- a/tests/models/depth_anything/test_modeling_depth_anything.py +++ b/tests/models/depth_anything/test_modeling_depth_anything.py @@ -18,6 +18,7 @@ from transformers import DepthAnythingConfig, Dinov2Config from transformers.file_utils import is_torch_available, is_vision_available +from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester @@ -290,3 +291,30 @@ def test_inference(self): ).to(torch_device) self.assertTrue(torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)) + + def test_export(self): + for strict in [True, False]: + with self.subTest(strict=strict): + if not is_torch_greater_or_equal_than_2_4: + self.skipTest(reason="This test requires torch >= 2.4 to run.") + model = ( + DepthAnythingForDepthEstimation.from_pretrained("LiheYoung/depth-anything-small-hf") + .to(torch_device) + .eval() + ) + image_processor = DPTImageProcessor.from_pretrained("LiheYoung/depth-anything-small-hf") + image = prepare_img() + inputs = image_processor(images=image, return_tensors="pt").to(torch_device) + + exported_program = torch.export.export( + model, + args=(inputs["pixel_values"],), + strict=strict, + ) + with torch.no_grad(): + eager_outputs = model(**inputs) + exported_outputs = exported_program.module().forward(inputs["pixel_values"]) + self.assertEqual(eager_outputs.predicted_depth.shape, exported_outputs.predicted_depth.shape) + self.assertTrue( + torch.allclose(eager_outputs.predicted_depth, exported_outputs.predicted_depth, atol=1e-4) + ) diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 376ea8b3100..7f841fbb2ef 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -18,6 +18,7 @@ from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available +from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester @@ -410,3 +411,24 @@ def test_post_processing_depth_estimation(self): ).squeeze() self.assertTrue(output_enlarged.shape == expected_shape) self.assertTrue(torch.allclose(predicted_depth_l, output_enlarged, rtol=1e-3)) + + def test_export(self): + for strict in [True, False]: + with self.subTest(strict=strict): + if not is_torch_greater_or_equal_than_2_4: + self.skipTest(reason="This test requires torch >= 2.4 to run.") + model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device).eval() + image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade") + image = prepare_img() + inputs = image_processor(images=image, return_tensors="pt").to(torch_device) + + exported_program = torch.export.export( + model, + args=(inputs["pixel_values"],), + strict=strict, + ) + with torch.no_grad(): + eager_outputs = model(**inputs) + exported_outputs = exported_program.module().forward(inputs["pixel_values"]) + self.assertEqual(eager_outputs.logits.shape, exported_outputs.logits.shape) + self.assertTrue(torch.allclose(eager_outputs.logits, exported_outputs.logits, atol=1e-4)) From f297af55dfc27485189f352cd36b4683de12e0b3 Mon Sep 17 00:00:00 2001 From: Tibor Reiss <75096465+tibor-reiss@users.noreply.github.com> Date: Wed, 20 Nov 2024 11:32:07 +0100 Subject: [PATCH 03/36] Fix: take into account meta device (#34134) * Do not load for meta device * Make some minor improvements * Add test * Update tests/utils/test_modeling_utils.py Update test parameters Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> * Make the test simpler --------- Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> --- src/transformers/modeling_utils.py | 5 ++++- tests/utils/test_modeling_utils.py | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index d68166d5268..6f2c6c194f2 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -361,6 +361,9 @@ def check_support_param_buffer_assignment(model_to_load, state_dict, start_prefi Note: We fully disable this if we are using `deepspeed` """ + if model_to_load.device.type == "meta": + return False + if len([key for key in state_dict if key.startswith(start_prefix)]) == 0: return False @@ -375,7 +378,7 @@ def check_support_param_buffer_assignment(model_to_load, state_dict, start_prefi return False # If the model does, the incoming `state_dict` and the `model_to_load` must be the same dtype - first_key = list(model_to_load.state_dict().keys())[0] + first_key = next(iter(model_to_load.state_dict().keys())) if start_prefix + first_key in state_dict: return state_dict[start_prefix + first_key].dtype == model_to_load.state_dict()[first_key].dtype diff --git a/tests/utils/test_modeling_utils.py b/tests/utils/test_modeling_utils.py index 96a30df7e55..85e7c20dd52 100644 --- a/tests/utils/test_modeling_utils.py +++ b/tests/utils/test_modeling_utils.py @@ -14,6 +14,7 @@ # limitations under the License. import copy import glob +import itertools import json import os import os.path @@ -459,6 +460,19 @@ def test_model_from_config_torch_dtype_str(self): with self.assertRaises(ValueError): model = AutoModel.from_pretrained(TINY_T5, torch_dtype="int64") + @require_torch + def test_model_from_pretrained_meta_device(self): + def is_on_meta(model_id, dtype): + with torch.device("meta"): + model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype) + return all(value.device.type == "meta" for value in model.state_dict().values()) + + model_ids = ("fxmarty/tiny-llama-fast-tokenizer", "fxmarty/small-llama-testing") + dtypes = (None, "auto", torch.float16) + + for model_id, dtype in itertools.product(model_ids, dtypes): + self.assertTrue(is_on_meta(model_id, dtype)) + def test_model_from_pretrained_torch_dtype(self): # test that the model can be instantiated with dtype of either # 1. explicit from_pretrained's torch_dtype argument From 67890de3b86c81fb4775f41b4690b2abaf2a19cf Mon Sep 17 00:00:00 2001 From: Marc Sun <57196510+SunMarc@users.noreply.github.com> Date: Wed, 20 Nov 2024 17:24:45 +0100 Subject: [PATCH 04/36] Torchao weights only + prequantized compability (#34355) * weights only compability * better tests from code review * ping torch version * add weights_only check --- src/transformers/modeling_utils.py | 6 +- .../quantizers/quantizer_torchao.py | 19 ++++ .../torchao_integration/test_torchao.py | 95 +++++++++++++++++++ 3 files changed, 119 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 6f2c6c194f2..f679f7a190f 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -3602,7 +3602,11 @@ def from_pretrained( if hf_quantizer is not None: hf_quantizer.validate_environment( - torch_dtype=torch_dtype, from_tf=from_tf, from_flax=from_flax, device_map=device_map + torch_dtype=torch_dtype, + from_tf=from_tf, + from_flax=from_flax, + device_map=device_map, + weights_only=weights_only, ) torch_dtype = hf_quantizer.update_torch_dtype(torch_dtype) device_map = hf_quantizer.update_device_map(device_map) diff --git a/src/transformers/quantizers/quantizer_torchao.py b/src/transformers/quantizers/quantizer_torchao.py index 9a03eb25f4d..e6c2dc1ce36 100644 --- a/src/transformers/quantizers/quantizer_torchao.py +++ b/src/transformers/quantizers/quantizer_torchao.py @@ -91,6 +91,15 @@ def validate_environment(self, *args, **kwargs): ) else: self.offload = True + if self.pre_quantized: + weights_only = kwargs.get("weights_only", None) + if weights_only: + torch_version = version.parse(importlib.metadata.version("torch")) + if torch_version < version.parse("2.5.0"): + raise RuntimeError( + f"In order to use torchao pre-quantized model, you need to have torch>=2.5.0. However, the current version is {torch_version}." + f" You can also set with `weights_only=False` in `from_pretrained` if you don't want to update torch" + ) def update_torch_dtype(self, torch_dtype): if self.quantization_config.quant_type == "int4_weight_only": @@ -103,6 +112,10 @@ def update_torch_dtype(self, torch_dtype): "Setting torch_dtype to torch.bfloat16 for int4_weight_only quantization since only bfloat16 is supported right now. Please set torch_dtype=torch.bfloat16 to remove this warning." ) torch_dtype = torch.bfloat16 + if self.quantization_config.quant_type == "int8_dynamic_activation_int8_weight": + if torch_dtype is None: + # we need to set the torch_dtype, otherwise we have dtype mismatch when performing the quantized linear op + torch_dtype = torch.float32 return torch_dtype def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype": @@ -198,6 +211,12 @@ def is_serializable(self, safe_serialization=None): ) if not _is_torchao_serializable: logger.warning("torchao quantized model is only serializable after huggingface_hub >= 0.25.0 ") + if self.offload and self.quantization_config.modules_to_not_convert is None: + logger.warning( + "The model contains offloaded modules and these modules are not quantized. We don't recommend saving the model as we won't be able to reload them." + "If you want to specify modules to not quantize, please specify modules_to_not_convert in the quantization_config." + ) + return False return _is_torchao_serializable @property diff --git a/tests/quantization/torchao_integration/test_torchao.py b/tests/quantization/torchao_integration/test_torchao.py index c3ab06ee61b..3733d6dcf42 100644 --- a/tests/quantization/torchao_integration/test_torchao.py +++ b/tests/quantization/torchao_integration/test_torchao.py @@ -14,6 +14,7 @@ # limitations under the License. import gc +import tempfile import unittest from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig @@ -236,5 +237,99 @@ def test_int8_dynamic_activation_int8_weight_quant(self): self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), EXPECTED_OUTPUT) +@require_torch_gpu +@require_torchao +class TorchAoSerializationTest(unittest.TestCase): + input_text = "What are we having for dinner?" + max_new_tokens = 10 + ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n- 1. What is the temperature outside" + # TODO: investigate why we don't have the same output as the original model for this test + SERIALIZED_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" + model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" + quant_config = TorchAoConfig("int4_weight_only", group_size=32) + device = "cuda:0" + + # called only once for all test in this class + @classmethod + def setUpClass(cls): + cls.quantized_model = AutoModelForCausalLM.from_pretrained( + cls.model_name, + torch_dtype=torch.bfloat16, + device_map=cls.device, + quantization_config=cls.quant_config, + ) + cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) + + def tearDown(self): + gc.collect() + torch.cuda.empty_cache() + gc.collect() + + def test_original_model_expected_output(self): + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device) + output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) + + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.ORIGINAL_EXPECTED_OUTPUT) + + def check_serialization_expected_output(self, device, expected_output): + """ + Test if we can serialize and load/infer the model again on the same device + """ + with tempfile.TemporaryDirectory() as tmpdirname: + self.quantized_model.save_pretrained(tmpdirname, safe_serialization=False) + loaded_quantized_model = AutoModelForCausalLM.from_pretrained( + self.model_name, torch_dtype=torch.bfloat16, device_map=self.device + ) + input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(self.device) + + output = loaded_quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) + self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), expected_output) + + def test_serialization_expected_output(self): + self.check_serialization_expected_output(self.device, self.SERIALIZED_EXPECTED_OUTPUT) + + +class TorchAoSerializationW8A8Test(TorchAoSerializationTest): + quant_config = TorchAoConfig("int8_dynamic_activation_int8_weight") + ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" + SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT + device = "cuda:0" + + +class TorchAoSerializationW8Test(TorchAoSerializationTest): + quant_config = TorchAoConfig("int8_weight_only") + ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" + SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT + device = "cuda:0" + + +class TorchAoSerializationW8A8CPUTest(TorchAoSerializationTest): + quant_config = TorchAoConfig("int8_dynamic_activation_int8_weight") + ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" + SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT + device = "cpu" + + def test_serialization_expected_output_cuda(self): + """ + Test if we can serialize on device (cpu) and load/infer the model on cuda + """ + new_device = "cuda:0" + self.check_serialization_expected_output(new_device, self.SERIALIZED_EXPECTED_OUTPUT) + + +class TorchAoSerializationW8CPUTest(TorchAoSerializationTest): + quant_config = TorchAoConfig("int8_weight_only") + ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" + SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT + device = "cpu" + + def test_serialization_expected_output_cuda(self): + """ + Test if we can serialize on device (cpu) and load/infer the model on cuda + """ + new_device = "cuda:0" + self.check_serialization_expected_output(new_device, self.SERIALIZED_EXPECTED_OUTPUT) + + if __name__ == "__main__": unittest.main() From bf42c3bd4b088fd9df1086e63d47a8e33048e5e1 Mon Sep 17 00:00:00 2001 From: Corentin Royer Date: Wed, 20 Nov 2024 18:02:58 +0100 Subject: [PATCH 05/36] Fix hyperparameter search when optuna+deepseed (#34642) * Fix hyperparameter search when optuna+deepseed * Adding free_memory to the search setup --------- Co-authored-by: Corentin-Royer --- .../integrations/integration_utils.py | 23 ++++++++----------- src/transformers/trainer.py | 5 +++- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/transformers/integrations/integration_utils.py b/src/transformers/integrations/integration_utils.py index 4b236b9155f..0cc2685a552 100755 --- a/src/transformers/integrations/integration_utils.py +++ b/src/transformers/integrations/integration_utils.py @@ -208,7 +208,7 @@ def hp_params(trial): if is_optuna_available(): import optuna - if isinstance(trial, optuna.Trial): + if isinstance(trial, optuna.trial.BaseTrial): return trial.params if is_ray_tune_available(): if isinstance(trial, dict): @@ -230,7 +230,7 @@ def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> Be if trainer.args.process_index == 0: - def _objective(trial, checkpoint_dir=None): + def _objective(trial: optuna.Trial, checkpoint_dir=None): checkpoint = None if checkpoint_dir: for subdir in os.listdir(checkpoint_dir): @@ -240,10 +240,11 @@ def _objective(trial, checkpoint_dir=None): if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") - trainer._hp_search_setup(trial) - args_main_rank_list = [pickle.dumps(trainer.args)] - torch.distributed.broadcast_object_list(args_main_rank_list, src=0) - trainer.train(resume_from_checkpoint=checkpoint) + trainer.hp_space(trial) + fixed_trial = optuna.trial.FixedTrial(trial.params, trial.number) + trial_main_rank_list = [fixed_trial] + torch.distributed.broadcast_object_list(trial_main_rank_list, src=0) + trainer.train(resume_from_checkpoint=checkpoint, trial=trial) else: trainer.train(resume_from_checkpoint=checkpoint, trial=trial) # If there hasn't been any evaluation during the training loop. @@ -268,15 +269,11 @@ def _objective(trial, checkpoint_dir=None): else: for i in range(n_trials): trainer.objective = None - args_main_rank_list = [None] + trial_main_rank_list = [None] if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") - torch.distributed.broadcast_object_list(args_main_rank_list, src=0) - args = pickle.loads(bytes(args_main_rank_list[0])) - for key, value in asdict(args).items(): - if key != "local_rank": - setattr(trainer.args, key, value) - trainer.train(resume_from_checkpoint=None) + torch.distributed.broadcast_object_list(trial_main_rank_list, src=0) + trainer.train(resume_from_checkpoint=None, trial=trial_main_rank_list[0]) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 129398e374b..f2e0a90acdd 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1725,6 +1725,9 @@ def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): if self.is_deepspeed_enabled: if self.args.deepspeed is None: raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set") + + self.accelerator.free_memory() + # Rebuild the deepspeed config to reflect the updated training parameters from accelerate.utils import DeepSpeedPlugin @@ -1748,7 +1751,7 @@ def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], ste if self.hp_search_backend == HPSearchBackend.OPTUNA: import optuna - if not trial.study._is_multi_objective(): + if hasattr(trial, "study") and not trial.study._is_multi_objective(): trial.report(self.objective, step) if trial.should_prune(): self.callback_handler.on_train_end(self.args, self.state, self.control) From 3cb8676a915c6fa8ad863afd8a2b1a6f4507f3ec Mon Sep 17 00:00:00 2001 From: Marc Sun <57196510+SunMarc@users.noreply.github.com> Date: Wed, 20 Nov 2024 20:28:51 +0100 Subject: [PATCH 06/36] Fix CI by tweaking torchao tests (#34832) --- src/transformers/utils/quantization_config.py | 9 +++++++-- .../quantization/torchao_integration/test_torchao.py | 11 ++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 2f04df97e86..ac81864e508 100755 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -1264,8 +1264,13 @@ def post_init(self): r""" Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. """ - if not version.parse(importlib.metadata.version("torchao")) >= version.parse("0.4.0"): - raise ValueError("Requires torchao 0.4.0 version and above") + if is_torchao_available(): + if not version.parse(importlib.metadata.version("torchao")) >= version.parse("0.4.0"): + raise ValueError("Requires torchao 0.4.0 version and above") + else: + raise ValueError( + "TorchAoConfig requires torchao to be installed, please install with `pip install torchao`" + ) _STR_TO_METHOD = self._get_torchao_quant_type_to_method() if self.quant_type not in _STR_TO_METHOD.keys(): diff --git a/tests/quantization/torchao_integration/test_torchao.py b/tests/quantization/torchao_integration/test_torchao.py index 3733d6dcf42..d0263f45f18 100644 --- a/tests/quantization/torchao_integration/test_torchao.py +++ b/tests/quantization/torchao_integration/test_torchao.py @@ -246,12 +246,13 @@ class TorchAoSerializationTest(unittest.TestCase): # TODO: investigate why we don't have the same output as the original model for this test SERIALIZED_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" - quant_config = TorchAoConfig("int4_weight_only", group_size=32) + quant_scheme, quant_scheme_kwargs = "int4_weight_only", {"group_size": 32} device = "cuda:0" # called only once for all test in this class @classmethod def setUpClass(cls): + cls.quant_config = TorchAoConfig(cls.quant_scheme, **cls.quant_scheme_kwargs) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.bfloat16, @@ -290,21 +291,21 @@ def test_serialization_expected_output(self): class TorchAoSerializationW8A8Test(TorchAoSerializationTest): - quant_config = TorchAoConfig("int8_dynamic_activation_int8_weight") + quant_scheme, quant_scheme_kwargs = "int8_dynamic_activation_int8_weight", {} ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT device = "cuda:0" class TorchAoSerializationW8Test(TorchAoSerializationTest): - quant_config = TorchAoConfig("int8_weight_only") + quant_scheme, quant_scheme_kwargs = "int8_weight_only", {} ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT device = "cuda:0" class TorchAoSerializationW8A8CPUTest(TorchAoSerializationTest): - quant_config = TorchAoConfig("int8_dynamic_activation_int8_weight") + quant_scheme, quant_scheme_kwargs = "int8_dynamic_activation_int8_weight", {} ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT device = "cpu" @@ -318,7 +319,7 @@ def test_serialization_expected_output_cuda(self): class TorchAoSerializationW8CPUTest(TorchAoSerializationTest): - quant_config = TorchAoConfig("int8_weight_only") + quant_scheme, quant_scheme_kwargs = "int8_weight_only", {} ORIGINAL_EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" SERIALIZED_EXPECTED_OUTPUT = ORIGINAL_EXPECTED_OUTPUT device = "cpu" From 40821a247823b35d7ff10ba490d0d930fe8f5afa Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 20 Nov 2024 21:36:13 +0100 Subject: [PATCH 07/36] Fix CI slack reporting issue (#34833) * fix * fix * fix * fix * fix --------- Co-authored-by: ydshieh --- src/transformers/testing_utils.py | 20 ++++++++++++++++++++ utils/notification_service.py | 10 ++++++++++ 2 files changed, 30 insertions(+) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 8d6c1b19377..49c2aefa092 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -2385,6 +2385,10 @@ def wrapper(*args, **kwargs): env = copy.deepcopy(os.environ) env["_INSIDE_SUB_PROCESS"] = "1" + # This prevents the entries in `short test summary info` given by the subprocess being truncated. so the + # full information can be passed to the parent pytest process. + # See: https://docs.pytest.org/en/stable/explanation/ci.html + env["CI"] = "true" # If not subclass of `unitTest.TestCase` and `pytestconfig` is used: try to grab and use the arguments if "pytestconfig" in kwargs: @@ -2402,6 +2406,22 @@ def wrapper(*args, **kwargs): subprocess.run(command, env=env, check=True, capture_output=True) except subprocess.CalledProcessError as e: exception_message = e.stdout.decode() + lines = exception_message.split("\n") + # Add a first line with more informative information instead of just `= test session starts =`. + # This makes the `short test summary info` section more useful. + if "= test session starts =" in lines[0]: + text = "" + for line in lines[1:]: + if line.startswith("FAILED "): + text = line[len("FAILED ") :] + text = "".join(text.split(" - ")[1:]) + elif line.startswith("=") and line.endswith("=") and " failed in " in line: + break + elif len(text) > 0: + text += f"\n{line}" + text = "(subprocess) " + text + lines = [text] + lines + exception_message = "\n".join(lines) raise pytest.fail(exception_message, pytrace=False) return wrapper diff --git a/utils/notification_service.py b/utils/notification_service.py index 039ee8b29a3..6c9eab3a853 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -1076,6 +1076,11 @@ def prepare_reports(title, header, reports, to_truncate=True): for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): + # Avoid the extra `FAILED` entry given by `run_test_using_subprocess` causing issue when calling + # `stacktraces.pop` below. + # See `run_test_using_subprocess` in `src/transformers/testing_utils.py` + if " - Failed: (subprocess)" in line: + continue line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") @@ -1186,6 +1191,11 @@ def prepare_reports(title, header, reports, to_truncate=True): if failed: for line in artifact["summary_short"].split("\n"): if line.startswith("FAILED "): + # Avoid the extra `FAILED` entry given by `run_test_using_subprocess` causing issue when calling + # `stacktraces.pop` below. + # See `run_test_using_subprocess` in `src/transformers/testing_utils.py` + if " - Failed: (subprocess)" in line: + continue line = line[len("FAILED ") :] line = line.split()[0].replace("\n", "") From 28fb02fc05d52811be27e85ea725682e23887dbc Mon Sep 17 00:00:00 2001 From: Raushan Turganbay Date: Thu, 21 Nov 2024 11:00:22 +0100 Subject: [PATCH 08/36] VLMs: enable generation tests - last batch (#34484) * add tests for 3 more vlms * fix fuyu back * skip test --- src/transformers/models/fuyu/modeling_fuyu.py | 13 +++- .../pix2struct/configuration_pix2struct.py | 6 ++ tests/generation/test_utils.py | 11 +-- tests/models/fuyu/test_modeling_fuyu.py | 16 ++++- tests/models/kosmos2/test_modeling_kosmos2.py | 68 ++++++++++++++++++- .../pix2struct/test_modeling_pix2struct.py | 24 ++++++- 6 files changed, 129 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/fuyu/modeling_fuyu.py b/src/transformers/models/fuyu/modeling_fuyu.py index c8c758e6888..2df5dbc8b29 100644 --- a/src/transformers/models/fuyu/modeling_fuyu.py +++ b/src/transformers/models/fuyu/modeling_fuyu.py @@ -346,7 +346,7 @@ def prepare_inputs_for_generation( ): # Overwritten -- in specific circumstances we don't want to forward image inputs to the model - if past_key_values: + if past_key_values is not None: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) @@ -355,7 +355,7 @@ def prepare_inputs_for_generation( position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: - position_ids = position_ids[:, -1].unsqueeze(-1) + position_ids = position_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: @@ -377,3 +377,12 @@ def prepare_inputs_for_generation( } ) return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/src/transformers/models/pix2struct/configuration_pix2struct.py b/src/transformers/models/pix2struct/configuration_pix2struct.py index d74bb84ce6a..3b6ec9b2d84 100644 --- a/src/transformers/models/pix2struct/configuration_pix2struct.py +++ b/src/transformers/models/pix2struct/configuration_pix2struct.py @@ -91,6 +91,10 @@ class Pix2StructTextConfig(PretrainedConfig): "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", + "decoder_attention_heads": "num_heads", + "encoder_attention_heads": "num_heads", + "encoder_layers": "num_layers", + "decoder_layers": "num_layers", } def __init__( @@ -354,6 +358,8 @@ def __init__( vision_config = {} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.") + text_config["is_encoder_decoder"] = is_encoder_decoder + text_config["tie_word_embeddings"] = tie_word_embeddings self.text_config = Pix2StructTextConfig(**text_config) self.vision_config = Pix2StructVisionConfig(**vision_config) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 76dc23ed9bf..34adc132f88 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1382,19 +1382,22 @@ def test_generate_with_head_masking(self): attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, inputs_dict = self.prepare_config_and_inputs_for_generate() + text_config = config.get_text_config() # We want to test only encoder-decoder models - if not config.is_encoder_decoder: + if not text_config.is_encoder_decoder: continue model = model_class(config).to(torch_device) head_masking = { - "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), + "head_mask": torch.zeros( + text_config.encoder_layers, text_config.encoder_attention_heads, device=torch_device + ), "decoder_head_mask": torch.zeros( - config.decoder_layers, config.decoder_attention_heads, device=torch_device + text_config.decoder_layers, text_config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( - config.decoder_layers, config.decoder_attention_heads, device=torch_device + text_config.decoder_layers, text_config.decoder_attention_heads, device=torch_device ), } diff --git a/tests/models/fuyu/test_modeling_fuyu.py b/tests/models/fuyu/test_modeling_fuyu.py index 4bd66ab945f..bcac135be72 100644 --- a/tests/models/fuyu/test_modeling_fuyu.py +++ b/tests/models/fuyu/test_modeling_fuyu.py @@ -17,12 +17,15 @@ import io import unittest +import pytest import requests +from parameterized import parameterized from transformers import FuyuConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from transformers.utils import cached_property +from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin @@ -263,8 +266,9 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class FuyuModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): +class FuyuModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FuyuForCausalLM,) if is_torch_available() else () + all_generative_model_classes = (FuyuForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"text-generation": FuyuForCausalLM, "image-text-to-text": FuyuForCausalLM} if is_torch_available() else {} ) @@ -296,6 +300,16 @@ def test_training_gradient_checkpointing_use_reentrant(self): def test_training_gradient_checkpointing_use_reentrant_false(self): pass + @pytest.mark.generate + @parameterized.expand([("random",), ("same",)]) + @unittest.skip("Fuyu doesn't support assisted generation due to the need to crop/extend image patches indices") + def test_assisted_decoding_matches_greedy_search(self): + pass + + @unittest.skip("Fuyu doesn't support assisted generation due to the need to crop/extend image patches indices") + def test_assisted_decoding_sample(self): + pass + # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model.") def test_disk_offload_bin(self): diff --git a/tests/models/kosmos2/test_modeling_kosmos2.py b/tests/models/kosmos2/test_modeling_kosmos2.py index 7ede47a348d..9b5089a635d 100644 --- a/tests/models/kosmos2/test_modeling_kosmos2.py +++ b/tests/models/kosmos2/test_modeling_kosmos2.py @@ -21,7 +21,9 @@ import unittest import numpy as np +import pytest import requests +from parameterized import parameterized from transformers import AutoModelForImageTextToText, AutoProcessor, Kosmos2Config from transformers.models.kosmos2.configuration_kosmos2 import Kosmos2TextConfig, Kosmos2VisionConfig @@ -37,6 +39,7 @@ is_vision_available, ) +from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, @@ -205,6 +208,7 @@ def __init__(self, parent, text_kwargs=None, vision_kwargs=None, latent_query_nu self.text_model_tester = Kosmos2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Kosmos2VisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test + self.seq_length = self.text_model_tester.seq_length self.latent_query_num = latent_query_num self.is_training = is_training @@ -253,7 +257,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class Kosmos2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): +class Kosmos2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Kosmos2Model, Kosmos2ForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (Kosmos2ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( @@ -451,6 +455,68 @@ def check_same_values(layer_1, layer_2): # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape) # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head)) + @pytest.mark.generate + @parameterized.expand([("greedy", 1), ("beam search", 2)]) + @unittest.skip( + "KOSMOS-2 doesn't support inputs embeds. The test isn't skipped by checking input args because KOSMOS-2 has `generate()` overwritten" + ) + def test_generate_from_inputs_embeds(self): + pass + + @pytest.mark.generate + def test_left_padding_compatibility(self): + # Overwrite because Kosmos-2 need to padd pixel values and pad image-attn-mask + + def _prepare_model_kwargs(input_ids, attention_mask, pad_size, signature): + model_kwargs = {"input_ids": input_ids, "attention_mask": attention_mask} + if "position_ids" in signature: + position_ids = torch.cumsum(attention_mask, dim=-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + model_kwargs["position_ids"] = position_ids + if "cache_position" in signature: + cache_position = torch.arange(input_ids.shape[-1], device=torch_device) + model_kwargs["cache_position"] = cache_position + if "image_embeds_position_mask" in signature: + image_embeds_position_mask = torch.zeros_like(input_ids) + image_embeds_position_mask[:, (pad_size + 1) : pad_size + 1 + self.model_tester.latent_query_num] = 1 + model_kwargs["image_embeds_position_mask"] = image_embeds_position_mask + return model_kwargs + + for model_class in self.all_generative_model_classes: + config, inputs_dict = self.prepare_config_and_inputs_for_generate() + input_ids = inputs_dict["input_ids"] + pixel_values = inputs_dict["pixel_values"] + attention_mask = inputs_dict.get("attention_mask") + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + + model = model_class(config).to(torch_device).eval() + signature = inspect.signature(model.forward).parameters.keys() + + # no cache as some models require special cache classes to be init outside forward + model.generation_config.use_cache = False + + # Without padding + model_kwargs = _prepare_model_kwargs(input_ids, attention_mask, pad_size=0, signature=signature) + next_logits_wo_padding = model(**model_kwargs, pixel_values=pixel_values).logits[:, -1, :] + + # With left-padding (length 32) + # can hardcode pad_token to be 0 as we'll do attn masking anyway + pad_token_id = ( + config.get_text_config().pad_token_id if config.get_text_config().pad_token_id is not None else 0 + ) + pad_size = (input_ids.shape[0], 32) + padding = torch.ones(pad_size, dtype=input_ids.dtype, device=torch_device) * pad_token_id + padded_input_ids = torch.cat((padding, input_ids), dim=1) + padded_attention_mask = torch.cat((torch.zeros_like(padding), attention_mask), dim=1) + model_kwargs = _prepare_model_kwargs( + padded_input_ids, padded_attention_mask, pad_size=32, signature=signature + ) + next_logits_with_padding = model(**model_kwargs, pixel_values=pixel_values).logits[:, -1, :] + + # They should result in very similar logits + self.assertTrue(torch.allclose(next_logits_wo_padding, next_logits_with_padding, atol=1e-3)) + @slow def test_model_from_pretrained(self): model_name = "microsoft/kosmos-2-patch14-224" diff --git a/tests/models/pix2struct/test_modeling_pix2struct.py b/tests/models/pix2struct/test_modeling_pix2struct.py index 7438dc6d666..adec2c893a0 100644 --- a/tests/models/pix2struct/test_modeling_pix2struct.py +++ b/tests/models/pix2struct/test_modeling_pix2struct.py @@ -27,6 +27,7 @@ from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available +from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, @@ -388,6 +389,7 @@ def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=Tru self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.seq_length = self.text_model_tester.seq_length # need seq_length for common tests self.is_training = is_training + self.max_patches = self.vision_model_tester.max_patches def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() @@ -417,7 +419,7 @@ def prepare_config_and_inputs_for_common(self): @require_torch -class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): +class Pix2StructModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Pix2StructForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (Pix2StructForConditionalGeneration,) if is_torch_available() else {} pipeline_model_mapping = ( @@ -751,6 +753,26 @@ def test_load_vision_text_config(self): text_config = Pix2StructTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) + def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): + # overwrite because # pix2struct seq length depends on image inputs + seq_length = self.model_tester.max_patches + encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) + self.assertIsInstance(attentions, tuple) + self.assertListEqual( + [layer_attentions.shape for layer_attentions in attentions], + [encoder_expected_shape] * len(attentions), + ) + + def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): + # overwrite because # pix2struct seq length depends on image inputs + seq_length = self.model_tester.max_patches + encoder_expected_shape = (batch_size, seq_length, config.hidden_size) + self.assertIsInstance(hidden_states, tuple) + self.assertListEqual( + [layer_hidden_states.shape for layer_hidden_states in hidden_states], + [encoder_expected_shape] * len(hidden_states), + ) + # We will verify our results on an image of a stop sign def prepare_img(): From d4e1acbb7c2f8bedeccc0d254f3a7ba5873bc61f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Quentin=20Gallou=C3=A9dec?= <45557362+qgallouedec@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:37:02 +0100 Subject: [PATCH 09/36] Change logging level from warning to info for `max_steps` overriding `num_train_epochs` (#34810) Update trainer.py --- src/transformers/trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index f2e0a90acdd..3fd067edfc5 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -661,7 +661,7 @@ def __init__( raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).") if args.max_steps > 0 and args.num_train_epochs > 0: - logger.warning("max_steps is given, it will override any value given in num_train_epochs") + logger.info("max_steps is given, it will override any value given in num_train_epochs") if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0: raise ValueError( From c57eafdaa119eecae8557be4c626629bc1adc0fd Mon Sep 17 00:00:00 2001 From: farrosalferro <127369839+farrosalferro@users.noreply.github.com> Date: Thu, 21 Nov 2024 19:37:34 +0900 Subject: [PATCH 10/36] Add Nemotron GGUF Loading Support (#34725) * Add Nemotron GGUF Loading Support * fix the Nemotron architecture assignation --------- Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com> --- docs/source/en/gguf.md | 1 + src/transformers/integrations/ggml.py | 27 ++++++++++++++++++ tests/quantization/ggml/test_ggml.py | 40 +++++++++++++++++++++++++++ 3 files changed, 68 insertions(+) diff --git a/docs/source/en/gguf.md b/docs/source/en/gguf.md index 2da721b2898..b1ed1f0d492 100644 --- a/docs/source/en/gguf.md +++ b/docs/source/en/gguf.md @@ -87,6 +87,7 @@ For now the supported model architectures are the architectures that have been v - Starcoder2 - T5 - Mamba +- Nemotron ## Example usage diff --git a/src/transformers/integrations/ggml.py b/src/transformers/integrations/ggml.py index f4545f2698c..57f0af5667e 100644 --- a/src/transformers/integrations/ggml.py +++ b/src/transformers/integrations/ggml.py @@ -248,6 +248,20 @@ "output_norm": "backbone.norm_f", "output.weight": "lm_head.weight", }, + "nemotron": { + "token_embd": "model.embed_tokens", + "blk": "model.layers", + "ffn_up": "mlp.up_proj", + "ffn_down": "mlp.down_proj", + "ffn_norm": "post_attention_layernorm", + "attn_norm": "input_layernorm", + "attn_q": "self_attn.q_proj", + "attn_v": "self_attn.v_proj", + "attn_k": "self_attn.k_proj", + "attn_output": "self_attn.o_proj", + "output.weight": "lm_head.weight", + "output_norm": "model.norm", + }, } @@ -397,6 +411,18 @@ "ssm.time_step_rank": "time_step_rank", "ssm.inner_size": "intermediate_size", }, + "nemotron": { + "context_length": "max_position_embeddings", + "block_count": "num_hidden_layers", + "feed_forward_length": "intermediate_size", + "embedding_length": "hidden_size", + "rope.dimension_count": None, + "rope.freq_base": "rope_theta", + "attention.head_count": "num_attention_heads", + "attention.head_count_kv": "num_key_value_heads", + "attention.layer_norm_rms_epsilon": "norm_eps", + "vocab_size": "vocab_size", + }, } GGUF_TOKENIZER_MAPPING = { @@ -793,6 +819,7 @@ def converted(self) -> Tokenizer: "starcoder2": GGUFGPTConverter, "t5": GGUFT5Converter, "mamba": GGUFGPTConverter, + "nemotron": GGUFGPTConverter, } diff --git a/tests/quantization/ggml/test_ggml.py b/tests/quantization/ggml/test_ggml.py index 84278e70325..42b05f18449 100644 --- a/tests/quantization/ggml/test_ggml.py +++ b/tests/quantization/ggml/test_ggml.py @@ -61,6 +61,8 @@ class GgufIntegrationTests(unittest.TestCase): starcoder2_original_model_id = "bigcode/starcoder2-3b" mamba_original_model_id = "state-spaces/mamba-2.8b-hf" mamba_model_id = "jpodivin/mamba-2.8b-hf-GGUF" + nemotron_original_model_id = "nvidia/Nemotron-Mini-4B-Instruct" + nemotron_model_id = "bartowski/Nemotron-Mini-4B-Instruct-GGUF" # standard quants q4_0_gguf_model_id = "tinyllama-1.1b-chat-v1.0.Q4_0.gguf" @@ -106,6 +108,8 @@ class GgufIntegrationTests(unittest.TestCase): fp16_starcoder2_gguf_model_id = "starcoder2-3b.fp16.gguf" q6_k_mamba_model_id = "ggml-model-Q6_K.gguf" fp16_mamba_model_id = "ggml-model-f16.gguf" + q6_k_nemotron_model_id = "Nemotron-Mini-4B-Instruct-Q6_K.gguf" + fp16_nemotron_model_id = "Nemotron-Mini-4B-Instruct-f16.gguf" example_text = "Hello" @@ -792,6 +796,42 @@ def test_mamba_q6_k(self): EXPECTED_TEXT = "Hello,I answerthe question.\n\nA" self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) + def test_nemotron_weights_conversion_fp16(self): + original_model = AutoModelForCausalLM.from_pretrained( + self.nemotron_original_model_id, + torch_dtype=torch.float16, + ) + + converted_model = AutoModelForCausalLM.from_pretrained( + self.nemotron_model_id, + gguf_file=self.fp16_nemotron_model_id, + torch_dtype=torch.float16, + ) + + converted_state_dict = converted_model.state_dict() + original_state_dict = original_model.state_dict() + + for layer_name, original_params in original_state_dict.items(): + if layer_name in converted_state_dict: + self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape) + torch.testing.assert_close(original_params, converted_state_dict[layer_name]) + else: + raise ValueError(f"Layer {layer_name} is not presented in GGUF model") + + def test_nemotron_q6_k(self): + model = AutoModelForCausalLM.from_pretrained( + self.nemotron_model_id, + gguf_file=self.q6_k_nemotron_model_id, + torch_dtype=torch.float16, + ) + + tokenizer = AutoTokenizer.from_pretrained(self.nemotron_model_id, gguf_file=self.q6_k_nemotron_model_id) + text = tokenizer(self.example_text, return_tensors="pt")["input_ids"] + out = model.generate(text, max_new_tokens=10) + + EXPECTED_TEXT = "'Hello. hotmail.com.'" + self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT) + def test_tokenization_xnli(self): import tqdm from datasets import load_dataset From ae5cbf804bf8eeefa8f4a4359f0ea055b99369ad Mon Sep 17 00:00:00 2001 From: Vladislav Bronzov <58587565+VladOS95-cyber@users.noreply.github.com> Date: Thu, 21 Nov 2024 13:40:49 +0100 Subject: [PATCH 11/36] Improve gguf tensor processing (#34515) * add tensor processing system to separate logic for models * format refactoring * small fix * make some methods private * move custom methods to processors * refactor tensor processing * format fix --- .../modeling_gguf_pytorch_utils.py | 336 +++++++++++------- 1 file changed, 212 insertions(+), 124 deletions(-) diff --git a/src/transformers/modeling_gguf_pytorch_utils.py b/src/transformers/modeling_gguf_pytorch_utils.py index f58bf330ce7..cca6d548cdf 100644 --- a/src/transformers/modeling_gguf_pytorch_utils.py +++ b/src/transformers/modeling_gguf_pytorch_utils.py @@ -15,7 +15,7 @@ # limitations under the License. import re -from typing import Dict, Optional +from typing import Dict, NamedTuple, Optional import numpy as np from tqdm import tqdm @@ -55,6 +55,200 @@ GGUF_SUPPORTED_ARCHITECTURES = list(GGUF_TO_TRANSFORMERS_MAPPING["tensors"].keys()) +class GGUFTensor(NamedTuple): + weights: np.ndarray + name: str + metadata: dict + + +class TensorProcessor: + def __init__(self, config=None): + self.config = config or {} + + def process(self, weights, name, **kwargs): + return GGUFTensor(weights, name, {}) + + +class LlamaTensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + def process(self, weights, name, **kwargs): + if ".attn_k." in name or ".attn_q." in name: + num_heads = self.config.get("num_attention_heads") + num_kv_heads = self.config.get("num_key_value_heads") + + if None in (num_heads, num_kv_heads): + return GGUFTensor(weights, name, {}) + if ".attn_q." in name: + weights = self._reverse_permute_weights(weights, num_heads, num_heads) + elif ".attn_k." in name: + weights = self._reverse_permute_weights(weights, num_heads, num_kv_heads) + return GGUFTensor(weights, name, {}) + + def _reverse_permute_weights( + self, weights: np.ndarray, n_head: int, num_kv_heads: Optional[int] = None + ) -> np.ndarray: + # Original permutation implementation + # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L1402-L1408 + if num_kv_heads is not None and n_head != num_kv_heads: + n_head = num_kv_heads + + dim = weights.shape[0] // n_head // 2 + w = weights.reshape(n_head, dim, 2, *weights.shape[1:]) + return w.swapaxes(2, 1).reshape(weights.shape) + + +class Qwen2MoeTensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + def process(self, weights, name, **kwargs): + if "_exp" in name: + tensor_key_mapping = kwargs.get("tensor_key_mapping") + parsed_parameters = kwargs.get("parsed_parameters") + if tensor_key_mapping: + self._split_moe_expert_tensor(weights, parsed_parameters, name, tensor_key_mapping) + return GGUFTensor(weights, None, {}) + if "ffn_gate_inp_shexp" in name: + # for compatibility tensor shared_expert_gate must be (1, 2048) dim, + # quantized one is (2048) + weights = np.expand_dims(weights, axis=0) + return GGUFTensor(weights, name, {}) + + def _split_moe_expert_tensor( + self, weights: np.ndarray, parsed_parameters: Dict[str, Dict], name: str, tensor_key_mapping: dict + ): + # Original merge implementation + # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L1994-L2022 + exp_name = "" + if "ffn_gate_exps" in name: + exp_name = "gate_proj" + elif "ffn_down_exps" in name: + exp_name = "down_proj" + elif "ffn_up_exps" in name: + exp_name = "up_proj" + else: + raise ValueError(f"Cannot map expert tensor {name} in Qwen2Moe architecture.") + for tensor_name in tensor_key_mapping: + if tensor_name in name: + name = name.replace(tensor_name, tensor_key_mapping[tensor_name]) + w_counter = self.config.get("num_experts", 60) + for i in range(0, w_counter): + temp_name = name.replace(".weight", f".{i}.{exp_name}.weight") + exp_weight = weights[i] + parsed_parameters["tensors"][temp_name] = torch.from_numpy(np.copy(exp_weight)) + + +class BloomTensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + def process(self, weights, name, **kwargs): + if "attn_qkv" in name: + num_heads = self.config["n_head"] + n_embed = self.config["hidden_size"] + if "weight" in name: + weights = self._reverse_reshape_weights(weights, num_heads, n_embed) + else: + weights = self._reverse_reshape_bias(weights, num_heads, n_embed) + return GGUFTensor(weights, name, {}) + + def _reverse_reshape_weights(self, weights: np.ndarray, n_head: int, n_embed: int): + # Original reshape implementation + # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L972-L985 + q, k, v = np.array_split(weights, 3, axis=0) + + q = q.reshape(n_head, n_embed // n_head, n_embed) + k = k.reshape(n_head, n_embed // n_head, n_embed) + v = v.reshape(n_head, n_embed // n_head, n_embed) + qkv_weights = np.stack([q, k, v], axis=1) + + return qkv_weights.reshape(n_head * 3 * (n_embed // n_head), n_embed) + + def _reverse_reshape_bias(self, weights: np.ndarray, n_head: int, n_embed: int): + # Original reshape implementation + # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L986-L998 + q_bias, k_bias, v_bias = np.array_split(weights, 3) + + q_bias = q_bias.reshape(n_head, n_embed // n_head) + k_bias = k_bias.reshape(n_head, n_embed // n_head) + v_bias = v_bias.reshape(n_head, n_embed // n_head) + + qkv_bias = np.stack([q_bias, k_bias, v_bias], axis=1).flatten() + return qkv_bias + + +class T5TensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + def process(self, weights, name, **kwargs): + bid = None + for chunk in name.split("."): + if chunk.isdigit(): + bid = int(chunk) + break + return GGUFTensor(weights, name, {"bid": bid}) + + +class GPT2TensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + def process(self, weights, name, **kwargs): + # Original transpose implementation + # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L2060-L2061 + if ( + "attn_qkv.weight" in name + or "ffn_down.weight" in name + or "ffn_up.weight" in name + or "attn_output.weight" in name + ): + weights = weights.T + + # Handle special case for output.weight + if name == "output.weight": + # output.weight has conflicts with attn_output.weight in name checking + # Store the tensor directly and signal to skip further processing + name = "lm_head.weight" + parsed_parameters = kwargs.get("parsed_parameters", {}) + parsed_parameters["tensors"][name] = torch.from_numpy(np.copy(weights)) + name = None # Signal to skip further processing + return GGUFTensor(weights, name, {}) + + +class MambaTensorProcessor(TensorProcessor): + def __init__(self, config=None): + super().__init__(config=config) + + def process(self, weights, name, **kwargs): + if "ssm_d" in name and "bias" not in name and "weight" not in name: + # ssm_d has conflicts with ssm_dt in name checking + # we have to explicitly check that name is exactly ssm_d + name = name.replace("ssm_d", "mixer.D") + if "ssm_conv1d.weight" in name: + # for compatibility tensor ssm_conv1d must be (5120, 1, 4]) dim, + # quantized one is (5120, 4) + weights = np.expand_dims(weights, axis=1) + if "ssm_a" in name: + # Original exponential implementation + # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L2975-L2977 + weights = np.log(-weights) + return GGUFTensor(weights, name, {}) + + +TENSOR_PROCESSORS = { + "llama": LlamaTensorProcessor, + "qwen2moe": Qwen2MoeTensorProcessor, + "bloom": BloomTensorProcessor, + "t5": T5TensorProcessor, + "t5encoder": T5TensorProcessor, + "gpt2": GPT2TensorProcessor, + "mamba": MambaTensorProcessor, +} + + def read_field(reader, field): value = reader.fields[field] return [_gguf_parse_value(value.parts[_data_index], value.types) for _data_index in value.data] @@ -177,73 +371,28 @@ def load_gguf_checkpoint(gguf_checkpoint_path, return_tensors=False): if return_tensors: tensor_key_mapping = GGUF_TO_TRANSFORMERS_MAPPING["tensors"][architecture + model_size] + config = parsed_parameters.get("config", {}) + + ProcessorClass = TENSOR_PROCESSORS.get(architecture, TensorProcessor) + processor = ProcessorClass(config=config) for tensor in tqdm(reader.tensors, desc="Converting and de-quantizing GGUF tensors..."): name = tensor.name - weights = dequantize(tensor.data, tensor.tensor_type) - if architecture == "llama" and (".attn_k." in name or ".attn_q." in name): - num_heads = parsed_parameters["config"]["num_attention_heads"] - num_kv_heads = parsed_parameters["config"]["num_key_value_heads"] - if ".attn_q." in name: - weights = reverse_permute_weights(weights, num_heads, num_heads) - elif ".attn_k." in name: - weights = reverse_permute_weights(weights, num_heads, num_kv_heads) - - if architecture == "qwen2moe": - if "_exp" in name: - split_moe_expert_tensor(weights, parsed_parameters, name, tensor_key_mapping) - continue - if "ffn_gate_inp_shexp" in name: - # for compatibility tensor shared_expert_gate must be (1, 2048) dim, - # quantized one is (2048) - weights = np.expand_dims(weights, axis=0) - - if architecture == "bloom" and "attn_qkv" in name: - num_heads = parsed_parameters["config"]["n_head"] - n_embed = parsed_parameters["config"]["hidden_size"] - if "weight" in name: - weights = reverse_reshape_weights(weights, num_heads, n_embed) - else: - weights = reverse_reshape_bias(weights, num_heads, n_embed) - - bid = None - if architecture in ("t5", "t5encoder"): - for chunk in name.split("."): - if chunk.isdigit(): - bid = int(chunk) - break - - if architecture == "gpt2": - if ( - "attn_qkv.weight" in name - or "ffn_down.weight" in name - or "ffn_up.weight" in name - or "attn_output.weight" in name - ): - # Original transpose implementation - # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L2060-L2061 - weights = weights.T - if name == "output.weight": - # output.weight has conflicts with attn_output.weight in name checking - # we have to explicitly check that name is exactly output.weight - name = "lm_head.weight" - parsed_parameters["tensors"][name] = torch.from_numpy(np.copy(weights)) - continue - if architecture == "mamba": - if "ssm_d" in name and "bias" not in name and "weight" not in name: - # ssm_d has conflicts with ssm_dt in name checking - # we have to explicitly check that name is exactly ssm_d - name = name.replace("ssm_d", "mixer.D") - if "ssm_conv1d.weight" in name: - # for compatibility tensor ssm_conv1d must be (5120, 1, 4]) dim, - # quantized one is (5120, 4) - weights = np.expand_dims(weights, axis=1) - if "ssm_a" in name: - # Original exponential implementation - # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L2975-L2977 - weights = np.log(-weights) + result = processor.process( + weights=weights, + name=name, + tensor_key_mapping=tensor_key_mapping, + parsed_parameters=parsed_parameters, + ) + + weights = result.weights + name = result.name + bid = result.metadata.get("bid") + + if name is None: + continue for tensor_name in tensor_key_mapping: if tensor_name.format(bid=bid) in name: @@ -256,64 +405,3 @@ def load_gguf_checkpoint(gguf_checkpoint_path, return_tensors=False): logger.info(f"Some keys of the GGUF file were not considered: {reader_keys}") return parsed_parameters - - -def reverse_permute_weights(weights: np.ndarray, n_head: int, num_kv_heads: Optional[int] = None) -> np.ndarray: - # Original permutation implementation - # https://github.com/ggerganov/llama.cpp/blob/a38b884c6c4b0c256583acfaaabdf556c62fabea/convert_hf_to_gguf.py#L1402-L1408 - if num_kv_heads is not None and n_head != num_kv_heads: - n_head = num_kv_heads - - dim = weights.shape[0] // n_head // 2 - w = weights.reshape(n_head, dim, 2, *weights.shape[1:]) - return w.swapaxes(2, 1).reshape(weights.shape) - - -def reverse_reshape_weights(weights: np.ndarray, n_head: int, n_embed: int): - # Original reshape implementation - # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L972-L985 - q, k, v = np.array_split(weights, 3, axis=0) - - q = q.reshape(n_head, n_embed // n_head, n_embed) - k = k.reshape(n_head, n_embed // n_head, n_embed) - v = v.reshape(n_head, n_embed // n_head, n_embed) - qkv_weights = np.stack([q, k, v], axis=1) - - return qkv_weights.reshape(n_head * 3 * (n_embed // n_head), n_embed) - - -def reverse_reshape_bias(weights: np.ndarray, n_head: int, n_embed: int): - # Original reshape implementation - # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L986-L998 - q_bias, k_bias, v_bias = np.array_split(weights, 3) - - q_bias = q_bias.reshape(n_head, n_embed // n_head) - k_bias = k_bias.reshape(n_head, n_embed // n_head) - v_bias = v_bias.reshape(n_head, n_embed // n_head) - - qkv_bias = np.stack([q_bias, k_bias, v_bias], axis=1).flatten() - return qkv_bias - - -def split_moe_expert_tensor( - weights: np.ndarray, parsed_parameters: Dict[str, Dict], name: str, tensor_key_mapping: dict -): - # Original merge implementation - # https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py#L1994-L2022 - exp_name = "" - if "ffn_gate_exps" in name: - exp_name = "gate_proj" - elif "ffn_down_exps" in name: - exp_name = "down_proj" - elif "ffn_up_exps" in name: - exp_name = "up_proj" - else: - raise ValueError(f"Cannot map expert tensor {name} in Qwen2Moe architecture.") - for tensor_name in tensor_key_mapping: - if tensor_name in name: - name = name.replace(tensor_name, tensor_key_mapping[tensor_name]) - w_counter = parsed_parameters["config"].get("num_experts", 60) - for i in range(0, w_counter): - temp_name = name.replace(".weight", f".{i}.{exp_name}.weight") - exp_weight = weights[i] - parsed_parameters["tensors"][temp_name] = torch.from_numpy(np.copy(exp_weight)) From d6a5c23f71052e5405771ca6dcd3d4acbba49fc6 Mon Sep 17 00:00:00 2001 From: AbdelKarim ELJANDOUBI <78537694+eljandoubi@users.noreply.github.com> Date: Thu, 21 Nov 2024 13:52:22 +0100 Subject: [PATCH 12/36] Fix ds nvme (#34444) * skip nested deepspeed.zero.Init call * make fixup * solve conflict * solve conflict * put back local * use context mangers instead of local thread * Skip recursive calls to deepspeed.zero.Init * Skip recursive calls to deepspeed.zero.Init * back to old notebooks * make style --- src/transformers/modeling_utils.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index f679f7a190f..a4de8abed03 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -139,6 +139,7 @@ _init_weights = True _is_quantized = False +_is_ds_init_called = False def is_fsdp_enabled(): @@ -226,6 +227,19 @@ def set_quantized_state(): _is_quantized = False +# Skip recursive calls to deepspeed.zero.Init to avoid pinning errors. +# This issue occurs with ZeRO stage 3 when using NVMe offloading. +# For more details, refer to issue #34429. +@contextmanager +def set_zero3_state(): + global _is_ds_init_called + _is_ds_init_called = True + try: + yield + finally: + _is_ds_init_called = False + + def get_parameter_device(parameter: Union[nn.Module, "ModuleUtilsMixin"]): try: return next(parameter.parameters()).device @@ -1473,13 +1487,14 @@ def _from_config(cls, config, **kwargs): torch_dtype=torch_dtype, ) - if is_deepspeed_zero3_enabled() and not _is_quantized: + if is_deepspeed_zero3_enabled() and not _is_quantized and not _is_ds_init_called: import deepspeed logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") # this immediately partitions the model across all gpus, to avoid the overhead in time # and memory copying it on CPU or each GPU first - with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()): + init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), set_zero3_state()] + with ContextManagers(init_contexts): model = cls(config, **kwargs) else: @@ -4026,11 +4041,14 @@ def from_pretrained( init_contexts = [no_init_weights(_enable=_fast_init)] tp_device = None - if is_deepspeed_zero3_enabled() and not is_quantized: + if is_deepspeed_zero3_enabled() and not is_quantized and not _is_ds_init_called: import deepspeed logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") - init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts + init_contexts = [ + deepspeed.zero.Init(config_dict_or_path=deepspeed_config()), + set_zero3_state(), + ] + init_contexts elif low_cpu_mem_usage: if not is_accelerate_available(): raise ImportError( From 18871599c9ae76f7b5a09186b2c09fc5b8826604 Mon Sep 17 00:00:00 2001 From: Jonathan Mamou Date: Thu, 21 Nov 2024 15:46:35 +0200 Subject: [PATCH 13/36] Fix heuristic scheduling for UAG (#34805) * fix heuristic schedule * fix style * fix format --- src/transformers/generation/candidate_generator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/generation/candidate_generator.py b/src/transformers/generation/candidate_generator.py index d8344c25a65..df213b458cf 100644 --- a/src/transformers/generation/candidate_generator.py +++ b/src/transformers/generation/candidate_generator.py @@ -255,7 +255,8 @@ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.F "heuristic", "heuristic_transient", }: - if num_matches == int(self.num_assistant_tokens): + # len(scores[0])-1 is the number of candidates according to the target tokenizer. + if num_matches == len(scores[0]) - 1: self.num_assistant_tokens += 2.0 else: self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0) From 4e90b99ed916300b80bac9db793f2a96b2a87122 Mon Sep 17 00:00:00 2001 From: Cyril Vallez Date: Thu, 21 Nov 2024 14:52:39 +0100 Subject: [PATCH 14/36] Refactor StarCoder2 using modular (#34015) * Create modular_starcoder2.py * Update modular_starcoder2.py * update * finalize modular * revert # no-unravel * Add support * style * Update modular_model_converter.py * update docstring --- .../models/starcoder2/modeling_starcoder2.py | 84 +-- .../models/starcoder2/modular_starcoder2.py | 573 ++++++++++++++++++ utils/modular_model_converter.py | 52 +- 3 files changed, 643 insertions(+), 66 deletions(-) create mode 100644 src/transformers/models/starcoder2/modular_starcoder2.py diff --git a/src/transformers/models/starcoder2/modeling_starcoder2.py b/src/transformers/models/starcoder2/modeling_starcoder2.py index 1a8b6412e73..93adc80d161 100644 --- a/src/transformers/models/starcoder2/modeling_starcoder2.py +++ b/src/transformers/models/starcoder2/modeling_starcoder2.py @@ -1,3 +1,9 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from src/transformers/models/starcoder2/modular_starcoder2.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_starcoder2.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 # coding=utf-8 # Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved. # @@ -17,20 +23,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""PyTorch Starcoder2 model.""" import math from typing import List, Optional, Tuple, Union import torch -import torch.utils.checkpoint from torch import nn -from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter +from ...modeling_flash_attention_utils import _flash_attention_forward from ...modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, @@ -56,12 +60,10 @@ logger = logging.get_logger(__name__) - _CHECKPOINT_FOR_DOC = "bigcode/starcoder2-7b" _CONFIG_FOR_DOC = "Starcoder2Config" -# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Starcoder2 class Starcoder2RotaryEmbedding(nn.Module): def __init__( self, @@ -149,7 +151,23 @@ def forward(self, x, position_ids): return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) -# Copied from transformers.models.llama.modeling_llama.rotate_half +class Starcoder2MLP(nn.Module): + def __init__(self, config: Starcoder2Config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias) + self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias) + self.act = ACT2FN[config.hidden_act] + self.residual_dropout = config.residual_dropout + + def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training) + return hidden_states + + def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] @@ -157,7 +175,6 @@ def rotate_half(x): return torch.cat((-x2, x1), dim=-1) -# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. @@ -185,24 +202,6 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): return q_embed, k_embed -class Starcoder2MLP(nn.Module): - def __init__(self, config: Starcoder2Config): - super().__init__() - embed_dim = config.hidden_size - self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias) - self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias) - self.act = ACT2FN[config.hidden_act] - self.residual_dropout = config.residual_dropout - - def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: - hidden_states = self.c_fc(hidden_states) - hidden_states = self.act(hidden_states) - hidden_states = self.c_proj(hidden_states) - hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training) - return hidden_states - - -# Copied from transformers.models.llama.modeling_llama.repeat_kv def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, @@ -331,7 +330,6 @@ class Starcoder2FlashAttention2(Starcoder2Attention): flash attention and deal with padding tokens in case the input contains any of them. """ - # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -340,7 +338,6 @@ def __init__(self, *args, **kwargs): # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() - # Ignore copy def forward( self, hidden_states: torch.Tensor, @@ -406,7 +403,7 @@ def forward( key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) - # Reashape to the expected shape for Flash Attention + # Reshape to the expected shape for Flash Attention query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) @@ -434,7 +431,6 @@ def forward( return attn_output, attn_weights, past_key_value -# Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Starcoder2 class Starcoder2SdpaAttention(Starcoder2Attention): """ Starcoder2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from @@ -442,7 +438,6 @@ class Starcoder2SdpaAttention(Starcoder2Attention): SDPA API. """ - # Ignore copy def forward( self, hidden_states: torch.Tensor, @@ -552,7 +547,6 @@ def __init__(self, config: Starcoder2Config, layer_idx: int): self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) - # Copied from transformers.models.qwen2.modeling_qwen2.Qwen2DecoderLayer.forward def forward( self, hidden_states: torch.Tensor, @@ -642,7 +636,6 @@ def forward( "The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.", STARCODER2_START_DOCSTRING, ) -# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2PreTrainedModel with Qwen2->Starcoder2 class Starcoder2PreTrainedModel(PreTrainedModel): config_class = Starcoder2Config base_model_prefix = "model" @@ -760,14 +753,15 @@ def __init__(self, config: Starcoder2Config): self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.embedding_dropout = config.embedding_dropout self.layers = nn.ModuleList( [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) self.rotary_emb = Starcoder2RotaryEmbedding(config=config) + self.gradient_checkpointing = False + self.embedding_dropout = config.embedding_dropout # Initialize weights and apply final processing self.post_init() @@ -904,7 +898,6 @@ def forward( attentions=all_self_attns, ) - # Copied from transformers.models.phi3.modeling_phi3.Phi3Model._update_causal_mask def _update_causal_mask( self, attention_mask: torch.Tensor, @@ -981,7 +974,6 @@ def _update_causal_mask( return causal_mask @staticmethod - # Copied from transformers.models.mistral.modeling_mistral.MistralModel._prepare_4d_causal_attention_mask_with_cache_position with Mistral->Starcoder2 def _prepare_4d_causal_attention_mask_with_cache_position( attention_mask: torch.Tensor, sequence_length: int, @@ -1049,7 +1041,6 @@ def _prepare_4d_causal_attention_mask_with_cache_position( return causal_mask -# Copied from transformers.models.qwen2.modeling_qwen2.Qwen2ForCausalLM with QWEN2->STARCODER2,Qwen2->Starcoder2 class Starcoder2ForCausalLM(Starcoder2PreTrainedModel, GenerationMixin): _tied_weights_keys = ["lm_head.weight"] @@ -1082,7 +1073,6 @@ def get_decoder(self): @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) - # Ignore copy def forward( self, input_ids: torch.LongTensor = None, @@ -1097,6 +1087,7 @@ def forward( return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, num_logits_to_keep: int = 0, + **loss_kwargs, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: @@ -1117,8 +1108,8 @@ def forward( ```python >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM - >>> model = Starcoder2ForCausalLM.from_pretrained("bigcode/starcoder2-7b") - >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-7b") + >>> model = Starcoder2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) + >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") @@ -1155,18 +1146,7 @@ def forward( loss = None if labels is not None: - # Upcast to float if we need to compute the loss to avoid potential precision issues - logits = logits.float() - # Shift so that tokens < n predict n - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - shift_logits = shift_logits.view(-1, self.config.vocab_size) - shift_labels = shift_labels.view(-1) - # Ensure tensors are on the same device - shift_labels = shift_labels.to(shift_logits.device) - loss_fct = CrossEntropyLoss() - loss = loss_fct(shift_logits, shift_labels) + loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs) if not return_dict: output = (logits,) + outputs[1:] @@ -1196,7 +1176,6 @@ def forward( """, STARCODER2_START_DOCSTRING, ) -# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Starcoder2, LLAMA->STARCODER2 class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel): def __init__(self, config): super().__init__(config) @@ -1293,7 +1272,6 @@ def forward( """, STARCODER2_START_DOCSTRING, ) -# Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Starcoder2, LLAMA->STARCODER2 class Starcoder2ForTokenClassification(Starcoder2PreTrainedModel): def __init__(self, config): super().__init__(config) diff --git a/src/transformers/models/starcoder2/modular_starcoder2.py b/src/transformers/models/starcoder2/modular_starcoder2.py new file mode 100644 index 00000000000..b323a3ce9e4 --- /dev/null +++ b/src/transformers/models/starcoder2/modular_starcoder2.py @@ -0,0 +1,573 @@ +# coding=utf-8 +# Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch Starcoder2 model.""" + +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...cache_utils import Cache, DynamicCache +from ...modeling_outputs import ( + BaseModelOutputWithPast, +) +from ...utils import ( + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, +) +from ..llama.modeling_llama import ( + LlamaForSequenceClassification, + LlamaForTokenClassification, + LlamaRotaryEmbedding, + apply_rotary_pos_emb, + repeat_kv, +) +from ..qwen2.modeling_qwen2 import Qwen2DecoderLayer, Qwen2ForCausalLM, Qwen2Model, Qwen2PreTrainedModel +from .configuration_starcoder2 import Starcoder2Config + + +if is_flash_attn_2_available(): + from ...modeling_flash_attention_utils import _flash_attention_forward + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "Starcoder2Config" +_CHECKPOINT_FOR_DOC = "bigcode/starcoder2-7b" + + +class Starcoder2RotaryEmbedding(LlamaRotaryEmbedding): + pass + + +class Starcoder2MLP(nn.Module): + def __init__(self, config: Starcoder2Config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias) + self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias) + self.act = ACT2FN[config.hidden_act] + self.residual_dropout = config.residual_dropout + + def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training) + return hidden_states + + +class Starcoder2Attention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer + and "Generating Long Sequences with Sparse Transformers". + """ + + def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.rope_theta = config.rope_theta + self.use_bias = config.use_bias + self.is_causal = True + self.attention_dropout = config.attention_dropout + self.residual_dropout = config.residual_dropout + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." + ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=self.use_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.use_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.use_bias) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=self.use_bias) + + self.rotary_emb = Starcoder2RotaryEmbedding(config=self.config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights += causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Starcoder2FlashAttention2(Starcoder2Attention): + """ + Starcoder2 flash attention module. This module inherits from `Starcoder2Attention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + ): + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + dropout_rate = 0.0 if not self.training else self.attention_dropout + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in float16 just to be sure everything works as expected. + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + # Reshape to the expected shape for Flash Attention + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + attn_output = _flash_attention_forward( + query_states, + key_states, + value_states, + attention_mask, + q_len, + position_ids=position_ids, + dropout=dropout_rate, + sliding_window=getattr(self.config, "sliding_window", None), + is_causal=self.is_causal, + use_top_left_mask=self._flash_attn_uses_top_left_mask, + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class Starcoder2SdpaAttention(Starcoder2Attention): + """ + Starcoder2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `Starcoder2Attention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46 + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "Starcoder2Model is using Starcoder2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + if position_embeddings is None: + logger.warning_once( + "The attention layers in this model are transitioning from computing the RoPE embeddings internally " + "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed " + "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be " + "removed and `position_embeddings` will be mandatory." + ) + cos, sin = self.rotary_emb(value_states, position_ids) + else: + cos, sin = position_embeddings + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and attention_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment + # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. + # # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1. + is_causal = True if causal_mask is None and q_len > 1 else False + + attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=is_causal, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + # The difference with Mistral is that here it uses dropout + attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training) + + return attn_output, None, past_key_value + + +STARCODER2_ATTENTION_CLASSES = { + "eager": Starcoder2Attention, + "flash_attention_2": Starcoder2FlashAttention2, + "sdpa": Starcoder2SdpaAttention, +} + + +class Starcoder2DecoderLayer(Qwen2DecoderLayer, nn.Module): + def __init__(self, config: Starcoder2Config, layer_idx: int): + nn.Module.__init__(self) + self.hidden_size = config.hidden_size + + self.self_attn = STARCODER2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx) + + self.mlp = Starcoder2MLP(config) + + self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) + self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) + + +class Starcoder2PreTrainedModel(Qwen2PreTrainedModel): + pass + + +STARCODER2_INPUTS_DOCSTRING = None # will be automatically redefined + + +class Starcoder2Model(Qwen2Model): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Starcoder2DecoderLayer`] + + Args: + config: Starcoder2Config + """ + + def __init__(self, config: Starcoder2Config): + super().__init__(config) + self.embedding_dropout = config.embedding_dropout + self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) + + @add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError("You must specify exactly one of input_ids or inputs_embeds") + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # kept for BC (non `Cache` `past_key_values` inputs) + return_legacy_cache = False + if use_cache and not isinstance(past_key_values, Cache): + return_legacy_cache = True + if past_key_values is None: + past_key_values = DynamicCache() + else: + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + logger.warning_once( + "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and " + "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class " + "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)" + ) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if cache_position is None: + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask( + attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions + ) + + hidden_states = inputs_embeds + hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training) + + # create position embeddings to be shared across the decoder layers + position_embeddings = self.rotary_emb(hidden_states, position_ids) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + cache_position, + position_embeddings, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + position_embeddings=position_embeddings, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if return_legacy_cache: + next_cache = next_cache.to_legacy_cache() + + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class Starcoder2ForCausalLM(Qwen2ForCausalLM): + pass + + +class Starcoder2ForSequenceClassification(LlamaForSequenceClassification): + pass + + +class Starcoder2ForTokenClassification(LlamaForTokenClassification): + pass diff --git a/utils/modular_model_converter.py b/utils/modular_model_converter.py index ccf15363de9..8d6c6782a57 100644 --- a/utils/modular_model_converter.py +++ b/utils/modular_model_converter.py @@ -145,45 +145,69 @@ def is_call_to_super(node, func_name): ) +def get_full_attribute_name(node: cst.Attribute | cst.Name) -> str | None: + """Get the full name of an Attribute or Name node (e.g. `"nn.Module"` for an Attribute representing it). If the + successive value of an Attribute are not Name nodes, return `None`.""" + if m.matches(node, m.Name()): + return node.value + elif m.matches(node, m.Attribute()): + if not m.matches(node.attr, m.Name()): + return None + name = node.attr.value + new_node = node.value + while m.matches(new_node, m.Attribute()): + if not m.matches(new_node.attr, m.Name()): + return None + name = new_node.attr.value + "." + name + new_node = new_node.value + if not m.matches(new_node, m.Name()): + return None + return new_node.value + "." + name + return None + + # Transformer class to replace ClassB.call_to_method and ClassB().call_to_method with super().call_to_method class ReplaceMethodCallTransformer(cst.CSTTransformer): def __init__(self, all_bases: Set[str]): self.all_bases = all_bases def leave_Attribute(self, original_node: cst.Attribute, updated_node: cst.Attribute) -> cst.CSTNode: - # Handle ClassB.call_to_method + # Handle ClassB.call_to_method or module.classB.call_to_method if ( - m.matches(original_node.value, m.Name()) - and original_node.value.value in self.all_bases + m.matches(original_node.value, m.Name() | m.Attribute()) + and get_full_attribute_name(original_node.value) in self.all_bases and m.matches(original_node.attr, m.Name()) ): # Replace with super().call_to_method return updated_node.with_changes( value=cst.Call(cst.Name("super")), ) - # Handle ClassB().call_to_method + # Handle ClassB().call_to_method or module.ClassB().call_to_method elif ( m.matches(original_node.value, m.Call()) - and m.matches(original_node.value.func, m.Name()) - and original_node.value.func.value in self.all_bases + and m.matches(original_node.value.func, m.Name() | m.Attribute()) + and get_full_attribute_name(original_node.value.func) in self.all_bases and m.matches(original_node.attr, m.Name()) ): # Replace with super().call_to_method - return updated_node.with_changes(func=cst.Attribute(value=cst.Call(func=cst.Name("super")))) + return updated_node.with_changes(value=cst.Call(cst.Name("super"))) return updated_node def leave_Call(self, original_node: cst.Call, updated_node: cst.Call) -> cst.CSTNode: # Check if the function being called is of the form ClassB().func_a or ClassB.func_a if m.matches(original_node.func, m.Attribute()) and ( - # Match ClassB().func_a(...) + # Match ClassB().func_a(...) or module ( m.matches(original_node.func.value, m.Call()) - and m.matches(original_node.func.value.func, m.Name()) - and original_node.func.value.func.value in self.all_bases + and m.matches(original_node.func.value.func, m.Name() | m.Attribute()) + and get_full_attribute_name(original_node.func.value.func) in self.all_bases ) or # Match ClassB.func_a(...) - (m.matches(original_node.func.value, m.Name()) and original_node.func.value.value in self.all_bases) + ( + m.matches(original_node.func.value, m.Name() | m.Attribute()) + and get_full_attribute_name(original_node.func.value) in self.all_bases + ) ): # Check if the first argument is 'self', and remove it if len(original_node.args) > 0 and m.matches(original_node.args[0].value, m.Name("self")): @@ -860,7 +884,9 @@ def replace_class_node(mapper: ModelFileMapper, class_node: cst.ClassDef, rename | self.post_init() | ``` """ - all_bases = [k.value.value for k in class_node.bases] + all_bases = [get_full_attribute_name(k.value) for k in class_node.bases] + if any(base is None for base in all_bases): + raise ValueError(f"Could not parse the name of the bases for {class_node.name.value}") original_node = mapper.classes[renamed_super_class] original_methods = { @@ -1496,7 +1522,7 @@ def save_modeling_file(modular_file, converted_file): parser = argparse.ArgumentParser() parser.add_argument( "--files_to_parse", - default=["src/transformers/models/gemma2/modular_gemma2.py"], + default=["src/transformers/models/starcoder2/modular_starcoder2.py"], nargs="+", help="A list of `modular_xxxx` files that should be converted to single model file", ) From 6a912ff2c5b3eadb9a0583d77083aae27d35d28d Mon Sep 17 00:00:00 2001 From: Raushan Turganbay Date: Fri, 22 Nov 2024 08:25:14 +0100 Subject: [PATCH 15/36] Watermarking: fix order (#34849) fix watermarking order --- src/transformers/generation/utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index e3657550d0e..1e94e9d1ef8 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -1029,10 +1029,6 @@ def _get_logits_processor( "You have explicitly specified `forced_decoder_ids`. Please remove the `forced_decoder_ids` argument " "in favour of `input_ids` or `decoder_input_ids` respectively.", ) - if generation_config.watermarking_config is not None: - processors.append( - generation_config.watermarking_config.construct_processor(self.config.vocab_size, device) - ) # TODO (joao): find a strategy to specify the order of the processors processors = self._merge_criteria_processor_list(processors, logits_processor) @@ -1085,6 +1081,12 @@ def _get_logits_processor( ) ) + # Watermarking should be after all logits processing is finished (see #34630) + if generation_config.watermarking_config is not None: + processors.append( + generation_config.watermarking_config.construct_processor(self.config.vocab_size, device) + ) + # `LogitNormalization` should always be the last logit processor, when present if generation_config.renormalize_logits is True: processors.append(LogitNormalization()) From 1867be666d4e18c69874d4bf35f55f4cde86040c Mon Sep 17 00:00:00 2001 From: Logan Adams <114770087+loadams@users.noreply.github.com> Date: Fri, 22 Nov 2024 01:05:26 -0800 Subject: [PATCH 16/36] Update checks for torch.distributed.tensor to require torch >= 2.5 (#34816) * Update checks for torch.distributed.tensor * Update PR with feedback * Formatting fix for import order * Remove unused function --- src/transformers/modeling_utils.py | 4 ++-- src/transformers/pytorch_utils.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index a4de8abed03..4703c415e42 100755 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -52,7 +52,6 @@ find_pruneable_heads_and_indices, id_tensor_storage, is_torch_greater_or_equal_than_1_13, - is_torch_greater_or_equal_than_2_4, prune_conv1d_layer, prune_layer, prune_linear_layer, @@ -90,6 +89,7 @@ is_peft_available, is_remote_url, is_safetensors_available, + is_torch_greater_or_equal, is_torch_sdpa_available, is_torch_xla_available, logging, @@ -5032,7 +5032,7 @@ def tensor_parallel(self, device_mesh): device_mesh (`torch.distributed.DeviceMesh`): The device mesh to use for tensor parallelism. """ - if not is_torch_greater_or_equal_than_2_4: + if not is_torch_greater_or_equal("2.5"): raise EnvironmentError("tensor parallel is only supported for `torch>=2.5`.") # Tensor parallelize a nn.Module based on the `_tp_plan` attribute of the module. diff --git a/src/transformers/pytorch_utils.py b/src/transformers/pytorch_utils.py index 6757f72350b..5bdf8a355dd 100644 --- a/src/transformers/pytorch_utils.py +++ b/src/transformers/pytorch_utils.py @@ -21,7 +21,7 @@ from safetensors.torch import storage_ptr, storage_size from torch import nn -from .utils import is_torch_xla_available, logging +from .utils import is_torch_greater_or_equal, is_torch_xla_available, logging ALL_LAYERNORM_LAYERS = [nn.LayerNorm] @@ -39,7 +39,7 @@ is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") -if is_torch_greater_or_equal_than_2_4: +if is_torch_greater_or_equal("2.5"): from torch.distributed.tensor import Replicate from torch.distributed.tensor.parallel import ( ColwiseParallel, From d9e6f307e71b5108a7882ec00ffcc0d0eb316cb7 Mon Sep 17 00:00:00 2001 From: Konrad Kalita Date: Fri, 22 Nov 2024 10:06:29 +0100 Subject: [PATCH 17/36] Remove quantization related config from dequantized model (#34856) * Remove quantization related config from dequantized model * Fix whitespace --- src/transformers/quantizers/base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/transformers/quantizers/base.py b/src/transformers/quantizers/base.py index 015c0015cf7..e3236ab8f0f 100755 --- a/src/transformers/quantizers/base.py +++ b/src/transformers/quantizers/base.py @@ -215,6 +215,9 @@ def dequantize(self, model): # Delete quantizer and quantization config del model.hf_quantizer + del model.config.quantization_config + del model.config._pre_quantization_dtype + model.is_quantized = False return model From 597efd21d20fd320a167fef707a0bcf2be205725 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Fri, 22 Nov 2024 15:33:35 +0100 Subject: [PATCH 18/36] Auto compile when static cache (#34247) * generate with compile * nits * simple * generate with compile * nits * simple * safe * style * Update src/transformers/generation/utils.py Co-authored-by: Cyril Vallez * remove TOKENIZER forked warning --------- Co-authored-by: Cyril Vallez --- src/transformers/generation/utils.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 1e94e9d1ef8..c839a6538dc 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -15,6 +15,7 @@ # limitations under the License. import copy import inspect +import os import warnings from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union @@ -3224,6 +3225,16 @@ def _sample( unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) model_kwargs = self._get_initial_cache_position(input_ids, model_kwargs) + def model_forward(model, *args, **kwargs): + return model.forward(*args, **kwargs) + + if isinstance(model_kwargs.get("past_key_values"), StaticCache): + if self.device.type == "cuda": + logger.warning_once("Using `torch.compile`.") + os.environ["TOKENIZERS_PARALLELISM"] = "0" + model_forward = torch.compile(model_forward, mode="reduce-overhead", fullgraph=True) + + i = 0 while self._has_unfinished_sequences( this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length ): @@ -3234,8 +3245,11 @@ def _sample( model_inputs.update({"output_attentions": output_attentions} if output_attentions else {}) model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {}) - # forward pass to get next token - outputs = self(**model_inputs, return_dict=True) + if i == 0: + outputs = self(**model_inputs, return_dict=True) + i += 1 + else: + outputs = model_forward(self, return_dict=True, **model_inputs) # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping model_kwargs = self._update_model_kwargs_for_generation( From 42b36d73958d326b2e0cc8fdd46c34d56402ba98 Mon Sep 17 00:00:00 2001 From: Nadav Timor Date: Fri, 22 Nov 2024 10:02:37 -0500 Subject: [PATCH 19/36] Speculative decoding: Test the target distribution (to prevent issues like #32867) (#34553) * Update test_utils.py * formatting * Update test_utils.py * formatting * formatting * Update test_utils.py * formatting * Update test_utils.py * formatting * format * comments at standard positions --- tests/generation/test_utils.py | 53 ++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index 34adc132f88..a31def2f9a6 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -14,6 +14,7 @@ # limitations under the License. +import collections import copy import gc import inspect @@ -2450,6 +2451,58 @@ def test_speculative_sampling(self): self.assertTrue(n_matches.item() == 2) self.assertTrue(validated_tokens.tolist()[0] == [1, 4, 8]) + def test_speculative_sampling_target_distribution(self): + """ + Asserts that the target distribution is preserved. + Should help with catching issues like #32867. + """ + # assume vocab size 10, input length 5 + 3 generated candidates + candidate_input_ids = torch.tensor([[8, 0, 3, 9, 8, 1, 4, 5]]) # input tokens + candidate_logits = torch.tensor( + [ + [ + [-10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0], # generated 1 + [-10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0, -10.0], # generated 4 + [-10.0, -10.0, -10.0, -10.0, -10.0, 10.0, -10.0, -10.0, -10.0, -10.0], # generated 5 + ] + ] + ) + candidate_length = 3 + inf = float("inf") + new_logits = torch.tensor( + [ + [ + # accepts 1: + [-inf, 10.0, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf], + # accepts 4: + [-inf, -inf, -inf, -inf, 10.0, -inf, -inf, -inf, -inf, -inf], + # most likely to be 1 or 8, less likely to be 3, then 7, and should never be any other value: + [-inf, 2.0, -inf, 1.0, -inf, -inf, -inf, -0.01, 2.0, -inf], + # N/A: + [-inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf, -inf], + ] + ] + ) + last_assistant_token_is_eos = False + last_validated_token = [] + for _ in range(10_000): + validated_tokens, n_matches = _speculative_sampling( + candidate_input_ids, + candidate_logits, + candidate_length, + new_logits, + last_assistant_token_is_eos, + ) + self.assertTrue(n_matches.item() == 2) + self.assertTrue(validated_tokens.tolist()[0][0] == 1) + self.assertTrue(validated_tokens.tolist()[0][1] == 4) + self.assertTrue(validated_tokens.tolist()[0][2] in [1, 3, 7, 8]) + last_validated_token.append(validated_tokens.tolist()[0][2]) + # check that the most likely tokens are selected more often than the less likely ones + last_token_counts = collections.Counter(last_validated_token) + self.assertTrue(last_token_counts[1] > last_token_counts[3] > last_token_counts[7] > 0) + self.assertTrue(last_token_counts[8] > last_token_counts[3]) + @pytest.mark.generate @require_torch From 861758e2358bae903861d54fbc25b87d3281ea78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20Marafioti?= Date: Fri, 22 Nov 2024 16:34:38 +0100 Subject: [PATCH 20/36] smol improvements to support more flexible usage (#34857) * smol improvements to support more flexible usage * ruff --- .../idefics3/image_processing_idefics3.py | 40 ++++++++----------- 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/src/transformers/models/idefics3/image_processing_idefics3.py b/src/transformers/models/idefics3/image_processing_idefics3.py index 05a1a396dc7..f9161416656 100644 --- a/src/transformers/models/idefics3/image_processing_idefics3.py +++ b/src/transformers/models/idefics3/image_processing_idefics3.py @@ -38,6 +38,7 @@ logger = logging.get_logger(__name__) +MAX_IMAGE_SIZE = 4096 # 4k resolution as absolute maximum if is_vision_available(): @@ -116,7 +117,6 @@ def _resize_output_size_scale_below_upper_bound( def get_resize_output_image_size( image, resolution_max_side: int, - max_image_size: int = 1820, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> Tuple[int, int]: """ @@ -126,24 +126,18 @@ def get_resize_output_image_size( Image to resize. resolution_max_side (`int`): The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the - input aspect ratio, with a lower bound of `min_image_size`. - max_image_size (`int`, *optional*, defaults to 1820): - Maximum image resolution. If the image is larger than this size, the longest edge will be resized to this - value, with the shortest edge resized to keep the input aspect ratio, with a lower bound of `min_image_size`. + input aspect ratio. input_data_format (`ChannelDimension` or `str`): The channel dimension format of the input image. Returns: The output size of the image after resizing. """ - if resolution_max_side > max_image_size: - raise ValueError("`resolution_max_side` cannot be larger than `max_image_size`") - height, width = get_image_size(image, channel_dim=input_data_format) # Find the output size, when rescaling the longest edge to max_len and preserving the aspect ratio height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side) - # Find the output size when scaling the image to be below the max_image_size - height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=max_image_size) + # Find the output size when scaling the image to be below the MAX_IMAGE_SIZE + height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE) return height, width @@ -251,7 +245,7 @@ def convert_to_rgb( data_format = input_data_format if data_format is None else data_format mode = "P" if palette is not None else None - image = to_pil_image(image, image_mode=mode) + image = to_pil_image(image, image_mode=mode, input_data_format=input_data_format) if image.mode == "P" and palette is not None: image.putpalette(palette) @@ -404,7 +398,7 @@ def resize( image_mode = None if image.ndim == 2 or image.shape[-1] == 1: image_mode = "P" - image = to_pil_image(image, image_mode=image_mode) + image = to_pil_image(image, image_mode=image_mode, input_data_format=input_data_format) resized_image = image.resize((size[1], size[0]), resample=resample) resized_image = np.array(resized_image) @@ -754,6 +748,16 @@ def preprocess( # All transformations expect numpy arrays. images_list = [[to_numpy_array(image) for image in images] for images in images_list] + # Extra channel dimension for grayscale images + if input_data_format in [ChannelDimension.LAST, None]: + images_list = [ + [np.expand_dims(img, axis=-1) if img.ndim == 2 else img for img in images] for images in images_list + ] + elif input_data_format == ChannelDimension.FIRST: + images_list = [ + [np.expand_dims(img, axis=0) if img.ndim == 2 else img for img in images] for images in images_list + ] + if is_scaled_image(images_list[0][0]) and do_rescale: logger.warning_once( "It looks like you are trying to rescale already rescaled images. If the input" @@ -764,18 +768,6 @@ def preprocess( if input_data_format is None: input_data_format = infer_channel_dimension_format(images_list[0][0], num_channels=(1, 3, 4)) - # Extra channel dimension for grayscale images - if input_data_format == ChannelDimension.LAST: - images_list = [ - [np.expand_dims(img, axis=-1) if img.ndim == 2 else img for img in images] for images in images_list - ] - elif input_data_format == ChannelDimension.FIRST: - images_list = [ - [np.expand_dims(img, axis=0) if img.ndim == 2 else img for img in images] for images in images_list - ] - else: - raise ValueError(f"Invalid channel dimension format {input_data_format}.") - if do_resize: images_list = [ [ From 286ffaaf0ab981f3530d0ac34d1b172efa5c03db Mon Sep 17 00:00:00 2001 From: Benjamin Bossan Date: Fri, 22 Nov 2024 17:13:30 +0100 Subject: [PATCH 21/36] [CI] Skip EETQ tests while package is broken with latest transformers (#34854) * CI Skip EETQ tests while package is broken EETQ tries to import the shard_checkpoint function from transformers but the function has been removed. Therefore, trying to use EETQ currently results in an import error. This fix results in EETQ tests being skipped if there is an import error. The issue has been reported to EETQ: https://github.com/NetEase-FuXi/EETQ/issues/34 * Raise helpful error when trying to use eetq * Forget to raise the error in else clause --- src/transformers/quantizers/quantizer_eetq.py | 14 ++++++++++++++ src/transformers/testing_utils.py | 12 +++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/transformers/quantizers/quantizer_eetq.py b/src/transformers/quantizers/quantizer_eetq.py index 602df62c012..7dfce75c373 100644 --- a/src/transformers/quantizers/quantizer_eetq.py +++ b/src/transformers/quantizers/quantizer_eetq.py @@ -53,6 +53,20 @@ def validate_environment(self, *args, **kwargs): "Please install the latest version of eetq from : https://github.com/NetEase-FuXi/EETQ" ) + try: + import eetq # noqa: F401 + except ImportError as exc: + if "shard_checkpoint" in str(exc): + # EETQ 1.0.0 is currently broken with the latest transformers because it tries to import the removed + # shard_checkpoint function, see https://github.com/NetEase-FuXi/EETQ/issues/34. + # TODO: Update message once eetq releases a fix + raise ImportError( + "You are using a version of EETQ that is incompatible with the current transformers version. " + "Either downgrade transformers to <= v4.46.3 or, if available, upgrade EETQ to > v1.0.0." + ) from exc + else: + raise + if not is_accelerate_available(): raise ImportError("Loading an EETQ quantized model requires accelerate (`pip install accelerate`)") diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 49c2aefa092..25d837ccec0 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -1143,7 +1143,17 @@ def require_eetq(test_case): """ Decorator marking a test that requires eetq """ - return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case) + eetq_available = is_eetq_available() + if eetq_available: + try: + import eetq # noqa: F401 + except ImportError as exc: + if "shard_checkpoint" in str(exc): + # EETQ 1.0.0 is currently broken with the latest transformers because it tries to import the removed + # shard_checkpoint function, see https://github.com/NetEase-FuXi/EETQ/issues/34. + # TODO: Remove once eetq releases a fix and this release is used in CI + eetq_available = False + return unittest.skipUnless(eetq_available, "test requires eetq")(test_case) def require_av(test_case): From 54be2d7ae87e873482b984cc956e165ca4dc0ba3 Mon Sep 17 00:00:00 2001 From: Mohamed Mekkouri <93391238+MekkCyber@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:18:49 +0100 Subject: [PATCH 22/36] Bitnet test fix to avoid using gated model (#34863) small test fix --- tests/quantization/bitnet_integration/test_bitnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/quantization/bitnet_integration/test_bitnet.py b/tests/quantization/bitnet_integration/test_bitnet.py index ef71cc82dbf..38b05c3abff 100644 --- a/tests/quantization/bitnet_integration/test_bitnet.py +++ b/tests/quantization/bitnet_integration/test_bitnet.py @@ -65,7 +65,7 @@ def setUpClass(cls): """ Load the model """ - cls.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") + cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained(cls.model_name, device_map=cls.device) def tearDown(self): From 3a8eb74668e9c2cc563b2f5c62fac174797063e0 Mon Sep 17 00:00:00 2001 From: Yoni Gozlan <74535834+yonigozlan@users.noreply.github.com> Date: Fri, 22 Nov 2024 18:14:24 -0500 Subject: [PATCH 23/36] Fix support for image processors modifications in modular (#34866) * add fix and examples * fix camel case naming --- .../image_processing_new_imgproc_model.py | 287 ++++++++++++++++++ .../modular_new_imgproc_model.py | 9 + utils/modular_model_converter.py | 2 +- 3 files changed, 297 insertions(+), 1 deletion(-) create mode 100644 examples/modular-transformers/image_processing_new_imgproc_model.py create mode 100644 examples/modular-transformers/modular_new_imgproc_model.py diff --git a/examples/modular-transformers/image_processing_new_imgproc_model.py b/examples/modular-transformers/image_processing_new_imgproc_model.py new file mode 100644 index 00000000000..8966b454882 --- /dev/null +++ b/examples/modular-transformers/image_processing_new_imgproc_model.py @@ -0,0 +1,287 @@ +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +# This file was automatically generated from examples/modular-transformers/modular_new_imgproc_model.py. +# Do NOT edit this file manually as any edits will be overwritten by the generation of +# the file from the modular. If any change should be done, please apply the change to the +# modular_new_imgproc_model.py file directly. One of our CI enforces this. +# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨 +from typing import Dict, List, Optional, Union + +import numpy as np +import torch + +from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict +from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format +from ...image_utils import ( + OPENAI_CLIP_MEAN, + OPENAI_CLIP_STD, + ChannelDimension, + ImageInput, + PILImageResampling, + infer_channel_dimension_format, + is_scaled_image, + make_list_of_images, + to_numpy_array, + valid_images, + validate_preprocess_arguments, +) +from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging + + +if is_vision_available(): + import PIL + + +logger = logging.get_logger(__name__) + + +class ImgprocModelImageProcessor(BaseImageProcessor): + r""" + Constructs a NEW_IMGPROC_MODEL image processor. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the + `do_resize` parameter in the `preprocess` method. + size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`): + Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess` + method. + resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): + Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be + overridden by the `resample` parameter in the `preprocess` method. + do_rescale (`bool`, *optional*, defaults to `True`): + Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the + `do_rescale` parameter in the `preprocess` method. + rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): + Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be + overridden by the `rescale_factor` parameter in the `preprocess` method. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` + method. Can be overridden by the `do_normalize` parameter in the `preprocess` method. + image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): + Mean to use if normalizing the image. This is a float or list of floats the length of the number of + channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be + overridden by the `image_mean` parameter in the `preprocess` method. + image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): + Standard deviation to use if normalizing the image. This is a float or list of floats the length of the + number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. + Can be overridden by the `image_std` parameter in the `preprocess` method. + do_convert_rgb (`bool`, *optional*, defaults to `True`): + Whether to convert the image to RGB. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize: bool = True, + size: Dict[str, int] = None, + resample: PILImageResampling = PILImageResampling.BICUBIC, + do_rescale: bool = True, + rescale_factor: Union[int, float] = 1 / 255, + do_normalize: bool = True, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + do_convert_rgb: bool = True, + **kwargs, + ) -> None: + super().__init__(**kwargs) + size = size if size is not None else {"height": 384, "width": 384} + size = get_size_dict(size, default_to_square=True) + + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_rescale = do_rescale + self.rescale_factor = rescale_factor + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN + self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD + self.do_convert_rgb = do_convert_rgb + + def resize( + self, + image: np.ndarray, + size: Dict[str, int], + resample: PILImageResampling = PILImageResampling.BICUBIC, + data_format: Optional[Union[str, ChannelDimension]] = None, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + **kwargs, + ) -> np.ndarray: + """ + Resize an image to `(size["height"], size["width"])`. + + Args: + image (`np.ndarray`): + Image to resize. + size (`Dict[str, int]`): + Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image. + resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): + `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`. + data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the output image. If unset, the channel dimension format of the input + image is used. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + + Returns: + `np.ndarray`: The resized image. + """ + size = get_size_dict(size) + if "height" not in size or "width" not in size: + raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}") + output_size = (size["height"], size["width"]) + return resize( + image, + size=output_size, + resample=resample, + data_format=data_format, + input_data_format=input_data_format, + **kwargs, + ) + + @filter_out_non_signature_kwargs() + def preprocess( + self, + images: ImageInput, + do_resize: Optional[bool] = None, + size: Optional[Dict[str, int]] = None, + resample: PILImageResampling = None, + do_rescale: Optional[bool] = None, + rescale_factor: Optional[float] = None, + do_normalize: Optional[bool] = None, + image_mean: Optional[Union[float, List[float]]] = None, + image_std: Optional[Union[float, List[float]]] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + do_convert_rgb: bool = None, + data_format: ChannelDimension = ChannelDimension.FIRST, + input_data_format: Optional[Union[str, ChannelDimension]] = None, + ) -> PIL.Image.Image: + """ + Preprocess an image or batch of images. + + Args: + images (`ImageInput`): + Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If + passing in images with pixel values between 0 and 1, set `do_rescale=False`. + do_resize (`bool`, *optional*, defaults to `self.do_resize`): + Whether to resize the image. + size (`Dict[str, int]`, *optional*, defaults to `self.size`): + Controls the size of the image after `resize`. The shortest edge of the image is resized to + `size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image + is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest + edge equal to `int(size["shortest_edge"] * (1333 / 800))`. + resample (`PILImageResampling`, *optional*, defaults to `self.resample`): + Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. + do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): + Whether to rescale the image values between [0 - 1]. + rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): + Rescale factor to rescale the image by if `do_rescale` is set to `True`. + do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): + Whether to normalize the image. + image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): + Image mean to normalize the image by if `do_normalize` is set to `True`. + image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): + Image standard deviation to normalize the image by if `do_normalize` is set to `True`. + do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): + Whether to convert the image to RGB. + return_tensors (`str` or `TensorType`, *optional*): + The type of tensors to return. Can be one of: + - Unset: Return a list of `np.ndarray`. + - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. + - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. + - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. + - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. + data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): + The channel dimension format for the output image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - Unset: Use the channel dimension format of the input image. + input_data_format (`ChannelDimension` or `str`, *optional*): + The channel dimension format for the input image. If unset, the channel dimension format is inferred + from the input image. Can be one of: + - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. + - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. + - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. + """ + do_resize = do_resize if do_resize is not None else self.do_resize + resample = resample if resample is not None else self.resample + do_rescale = do_rescale if do_rescale is not None else self.do_rescale + rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor + do_normalize = do_normalize if do_normalize is not None else self.do_normalize + image_mean = image_mean if image_mean is not None else self.image_mean + image_std = image_std if image_std is not None else self.image_std + do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb + + size = size if size is not None else self.size + size = get_size_dict(size, default_to_square=False) + + images = make_list_of_images(images) + + if not valid_images(images): + raise ValueError( + "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " + "torch.Tensor, tf.Tensor or jax.ndarray." + ) + + validate_preprocess_arguments( + do_rescale=do_rescale, + rescale_factor=rescale_factor, + do_normalize=do_normalize, + image_mean=image_mean, + image_std=image_std, + do_resize=do_resize, + size=size, + resample=resample, + ) + # PIL RGBA images are converted to RGB + if do_convert_rgb: + images = [convert_to_rgb(image) for image in images] + + # All transformations expect numpy arrays. + images = [to_numpy_array(image) for image in images] + + if is_scaled_image(images[0]) and do_rescale: + logger.warning_once( + "It looks like you are trying to rescale already rescaled images. If the input" + " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again." + ) + + if input_data_format is None: + # We assume that all images have the same channel dimension format. + input_data_format = infer_channel_dimension_format(images[0]) + + if do_resize: + images = [ + self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) + for image in images + ] + + if do_rescale: + images = [ + self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) + for image in images + ] + + if do_normalize: + images = [ + self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) + for image in images + ] + + images = [ + to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images + ] + + encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors) + + return encoded_outputs + + def new_image_processing_method(self, pixel_values: torch.FloatTensor): + return pixel_values / 2 diff --git a/examples/modular-transformers/modular_new_imgproc_model.py b/examples/modular-transformers/modular_new_imgproc_model.py new file mode 100644 index 00000000000..1d054166c28 --- /dev/null +++ b/examples/modular-transformers/modular_new_imgproc_model.py @@ -0,0 +1,9 @@ +import torch +import torch.utils.checkpoint + +from transformers.models.blip.image_processing_blip import BlipImageProcessor + + +class ImgprocModelImageProcessor(BlipImageProcessor): + def new_image_processing_method(self, pixel_values: torch.FloatTensor): + return pixel_values / 2 diff --git a/utils/modular_model_converter.py b/utils/modular_model_converter.py index 8d6c6782a57..28e76ca19ac 100644 --- a/utils/modular_model_converter.py +++ b/utils/modular_model_converter.py @@ -1192,7 +1192,7 @@ def leave_Module(self, node): self.visited_modules = {} self.renamers = {} for file, module in self.model_specific_modules.items(): - file_model_name = re.search(r"models\.\w*?\.\w*?_(\S*)", file).groups()[0] + file_model_name = file.split(".")[-2] renamer = ReplaceNameTransformer( file_model_name, self.model_name, self.given_old_name, self.given_new_name ) From 318fe25f22a99ce1226f8d2aadc268b40f7e55af Mon Sep 17 00:00:00 2001 From: jeongin601 <78595701+jeongin601@users.noreply.github.com> Date: Mon, 25 Nov 2024 17:45:35 +0900 Subject: [PATCH 24/36] Fix: Enable prefill phase key value caching of nemotron/minitron models (#34742) * modeling nemotron kv caching bugfix Signed-off-by: jeongin601 <0200angela@gmail.com> * test file deleted Signed-off-by: jeongin601 <0200angela@gmail.com> * code refinement Signed-off-by: jeongin601 <0200angela@gmail.com> * remove unused variables Signed-off-by: jeongin601 <0200angela@gmail.com> * import block sorted * removed deprecation warning Signed-off-by: jeongin601 <0200angela@gmail.com> * removed support for tuple shape past_key_values Signed-off-by: jeongin601 <0200angela@gmail.com> * Update conditional statement for cache initialization Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --------- Signed-off-by: jeongin601 <0200angela@gmail.com> Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- src/transformers/models/nemotron/modeling_nemotron.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/nemotron/modeling_nemotron.py b/src/transformers/models/nemotron/modeling_nemotron.py index 8de6bc90ea3..1c56ecd56f5 100644 --- a/src/transformers/models/nemotron/modeling_nemotron.py +++ b/src/transformers/models/nemotron/modeling_nemotron.py @@ -24,7 +24,7 @@ from torch import Size, Tensor, nn from ...activations import ACT2FN -from ...cache_utils import Cache, StaticCache +from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward @@ -783,8 +783,14 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + if use_cache and past_key_values is None: + past_key_values = DynamicCache() + if cache_position is None: - cache_position = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device) + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) if position_ids is None: position_ids = cache_position.unsqueeze(0) From 1339a14dca0c633f74bc8fb771aa8a651dd472b0 Mon Sep 17 00:00:00 2001 From: Dmitry Rogozhkin Date: Mon, 25 Nov 2024 01:03:43 -0800 Subject: [PATCH 25/36] Add safe_globals to resume training on PyTorch 2.6 (#34632) Starting from version 2.4 PyTorch introduces a stricter check for the objects which can be loaded with torch.load(). Starting from version 2.6 loading with weights_only=True requires allowlisting of such objects. This commit adds allowlist of some numpy objects used to load model checkpoints. Usage is restricted by context manager. User can still additionally call torch.serialization.add_safe_globals() to add other objects into the safe globals list. Accelerate library also stepped into same problem and addressed it with PR-3036. Fixes: #34631 See: https://github.com/pytorch/pytorch/pull/137602 See: https://pytorch.org/docs/stable/notes/serialization.html#torch.serialization.add_safe_globals See: https://github.com/huggingface/accelerate/pull/3036 Signed-off-by: Dmitry Rogozhkin --- src/transformers/trainer.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 3fd067edfc5..46add00b018 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -272,6 +272,25 @@ def _get_fsdp_ckpt_kwargs(): return {} +def safe_globals(): + # Starting from version 2.4 PyTorch introduces a check for the objects loaded + # with torch.load(weights_only=True). Starting from 2.6 weights_only=True becomes + # a default and requires allowlisting of objects being loaded. + # See: https://github.com/pytorch/pytorch/pull/137602 + # See: https://pytorch.org/docs/stable/notes/serialization.html#torch.serialization.add_safe_globals + # See: https://github.com/huggingface/accelerate/pull/3036 + if version.parse(torch.__version__).release < version.parse("2.6").release: + return contextlib.nullcontext() + + np_core = np._core if version.parse(np.__version__) >= version.parse("2.0.0") else np.core + allowlist = [np_core.multiarray._reconstruct, np.ndarray, np.dtype] + # numpy >1.25 defines numpy.dtypes.UInt32DType, but below works for + # all versions of numpy + allowlist += [type(np.dtype(np.uint32))] + + return torch.serialization.safe_globals(allowlist) + + if TYPE_CHECKING: import optuna @@ -3055,7 +3074,8 @@ def _load_rng_state(self, checkpoint): ) return - checkpoint_rng_state = torch.load(rng_file) + with safe_globals(): + checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) From c1a8520419b7b7088b4a115072439b3b42bd5696 Mon Sep 17 00:00:00 2001 From: Raushan Turganbay Date: Mon, 25 Nov 2024 10:11:33 +0100 Subject: [PATCH 26/36] Cache: init empty cache when `use_cache` (#34274) * fix * fix tests * fix copies * add docs * Revert "add docs" This reverts commit 32d35634f12ba02781d2ebdee0c8dcfbe992a7b9. * qwen move deltas * mllama can potentiall fullgraph compile * enable mllama compile and fix tests * remove mllama fixes --- .../models/chameleon/modeling_chameleon.py | 6 +- .../models/mllama/modeling_mllama.py | 7 +- .../models/nemotron/modeling_nemotron.py | 3 + .../models/qwen2_vl/modeling_qwen2_vl.py | 90 +++++++------------ tests/generation/test_utils.py | 8 ++ .../models/qwen2_vl/test_modeling_qwen2_vl.py | 4 + tests/test_modeling_common.py | 3 +- 7 files changed, 57 insertions(+), 64 deletions(-) diff --git a/src/transformers/models/chameleon/modeling_chameleon.py b/src/transformers/models/chameleon/modeling_chameleon.py index 0661da87279..3255b6f44c0 100644 --- a/src/transformers/models/chameleon/modeling_chameleon.py +++ b/src/transformers/models/chameleon/modeling_chameleon.py @@ -25,7 +25,7 @@ from torch.nn import CrossEntropyLoss from ...activations import ACT2FN -from ...cache_utils import Cache, StaticCache +from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_flash_attention_utils import _flash_attention_forward @@ -1300,6 +1300,10 @@ def forward( if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) + # torch.jit.trace() doesn't support cache objects in the output + if use_cache and past_key_values is None and not torch.jit.is_tracing(): + past_key_values = DynamicCache() + if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( diff --git a/src/transformers/models/mllama/modeling_mllama.py b/src/transformers/models/mllama/modeling_mllama.py index 8ce6150a2fa..3ce5d0b7aa0 100644 --- a/src/transformers/models/mllama/modeling_mllama.py +++ b/src/transformers/models/mllama/modeling_mllama.py @@ -24,7 +24,7 @@ from ... import PreTrainedModel from ...activations import ACT2FN -from ...cache_utils import Cache, StaticCache +from ...cache_utils import Cache, DynamicCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import AttentionMaskConverter from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, CausalLMOutputWithPast @@ -1618,6 +1618,9 @@ def forward( hidden_states = inputs_embeds + if use_cache and past_key_values is None: + past_key_values = DynamicCache() + if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( @@ -1845,7 +1848,7 @@ def __init__(self, config): super().__init__(config.get_text_config()) self.text_config = config.get_text_config() self.vocab_size = self.text_config.vocab_size - self.model = MllamaTextModel._from_config(self.text_config, attn_implementation=config._attn_implementation) + self.model = MllamaTextModel._from_config(self.text_config) self.lm_head = nn.Linear(self.text_config.hidden_size, self.vocab_size, bias=False) self.post_init() diff --git a/src/transformers/models/nemotron/modeling_nemotron.py b/src/transformers/models/nemotron/modeling_nemotron.py index 1c56ecd56f5..76275778c49 100644 --- a/src/transformers/models/nemotron/modeling_nemotron.py +++ b/src/transformers/models/nemotron/modeling_nemotron.py @@ -780,6 +780,9 @@ def forward( ) use_cache = False + if use_cache and past_key_values is None: + past_key_values = DynamicCache() + if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) diff --git a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py index eabae7b2b0d..cc05baca2f0 100644 --- a/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py +++ b/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py @@ -21,7 +21,7 @@ import math from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import List, Optional, Tuple, Union import torch import torch.nn as nn @@ -30,7 +30,7 @@ from torch.nn import CrossEntropyLoss, LayerNorm from ...activations import ACT2FN -from ...cache_utils import Cache, SlidingWindowCache, StaticCache +from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( AttentionMaskConverter, @@ -549,10 +549,6 @@ def forward( key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - kv_seq_len += cache_position[0] + 1 - if position_embeddings is None: logger.warning_once( "The attention layers in this model are transitioning from computing the RoPE embeddings internally " @@ -646,16 +642,6 @@ def forward( key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - if self.layer_idx is None: - raise ValueError( - f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " - "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " - "with a layer index." - ) - kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) - # Because the input can be padded, the absolute sequence length depends on the max position id. if position_embeddings is None: logger.warning_once( @@ -784,9 +770,6 @@ def forward( key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) if position_embeddings is None: logger.warning_once( "The attention layers in this model are transitioning from computing the RoPE embeddings internally " @@ -1116,6 +1099,10 @@ def forward( ) use_cache = False + # torch.jit.trace() doesn't support cache objects in the output + if use_cache and past_key_values is None and not torch.jit.is_tracing(): + past_key_values = DynamicCache() + if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) @@ -1428,7 +1415,7 @@ def __init__(self, config): self.model = Qwen2VLModel(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - self.padding_side = "left" # set it to left by default, user can use setter to change padding_sides + self.rope_deltas = None # cache rope_deltas here # Initialize weights and apply final processing self.post_init() @@ -1507,7 +1494,7 @@ def get_rope_index( video_token_id = self.config.video_token_id vision_start_token_id = self.config.vision_start_token_id mrope_position_deltas = [] - if image_grid_thw is not None or video_grid_thw is not None: + if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None): total_input_ids = input_ids if attention_mask is None: attention_mask = torch.ones_like(total_input_ids) @@ -1600,25 +1587,6 @@ def get_rope_index( return position_ids, mrope_position_deltas - def _update_model_kwargs_for_generation( - self, - outputs: ModelOutput, - model_kwargs: Dict[str, Any], - is_encoder_decoder: bool = False, - num_new_tokens: int = 1, - ) -> Dict[str, Any]: - model_kwargs = super()._update_model_kwargs_for_generation( - outputs=outputs, - model_kwargs=model_kwargs, - is_encoder_decoder=is_encoder_decoder, - num_new_tokens=num_new_tokens, - ) - - if getattr(outputs, "rope_deltas", None) is not None: - model_kwargs["rope_deltas"] = outputs.rope_deltas - - return model_kwargs - @add_start_docstrings_to_model_forward(QWEN2_VL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Qwen2VLCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( @@ -1638,6 +1606,7 @@ def forward( image_grid_thw: Optional[torch.LongTensor] = None, video_grid_thw: Optional[torch.LongTensor] = None, rope_deltas: Optional[torch.LongTensor] = None, + cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, Qwen2VLCausalLMOutputWithPast]: r""" Args: @@ -1726,8 +1695,24 @@ def forward( if attention_mask is not None: attention_mask = attention_mask.to(inputs_embeds.device) - if position_ids is None and input_ids is not None: - position_ids, _ = self.get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) + # if we get 4D attention mask we cannot calculate rope deltas anymore. TODO @raushan fixme + if position_ids is None and input_ids is not None and (attention_mask is None or attention_mask.ndim == 2): + # calculate RoPE index once per generation in the pre-fill stage only + if (cache_position is not None and cache_position[0] == 0) or self.rope_deltas is None: + position_ids, rope_deltas = self.get_rope_index( + input_ids, image_grid_thw, video_grid_thw, attention_mask + ) + self.rope_deltas = rope_deltas + # then use the prev pre-calculated rope-deltas to get the correct position ids + else: + batch_size, seq_length, _ = inputs_embeds.shape + delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0 + position_ids = torch.arange(seq_length, device=inputs_embeds.device) + position_ids = position_ids.view(1, -1).expand(batch_size, -1) + if cache_position is not None: # otherwise `deltas` is an int `0` + delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0) + position_ids = position_ids.add(delta) + position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) outputs = self.model( input_ids=None, @@ -1739,6 +1724,7 @@ def forward( output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, + cache_position=cache_position, ) hidden_states = outputs[0] @@ -1769,7 +1755,7 @@ def forward( past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, - rope_deltas=rope_deltas, + rope_deltas=self.rope_deltas, ) def prepare_inputs_for_generation( @@ -1798,22 +1784,6 @@ def prepare_inputs_for_generation( elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2) input_ids = input_ids[:, cache_position] - rope_deltas = kwargs.get("rope_deltas", None) - if attention_mask is not None and position_ids is None: - if cache_position is None or (cache_position is not None and cache_position[0] == 0): - position_ids, rope_deltas = self.get_rope_index( - input_ids, image_grid_thw, video_grid_thw, attention_mask - ) - else: - batch_size, seq_length = input_ids.shape - delta = ( - cache_position[0] + rope_deltas if cache_position is not None and rope_deltas is not None else 0 - ) - position_ids = torch.arange(seq_length, device=input_ids.device) - position_ids = position_ids.view(1, -1).expand(batch_size, -1) - position_ids = position_ids.add(delta) - position_ids = position_ids.unsqueeze(0).expand(3, -1, -1) - if cache_position[0] != 0: pixel_values = None pixel_values_videos = None @@ -1854,7 +1824,7 @@ def prepare_inputs_for_generation( "pixel_values_videos": pixel_values_videos, "image_grid_thw": image_grid_thw, "video_grid_thw": video_grid_thw, - "rope_deltas": rope_deltas, + "cache_position": cache_position, } ) return model_inputs diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index a31def2f9a6..6c9a4801b65 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1531,6 +1531,14 @@ def test_past_key_values_format(self): embed_dim = getattr(text_config, "d_model", text_config.hidden_size) per_head_embed_dim = embed_dim // num_attention_heads + # some models have diffent num-head for query vs key/value so we need to assign correct value + # BUT only after `per_head_embed_dim` is set + num_attention_heads = ( + text_config.num_key_value_heads + if getattr(text_config, "num_key_value_heads", None) is not None + else num_attention_heads + ) + past_kv = outputs["past_key_values"] self.assertEqual(len(past_kv), num_hidden_layers) diff --git a/tests/models/qwen2_vl/test_modeling_qwen2_vl.py b/tests/models/qwen2_vl/test_modeling_qwen2_vl.py index f2a3719e17b..93ed33ae774 100644 --- a/tests/models/qwen2_vl/test_modeling_qwen2_vl.py +++ b/tests/models/qwen2_vl/test_modeling_qwen2_vl.py @@ -333,6 +333,10 @@ def test_beam_search_low_memory(self): def test_generate_from_inputs_embeds_with_static_cache(self): pass + @unittest.skip(reason="Can't compile fullgraph due to dynamic control flow in `prepare_inputs_for_generate`") + def test_generate_compile_fullgraph(self): + pass + @require_torch class Qwen2VLIntegrationTest(unittest.TestCase): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 4cfc91aade2..fe06e223586 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2343,7 +2343,8 @@ def recursive_check(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return - else: + # model might return non-tensors objects (e.g. Cache class) + elif isinstance(tuple_object, torch.Tensor): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 From 098962dac29d25b6fc3c3adb8554d83d9d650376 Mon Sep 17 00:00:00 2001 From: Raushan Turganbay Date: Mon, 25 Nov 2024 10:41:55 +0100 Subject: [PATCH 27/36] BLIP: fix generation after hub update (#34876) * fix blip generation * dont remove it yet * Update src/transformers/models/blip_2/modeling_blip_2.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * address comments * modular --------- Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> --- src/transformers/generation/utils.py | 7 +++++- .../models/blip_2/modeling_blip_2.py | 12 ++++++---- .../instructblip/modeling_instructblip.py | 11 +++++---- .../modeling_instructblipvideo.py | 11 +++++---- .../modular_instructblipvideo.py | 11 +++++---- tests/models/blip_2/test_modeling_blip_2.py | 23 ++++++++----------- .../test_modeling_instructblip.py | 2 +- 7 files changed, 42 insertions(+), 35 deletions(-) diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index c839a6538dc..16b26ade7a6 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -421,7 +421,12 @@ def prepare_inputs_for_generation( model_input = kwargs.get(model_input_name) if model_input is not None: if past_key_values is not None: - model_input = model_input[:, -input_ids.shape[1] :] + current_input_length = ( + model_inputs["inputs_embeds"].shape[1] + if model_inputs["inputs_embeds"] is not None + else model_inputs[input_ids_key].shape[1] + ) + model_input = model_input[:, -current_input_length:] model_input = model_input.clone(memory_format=torch.contiguous_format) model_inputs[model_input_name] = model_input diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py index d34528b7431..2e32912421d 100644 --- a/src/transformers/models/blip_2/modeling_blip_2.py +++ b/src/transformers/models/blip_2/modeling_blip_2.py @@ -2307,12 +2307,14 @@ def generate( language_attention_mask = torch.ones( language_model_inputs.size()[:-1], dtype=torch.long, device=language_model_inputs.device ) + if input_ids is None: - input_ids = ( - torch.LongTensor([[self.config.text_config.bos_token_id]]) - .repeat(batch_size, 1) - .to(image_embeds.device) - ) + start_tokens = [self.config.text_config.bos_token_id] + if getattr(self.config, "image_token_index", None) is not None: + start_tokens += [self.config.image_token_index] * self.config.num_query_tokens + input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device) + input_ids = input_ids.repeat(batch_size, 1) + inputs_embeds = self.get_input_embeddings()(input_ids) if attention_mask is None: attention_mask = torch.ones_like(input_ids) diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py index e5622185bc3..a63393ab1dd 100644 --- a/src/transformers/models/instructblip/modeling_instructblip.py +++ b/src/transformers/models/instructblip/modeling_instructblip.py @@ -1591,11 +1591,12 @@ def generate( ) if input_ids is None: - input_ids = ( - torch.LongTensor([[self.config.text_config.bos_token_id]]) - .repeat(batch_size, 1) - .to(image_embeds.device) - ) + start_tokens = [self.config.text_config.bos_token_id] + if getattr(self.config, "image_token_index", None) is not None: + start_tokens += [self.config.image_token_index] * self.config.num_query_tokens + input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device) + input_ids = input_ids.repeat(batch_size, 1) + if attention_mask is None: attention_mask = torch.ones_like(input_ids) diff --git a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py index b0a494dcfe6..e922d1e3f26 100644 --- a/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py @@ -1626,11 +1626,12 @@ def generate( ) if input_ids is None: - input_ids = ( - torch.LongTensor([[self.config.text_config.bos_token_id]]) - .repeat(batch_size, 1) - .to(image_embeds.device) - ) + start_tokens = [self.config.text_config.bos_token_id] + if getattr(self.config, "video_token_index", None) is not None: + start_tokens += [self.config.video_token_index] * self.config.num_query_tokens * 4 + input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device) + input_ids = input_ids.repeat(batch_size, 1) + if attention_mask is None: attention_mask = torch.ones_like(input_ids) diff --git a/src/transformers/models/instructblipvideo/modular_instructblipvideo.py b/src/transformers/models/instructblipvideo/modular_instructblipvideo.py index b0dc8a21574..126d81b6d3d 100644 --- a/src/transformers/models/instructblipvideo/modular_instructblipvideo.py +++ b/src/transformers/models/instructblipvideo/modular_instructblipvideo.py @@ -439,11 +439,12 @@ def generate( ) if input_ids is None: - input_ids = ( - torch.LongTensor([[self.config.text_config.bos_token_id]]) - .repeat(batch_size, 1) - .to(image_embeds.device) - ) + start_tokens = [self.config.text_config.bos_token_id] + if getattr(self.config, "video_token_index", None) is not None: + start_tokens += [self.config.video_token_index] * self.config.num_query_tokens * 4 + input_ids = torch.tensor([start_tokens], dtype=torch.long, device=image_embeds.device) + input_ids = input_ids.repeat(batch_size, 1) + if attention_mask is None: attention_mask = torch.ones_like(input_ids) diff --git a/tests/models/blip_2/test_modeling_blip_2.py b/tests/models/blip_2/test_modeling_blip_2.py index a141ef40be1..a1ea708efd6 100644 --- a/tests/models/blip_2/test_modeling_blip_2.py +++ b/tests/models/blip_2/test_modeling_blip_2.py @@ -1994,8 +1994,8 @@ def test_inference_opt(self): generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output - print(predictions[0].tolist(), generated_text) - self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118]) + expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 2828, 15, 5, 4105, 19, 10, 2335, 50118] # fmt: skip + self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual("a woman sitting on the beach with a dog", generated_text) # image and context @@ -2007,10 +2007,8 @@ def test_inference_opt(self): generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output - self.assertEqual( - predictions[0].tolist(), - [2, 45641, 35, 61, 343, 16, 42, 116, 31652, 35, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118], - ) + expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 45641, 35, 61, 343, 16, 42, 116, 31652, 35, 24, 18, 45, 10, 343, 6, 24, 18, 10, 4105, 50118] # fmt: skip + self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual(generated_text, "Question: which city is this? Answer: it's not a city, it's a beach") def test_inference_interpolate_pos_encoding(self): @@ -2026,7 +2024,8 @@ def test_inference_interpolate_pos_encoding(self): predictions = model.generate(**inputs, interpolate_pos_encoding=True) generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() - self.assertEqual(predictions[0].tolist(), [2, 102, 693, 8, 2335, 15, 5, 4105, 50118]) + expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 8, 2335, 15, 5, 4105, 50118] # fmt: skip + self.assertEqual(predictions[0].tolist(), expected_ids) self.assertEqual(generated_text, "a woman and dog on the beach") def test_inference_opt_batched_beam_search(self): @@ -2042,8 +2041,9 @@ def test_inference_opt_batched_beam_search(self): predictions = model.generate(**inputs, num_beams=2) # Test output (in this case, slightly different from greedy search) - self.assertEqual(predictions[0].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118]) - self.assertEqual(predictions[1].tolist(), [2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118]) + expected_ids = [50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 50265, 2, 102, 693, 2828, 15, 5, 4105, 19, 69, 2335, 50118] # fmt: skip + self.assertEqual(predictions[0].tolist(), expected_ids) + self.assertEqual(predictions[1].tolist(), expected_ids) def test_inference_t5(self): processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xl") @@ -2070,10 +2070,7 @@ def test_inference_t5(self): generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip() # Test output - self.assertEqual( - predictions[0].tolist(), - [0, 3, 7, 152, 67, 839, 1], - ) + self.assertEqual(predictions[0].tolist(), [0, 3, 7, 152, 67, 839, 1]) self.assertEqual(generated_text, "san diego") def test_inference_t5_batched_beam_search(self): diff --git a/tests/models/instructblip/test_modeling_instructblip.py b/tests/models/instructblip/test_modeling_instructblip.py index e77577dad78..baacc12caa0 100644 --- a/tests/models/instructblip/test_modeling_instructblip.py +++ b/tests/models/instructblip/test_modeling_instructblip.py @@ -945,7 +945,7 @@ def test_expansion_in_processing(self): # Add args to the config to trigger new logic when inputs are expanded in processing file processor.num_query_tokens = model.config.num_query_tokens processor.tokenizer.add_special_tokens({"additional_special_tokens": [""]}) - model.config.image_token_index = len(processor.tokenizer) - 1 + model.config.image_token_index = len(processor.tokenizer) - 2 model.resize_token_embeddings(processor.tokenizer.vocab_size, pad_to_multiple_of=64) # Generate again with new inputs From 857d46ca0c824d7d2497a84a1ed616effe79106c Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:43:16 +0100 Subject: [PATCH 28/36] [`Deberta/Deberta-v2`] Refactor code base to support compile, export, and fix LLM (#22105) * some modification for roadmap * revert some changes * yups * weird * make it work * sttling * fix-copies * fixup * renaming * more fix-copies * move stuff around * remove torch script warnings * ignore copies * revert bad changes * woops * just styling * nit * revert * style fixup * nits configuration style * fixup * nits * will this fix the tf pt issue? * style * ??????? * update * eval? * update error message * updates * style * grumble grumble * update * style * nit * skip torch fx tests that were failing * style * skip the failing tests * skip another test and make style --- .../models/deberta/configuration_deberta.py | 5 + .../models/deberta/modeling_deberta.py | 1014 +++++++-------- .../deberta_v2/configuration_deberta_v2.py | 5 + .../models/deberta_v2/modeling_deberta_v2.py | 1117 ++++++++--------- .../models/sew_d/modeling_sew_d.py | 19 +- tests/models/deberta/test_modeling_deberta.py | 12 + .../deberta/test_modeling_tf_deberta.py | 4 + .../deberta_v2/test_modeling_deberta_v2.py | 12 + .../deberta_v2/test_modeling_tf_deberta_v2.py | 4 + tests/test_modeling_common.py | 8 +- 10 files changed, 1009 insertions(+), 1191 deletions(-) diff --git a/src/transformers/models/deberta/configuration_deberta.py b/src/transformers/models/deberta/configuration_deberta.py index 1c826a784f3..cfee176047e 100644 --- a/src/transformers/models/deberta/configuration_deberta.py +++ b/src/transformers/models/deberta/configuration_deberta.py @@ -82,6 +82,9 @@ class DebertaConfig(PretrainedConfig): `["p2c", "c2p"]`. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. + legacy (`bool`, *optional*, defaults to `True`): + Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly + for mask infilling tasks. Example: @@ -121,6 +124,7 @@ def __init__( pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", + legacy=True, **kwargs, ): super().__init__(**kwargs) @@ -151,6 +155,7 @@ def __init__( self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size) self.pooler_dropout = pooler_dropout self.pooler_hidden_act = pooler_hidden_act + self.legacy = legacy # Copied from transformers.models.deberta_v2.configuration_deberta_v2.DebertaV2OnnxConfig diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 814d3cb2852..6993121b6c1 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -14,7 +14,6 @@ # limitations under the License. """PyTorch DeBERTa model.""" -from collections.abc import Sequence from typing import Optional, Tuple, Union import torch @@ -31,7 +30,6 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import softmax_backward_data from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_deberta import DebertaConfig @@ -53,206 +51,6 @@ _QA_TARGET_END_INDEX = 14 -class ContextPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) - self.dropout = StableDropout(config.pooler_dropout) - self.config = config - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - - context_token = hidden_states[:, 0] - context_token = self.dropout(context_token) - pooled_output = self.dense(context_token) - pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) - return pooled_output - - @property - def output_dim(self): - return self.config.hidden_size - - -class XSoftmax(torch.autograd.Function): - """ - Masked Softmax which is optimized for saving memory - - Args: - input (`torch.tensor`): The input tensor that will apply softmax. - mask (`torch.IntTensor`): - The mask matrix where 0 indicate that element will be ignored in the softmax calculation. - dim (int): The dimension that will apply softmax - - Example: - - ```python - >>> import torch - >>> from transformers.models.deberta.modeling_deberta import XSoftmax - - >>> # Make a tensor - >>> x = torch.randn([4, 20, 100]) - - >>> # Create a mask - >>> mask = (x > 0).int() - - >>> # Specify the dimension to apply softmax - >>> dim = -1 - - >>> y = XSoftmax.apply(x, mask, dim) - ```""" - - @staticmethod - def forward(ctx, input, mask, dim): - ctx.dim = dim - rmask = ~(mask.to(torch.bool)) - - output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) - output = torch.softmax(output, ctx.dim) - output.masked_fill_(rmask, 0) - ctx.save_for_backward(output) - return output - - @staticmethod - def backward(ctx, grad_output): - (output,) = ctx.saved_tensors - inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output) - return inputGrad, None, None - - @staticmethod - def symbolic(g, self, mask, dim): - import torch.onnx.symbolic_helper as sym_help - from torch.onnx.symbolic_opset9 import masked_fill, softmax - - mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"]) - r_mask = g.op( - "Cast", - g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), - to_i=sym_help.cast_pytorch_to_onnx["Bool"], - ) - output = masked_fill( - g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min)) - ) - output = softmax(g, output, dim) - return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool))) - - -class DropoutContext: - def __init__(self): - self.dropout = 0 - self.mask = None - self.scale = 1 - self.reuse_mask = True - - -def get_mask(input, local_context): - if not isinstance(local_context, DropoutContext): - dropout = local_context - mask = None - else: - dropout = local_context.dropout - dropout *= local_context.scale - mask = local_context.mask if local_context.reuse_mask else None - - if dropout > 0 and mask is None: - mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool) - - if isinstance(local_context, DropoutContext): - if local_context.mask is None: - local_context.mask = mask - - return mask, dropout - - -class XDropout(torch.autograd.Function): - """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" - - @staticmethod - def forward(ctx, input, local_ctx): - mask, dropout = get_mask(input, local_ctx) - ctx.scale = 1.0 / (1 - dropout) - if dropout > 0: - ctx.save_for_backward(mask) - return input.masked_fill(mask, 0) * ctx.scale - else: - return input - - @staticmethod - def backward(ctx, grad_output): - if ctx.scale > 1: - (mask,) = ctx.saved_tensors - return grad_output.masked_fill(mask, 0) * ctx.scale, None - else: - return grad_output, None - - @staticmethod - def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: - from torch.onnx import symbolic_opset12 - - dropout_p = local_ctx - if isinstance(local_ctx, DropoutContext): - dropout_p = local_ctx.dropout - # StableDropout only calls this function when training. - train = True - # TODO: We should check if the opset_version being used to export - # is > 12 here, but there's no good way to do that. As-is, if the - # opset_version < 12, export will fail with a CheckerError. - # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: - # if opset_version < 12: - # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) - return symbolic_opset12.dropout(g, input, dropout_p, train) - - -class StableDropout(nn.Module): - """ - Optimized dropout module for stabilizing the training - - Args: - drop_prob (float): the dropout probabilities - """ - - def __init__(self, drop_prob): - super().__init__() - self.drop_prob = drop_prob - self.count = 0 - self.context_stack = None - - def forward(self, x): - """ - Call the module - - Args: - x (`torch.tensor`): The input tensor to apply dropout - """ - if self.training and self.drop_prob > 0: - return XDropout.apply(x, self.get_context()) - return x - - def clear_context(self): - self.count = 0 - self.context_stack = None - - def init_context(self, reuse_mask=True, scale=1): - if self.context_stack is None: - self.context_stack = [] - self.count = 0 - for c in self.context_stack: - c.reuse_mask = reuse_mask - c.scale = scale - - def get_context(self): - if self.context_stack is not None: - if self.count >= len(self.context_stack): - self.context_stack.append(DropoutContext()) - ctx = self.context_stack[self.count] - ctx.dropout = self.drop_prob - self.count += 1 - return ctx - else: - return self.drop_prob - - class DebertaLayerNorm(nn.Module): """LayerNorm module in the TF style (epsilon inside the square root).""" @@ -278,74 +76,7 @@ def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class DebertaAttention(nn.Module): - def __init__(self, config): - super().__init__() - self.self = DisentangledSelfAttention(config) - self.output = DebertaSelfOutput(config) - self.config = config - - def forward( - self, - hidden_states, - attention_mask, - output_attentions=False, - query_states=None, - relative_pos=None, - rel_embeddings=None, - ): - self_output = self.self( - hidden_states, - attention_mask, - output_attentions, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - ) - if output_attentions: - self_output, att_matrix = self_output - if query_states is None: - query_states = hidden_states - attention_output = self.output(self_output, query_states) - - if output_attentions: - return (attention_output, att_matrix) - else: - return attention_output - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta -class DebertaIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -class DebertaOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config + self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) @@ -354,142 +85,8 @@ def forward(self, hidden_states, input_tensor): return hidden_states -class DebertaLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.attention = DebertaAttention(config) - self.intermediate = DebertaIntermediate(config) - self.output = DebertaOutput(config) - - def forward( - self, - hidden_states, - attention_mask, - query_states=None, - relative_pos=None, - rel_embeddings=None, - output_attentions=False, - ): - attention_output = self.attention( - hidden_states, - attention_mask, - output_attentions=output_attentions, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - ) - if output_attentions: - attention_output, att_matrix = attention_output - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - if output_attentions: - return (layer_output, att_matrix) - else: - return layer_output - - -class DebertaEncoder(nn.Module): - """Modified BertEncoder with relative position bias support""" - - def __init__(self, config): - super().__init__() - self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)]) - self.relative_attention = getattr(config, "relative_attention", False) - if self.relative_attention: - self.max_relative_positions = getattr(config, "max_relative_positions", -1) - if self.max_relative_positions < 1: - self.max_relative_positions = config.max_position_embeddings - self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size) - self.gradient_checkpointing = False - - def get_rel_embedding(self): - rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None - return rel_embeddings - - def get_attention_mask(self, attention_mask): - if attention_mask.dim() <= 2: - extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) - attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) - elif attention_mask.dim() == 3: - attention_mask = attention_mask.unsqueeze(1) - - return attention_mask - - def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): - if self.relative_attention and relative_pos is None: - q = query_states.size(-2) if query_states is not None else hidden_states.size(-2) - relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device) - return relative_pos - - def forward( - self, - hidden_states, - attention_mask, - output_hidden_states=True, - output_attentions=False, - query_states=None, - relative_pos=None, - return_dict=True, - ): - attention_mask = self.get_attention_mask(attention_mask) - relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) - - all_hidden_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[0] - else: - next_kv = hidden_states - rel_embeddings = self.get_rel_embedding() - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if self.gradient_checkpointing and self.training: - hidden_states = self._gradient_checkpointing_func( - layer_module.__call__, - next_kv, - attention_mask, - query_states, - relative_pos, - rel_embeddings, - output_attentions, - ) - else: - hidden_states = layer_module( - next_kv, - attention_mask, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - output_attentions=output_attentions, - ) - - if output_attentions: - hidden_states, att_m = hidden_states - - if query_states is not None: - query_states = hidden_states - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None - else: - next_kv = hidden_states - - if output_attentions: - all_attentions = all_attentions + (att_m,) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions - ) - - -def build_relative_position(query_size, key_size, device): +@torch.jit.script +def build_relative_position(query_layer, key_layer): """ Build relative position according to the query and key @@ -506,8 +103,11 @@ def build_relative_position(query_size, key_size, device): """ - q_ids = torch.arange(query_size, dtype=torch.long, device=device) - k_ids = torch.arange(key_size, dtype=torch.long, device=device) + query_size = query_layer.size(-2) + key_size = key_layer.size(-2) + + q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device) + k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device) rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1) rel_pos_ids = rel_pos_ids[:query_size, :] rel_pos_ids = rel_pos_ids.unsqueeze(0) @@ -529,6 +129,39 @@ def pos_dynamic_expand(pos_index, p2c_att, key_layer): return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) +###### To support a general trace, we have to define these operation as they use python objects (sizes) ################## +# which are not supported by torch.jit.trace. +# Full credits to @Szustarol +@torch.jit.script +def scaled_size_sqrt(query_layer: torch.Tensor, scale_factor: int): + return torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + + +@torch.jit.script +def build_rpos(query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos): + if query_layer.size(-2) != key_layer.size(-2): + return build_relative_position(query_layer, key_layer) + else: + return relative_pos + + +@torch.jit.script +def compute_attention_span(query_layer: torch.Tensor, key_layer: torch.Tensor, max_relative_positions: int): + return torch.tensor(min(max(query_layer.size(-2), key_layer.size(-2)), max_relative_positions)) + + +@torch.jit.script +def uneven_size_corrected(p2c_att, query_layer: torch.Tensor, key_layer: torch.Tensor, relative_pos): + if query_layer.size(-2) != key_layer.size(-2): + pos_index = relative_pos[:, :, :, 0].unsqueeze(-1) + return torch.gather(p2c_att, dim=2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer)) + else: + return p2c_att + + +######################################################################################################################## + + class DisentangledSelfAttention(nn.Module): """ Disentangled self-attention module @@ -561,19 +194,22 @@ def __init__(self, config): if self.talking_head: self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False) self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False) + else: + self.head_logits_proj = None + self.head_weights_proj = None if self.relative_attention: self.max_relative_positions = getattr(config, "max_relative_positions", -1) if self.max_relative_positions < 1: self.max_relative_positions = config.max_position_embeddings - self.pos_dropout = StableDropout(config.hidden_dropout_prob) + self.pos_dropout = nn.Dropout(config.hidden_dropout_prob) if "c2p" in self.pos_att_type: self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False) if "p2c" in self.pos_att_type: self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size) - self.dropout = StableDropout(config.attention_probs_dropout_prob) + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1) @@ -582,13 +218,13 @@ def transpose_for_scores(self, x): def forward( self, - hidden_states, - attention_mask, - output_attentions=False, - query_states=None, - relative_pos=None, - rel_embeddings=None, - ): + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + output_attentions: bool = False, + query_states: Optional[torch.Tensor] = None, + relative_pos: Optional[torch.Tensor] = None, + rel_embeddings: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """ Call the module @@ -622,31 +258,24 @@ def forward( qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1) query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1) else: - - def linear(w, b, x): - if b is not None: - return torch.matmul(x, w.t()) + b.t() - else: - return torch.matmul(x, w.t()) # + b.t() - ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0) qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)] - qkvb = [None] * 3 - - q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype)) - k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)] + q = torch.matmul(qkvw[0], query_states.t().to(dtype=qkvw[0].dtype)) + k = torch.matmul(qkvw[1], hidden_states.t().to(dtype=qkvw[1].dtype)) + v = torch.matmul(qkvw[2], hidden_states.t().to(dtype=qkvw[2].dtype)) query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]] query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :]) - rel_att = None + rel_att: int = 0 # Take the dot product between "query" and "key" to get the raw attention scores. scale_factor = 1 + len(self.pos_att_type) - scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + scale = scaled_size_sqrt(query_layer, scale_factor) query_layer = query_layer / scale.to(dtype=query_layer.dtype) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - if self.relative_attention: + + if self.relative_attention and rel_embeddings is not None and relative_pos is not None: rel_embeddings = self.pos_dropout(rel_embeddings) rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor) @@ -654,27 +283,37 @@ def linear(w, b, x): attention_scores = attention_scores + rel_att # bxhxlxd - if self.talking_head: + if self.head_logits_proj is not None: attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) - attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1) + attention_mask = attention_mask.bool() + attention_scores = attention_scores.masked_fill(~(attention_mask), torch.finfo(query_layer.dtype).min) + # bsz x height x length x dimension + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + attention_probs.masked_fill(attention_mask, 0) + attention_probs = self.dropout(attention_probs) - if self.talking_head: + if self.head_weights_proj is not None: attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (-1,) context_layer = context_layer.view(new_context_layer_shape) - if output_attentions: - return (context_layer, attention_probs) - else: - return context_layer + if not output_attentions: + return (context_layer, None) + return (context_layer, attention_probs) - def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): + def disentangled_att_bias( + self, + query_layer: torch.Tensor, + key_layer: torch.Tensor, + relative_pos: torch.Tensor, + rel_embeddings: torch.Tensor, + scale_factor: int, + ): if relative_pos is None: - q = query_layer.size(-2) - relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device) + relative_pos = build_relative_position(query_layer, key_layer, query_layer.device) if relative_pos.dim() == 2: relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) elif relative_pos.dim() == 3: @@ -683,8 +322,8 @@ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embedd elif relative_pos.dim() != 4: raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}") - att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions) - relative_pos = relative_pos.long().to(query_layer.device) + att_span = compute_attention_span(query_layer, key_layer, self.max_relative_positions) + relative_pos = relative_pos.long() rel_embeddings = rel_embeddings[ self.max_relative_positions - att_span : self.max_relative_positions + att_span, : ].unsqueeze(0) @@ -704,20 +343,19 @@ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embedd if "p2c" in self.pos_att_type: pos_query_layer = self.pos_q_proj(rel_embeddings) pos_query_layer = self.transpose_for_scores(pos_query_layer) - pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) - if query_layer.size(-2) != key_layer.size(-2): - r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device) - else: - r_pos = relative_pos + pos_query_layer /= scaled_size_sqrt(pos_query_layer, scale_factor) + r_pos = build_rpos( + query_layer, + key_layer, + relative_pos, + ) p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype)) p2c_att = torch.gather( p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer) ).transpose(-1, -2) - if query_layer.size(-2) != key_layer.size(-2): - pos_index = relative_pos[:, :, :, 0].unsqueeze(-1) - p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer)) + p2c_att = uneven_size_corrected(p2c_att, query_layer, key_layer, relative_pos) score += p2c_att return score @@ -732,71 +370,267 @@ def __init__(self, config): self.embedding_size = getattr(config, "embedding_size", config.hidden_size) self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id) - self.position_biased_input = getattr(config, "position_biased_input", True) - if not self.position_biased_input: - self.position_embeddings = None + self.position_biased_input = getattr(config, "position_biased_input", True) + if not self.position_biased_input: + self.position_embeddings = None + else: + self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) + + if config.type_vocab_size > 0: + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) + else: + self.token_type_embeddings = None + + if self.embedding_size != config.hidden_size: + self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) + else: + self.embed_proj = None + + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if self.position_embeddings is not None: + position_embeddings = self.position_embeddings(position_ids.long()) + else: + position_embeddings = torch.zeros_like(inputs_embeds) + + embeddings = inputs_embeds + if self.position_biased_input: + embeddings += position_embeddings + if self.token_type_embeddings is not None: + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embeddings += token_type_embeddings + + if self.embed_proj is not None: + embeddings = self.embed_proj(embeddings) + + embeddings = self.LayerNorm(embeddings) + + if mask is not None: + if mask.dim() != embeddings.dim(): + if mask.dim() == 4: + mask = mask.squeeze(1).squeeze(1) + mask = mask.unsqueeze(2) + mask = mask.to(embeddings.dtype) + + embeddings = embeddings * mask + + embeddings = self.dropout(embeddings) + return embeddings + + +class DebertaAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = DisentangledSelfAttention(config) + self.output = DebertaSelfOutput(config) + self.config = config + + def forward( + self, + hidden_states, + attention_mask, + output_attentions: bool = False, + query_states=None, + relative_pos=None, + rel_embeddings=None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + self_output, att_matrix = self.self( + hidden_states, + attention_mask, + output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + if query_states is None: + query_states = hidden_states + attention_output = self.output(self_output, query_states) + + if output_attentions: + return (attention_output, att_matrix) + else: + return (attention_output, None) + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta +class DebertaIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class DebertaOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class DebertaLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = DebertaAttention(config) + self.intermediate = DebertaIntermediate(config) + self.output = DebertaOutput(config) + + def forward( + self, + hidden_states, + attention_mask, + query_states=None, + relative_pos=None, + rel_embeddings=None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + attention_output, att_matrix = self.attention( + hidden_states, + attention_mask, + output_attentions=output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + + if output_attentions: + return (layer_output, att_matrix) else: - self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) + return (layer_output, None) - if config.type_vocab_size > 0: - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) - if self.embedding_size != config.hidden_size: - self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) - self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config +class DebertaEncoder(PreTrainedModel): + """Modified BertEncoder with relative position bias support""" - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) + def __init__(self, config): + super().__init__(config) + self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)]) + self.relative_attention = getattr(config, "relative_attention", False) + if self.relative_attention: + self.max_relative_positions = getattr(config, "max_relative_positions", -1) + if self.max_relative_positions < 1: + self.max_relative_positions = config.max_position_embeddings + self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size) + self.gradient_checkpointing = False - def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] + def get_rel_embedding(self): + rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None + return rel_embeddings - seq_length = input_shape[1] + def get_attention_mask(self, attention_mask): + if attention_mask.dim() <= 2: + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) + elif attention_mask.dim() == 3: + attention_mask = attention_mask.unsqueeze(1) - if position_ids is None: - position_ids = self.position_ids[:, :seq_length] + return attention_mask - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): + if self.relative_attention and relative_pos is None: + if query_states is not None: + relative_pos = build_relative_position(query_states, hidden_states) + else: + relative_pos = build_relative_position(hidden_states, hidden_states) + return relative_pos - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + output_hidden_states: bool = True, + output_attentions: bool = False, + query_states=None, + relative_pos=None, + return_dict: bool = True, + ): + attention_mask = self.get_attention_mask(attention_mask) + relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) - if self.position_embeddings is not None: - position_embeddings = self.position_embeddings(position_ids.long()) - else: - position_embeddings = torch.zeros_like(inputs_embeds) + all_hidden_states: Optional[Tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None + all_attentions = () if output_attentions else None - embeddings = inputs_embeds - if self.position_biased_input: - embeddings += position_embeddings - if self.config.type_vocab_size > 0: - token_type_embeddings = self.token_type_embeddings(token_type_ids) - embeddings += token_type_embeddings + next_kv = hidden_states - if self.embedding_size != self.config.hidden_size: - embeddings = self.embed_proj(embeddings) + rel_embeddings = self.get_rel_embedding() + for i, layer_module in enumerate(self.layer): + if self.gradient_checkpointing and self.training: + hidden_states, att_m = self._gradient_checkpointing_func( + layer_module.__call__, + next_kv, + attention_mask, + query_states, + relative_pos, + rel_embeddings, + output_attentions, + ) + else: + hidden_states, att_m = layer_module( + next_kv, + attention_mask, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + output_attentions=output_attentions, + ) - embeddings = self.LayerNorm(embeddings) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) - if mask is not None: - if mask.dim() != embeddings.dim(): - if mask.dim() == 4: - mask = mask.squeeze(1).squeeze(1) - mask = mask.unsqueeze(2) - mask = mask.to(embeddings.dtype) + if query_states is not None: + query_states = hidden_states + else: + next_kv = hidden_states - embeddings = embeddings * mask + if output_attentions: + all_attentions = all_attentions + (att_m,) - embeddings = self.dropout(embeddings) - return embeddings + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) class DebertaPreTrainedModel(PreTrainedModel): @@ -1000,25 +834,128 @@ def forward( ) +class LegacyDebertaPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.embedding_size = getattr(config, "embedding_size", config.hidden_size) + + self.dense = nn.Linear(config.hidden_size, self.embedding_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class LegacyDebertaLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = LegacyDebertaPredictionHeadTransform(config) + + self.embedding_size = getattr(config, "embedding_size", config.hidden_size) + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def _tie_weights(self): + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->LegacyDeberta +class LegacyDebertaOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = LegacyDebertaLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class DebertaLMPredictionHead(nn.Module): + """https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # note that the input embeddings must be passed as an argument + def forward(self, hidden_states, word_embeddings): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm( + hidden_states + ) # original used MaskedLayerNorm, but passed no mask. This is equivalent. + hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias + return hidden_states + + +class DebertaOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.lm_head = DebertaLMPredictionHead(config) + + # note that the input embeddings must be passed as an argument + def forward(self, sequence_output, word_embeddings): + prediction_scores = self.lm_head(sequence_output, word_embeddings) + return prediction_scores + + @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) class DebertaForMaskedLM(DebertaPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) - + self.legacy = config.legacy self.deberta = DebertaModel(config) - self.cls = DebertaOnlyMLMHead(config) + if self.legacy: + self.cls = LegacyDebertaOnlyMLMHead(config) + else: + self._tied_weights_keys = ["lm_predictions.lm_head.weight", "deberta.embeddings.word_embeddings.weight"] + self.lm_predictions = DebertaOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): - return self.cls.predictions.decoder + if self.legacy: + return self.cls.predictions.decoder + else: + return self.lm_predictions.lm_head.dense def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - self.cls.predictions.bias = new_embeddings.bias + if self.legacy: + self.cls.predictions.decoder = new_embeddings + self.cls.predictions.bias = new_embeddings.bias + else: + self.lm_predictions.lm_head.dense = new_embeddings + self.lm_predictions.lm_head.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1062,7 +999,10 @@ def forward( ) sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) + if self.legacy: + prediction_scores = self.cls(sequence_output) + else: + prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings) masked_lm_loss = None if labels is not None: @@ -1081,58 +1021,26 @@ def forward( ) -class DebertaPredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - - self.dense = nn.Linear(config.hidden_size, self.embedding_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class DebertaLMPredictionHead(nn.Module): +class ContextPooler(nn.Module): def __init__(self, config): super().__init__() - self.transform = DebertaPredictionHeadTransform(config) - - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. - self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def _tie_weights(self): - self.decoder.bias = self.bias + self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) + self.dropout = nn.Dropout(config.pooler_dropout) + self.config = config def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. -# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta -class DebertaOnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = DebertaLMPredictionHead(config) + context_token = hidden_states[:, 0] + context_token = self.dropout(context_token) + pooled_output = self.dense(context_token) + pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) + return pooled_output - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores + @property + def output_dim(self): + return self.config.hidden_size @add_start_docstrings( @@ -1156,7 +1064,7 @@ def __init__(self, config): self.classifier = nn.Linear(output_dim, num_labels) drop_out = getattr(config, "cls_dropout", None) drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out - self.dropout = StableDropout(drop_out) + self.dropout = nn.Dropout(drop_out) # Initialize weights and apply final processing self.post_init() diff --git a/src/transformers/models/deberta_v2/configuration_deberta_v2.py b/src/transformers/models/deberta_v2/configuration_deberta_v2.py index 80ab0124117..cf3f61033c3 100644 --- a/src/transformers/models/deberta_v2/configuration_deberta_v2.py +++ b/src/transformers/models/deberta_v2/configuration_deberta_v2.py @@ -82,6 +82,9 @@ class DebertaV2Config(PretrainedConfig): `["p2c", "c2p"]`, `["p2c", "c2p"]`. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. + legacy (`bool`, *optional*, defaults to `True`): + Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly + for mask infilling tasks. Example: @@ -121,6 +124,7 @@ def __init__( pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", + legacy=True, **kwargs, ): super().__init__(**kwargs) @@ -151,6 +155,7 @@ def __init__( self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size) self.pooler_dropout = pooler_dropout self.pooler_hidden_act = pooler_hidden_act + self.legacy = legacy class DebertaV2OnnxConfig(OnnxConfig): diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index f47cb86ab52..6645c1de832 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -32,7 +32,6 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import softmax_backward_data from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_deberta_v2 import DebertaV2Config @@ -45,501 +44,23 @@ _QA_TARGET_END_INDEX = 9 -# Copied from transformers.models.deberta.modeling_deberta.ContextPooler -class ContextPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) - self.dropout = StableDropout(config.pooler_dropout) - self.config = config - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - - context_token = hidden_states[:, 0] - context_token = self.dropout(context_token) - pooled_output = self.dense(context_token) - pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) - return pooled_output - - @property - def output_dim(self): - return self.config.hidden_size - - -# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2 -class XSoftmax(torch.autograd.Function): - """ - Masked Softmax which is optimized for saving memory - - Args: - input (`torch.tensor`): The input tensor that will apply softmax. - mask (`torch.IntTensor`): - The mask matrix where 0 indicate that element will be ignored in the softmax calculation. - dim (int): The dimension that will apply softmax - - Example: - - ```python - >>> import torch - >>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax - - >>> # Make a tensor - >>> x = torch.randn([4, 20, 100]) - - >>> # Create a mask - >>> mask = (x > 0).int() - - >>> # Specify the dimension to apply softmax - >>> dim = -1 - - >>> y = XSoftmax.apply(x, mask, dim) - ```""" - - @staticmethod - def forward(ctx, input, mask, dim): - ctx.dim = dim - rmask = ~(mask.to(torch.bool)) - - output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) - output = torch.softmax(output, ctx.dim) - output.masked_fill_(rmask, 0) - ctx.save_for_backward(output) - return output - - @staticmethod - def backward(ctx, grad_output): - (output,) = ctx.saved_tensors - inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output) - return inputGrad, None, None - - @staticmethod - def symbolic(g, self, mask, dim): - import torch.onnx.symbolic_helper as sym_help - from torch.onnx.symbolic_opset9 import masked_fill, softmax - - mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"]) - r_mask = g.op( - "Cast", - g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), - to_i=sym_help.cast_pytorch_to_onnx["Bool"], - ) - output = masked_fill( - g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min)) - ) - output = softmax(g, output, dim) - return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool))) - - -# Copied from transformers.models.deberta.modeling_deberta.DropoutContext -class DropoutContext: - def __init__(self): - self.dropout = 0 - self.mask = None - self.scale = 1 - self.reuse_mask = True - - -# Copied from transformers.models.deberta.modeling_deberta.get_mask -def get_mask(input, local_context): - if not isinstance(local_context, DropoutContext): - dropout = local_context - mask = None - else: - dropout = local_context.dropout - dropout *= local_context.scale - mask = local_context.mask if local_context.reuse_mask else None - - if dropout > 0 and mask is None: - mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool) - - if isinstance(local_context, DropoutContext): - if local_context.mask is None: - local_context.mask = mask - - return mask, dropout - - -# Copied from transformers.models.deberta.modeling_deberta.XDropout -class XDropout(torch.autograd.Function): - """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" - - @staticmethod - def forward(ctx, input, local_ctx): - mask, dropout = get_mask(input, local_ctx) - ctx.scale = 1.0 / (1 - dropout) - if dropout > 0: - ctx.save_for_backward(mask) - return input.masked_fill(mask, 0) * ctx.scale - else: - return input - - @staticmethod - def backward(ctx, grad_output): - if ctx.scale > 1: - (mask,) = ctx.saved_tensors - return grad_output.masked_fill(mask, 0) * ctx.scale, None - else: - return grad_output, None - - @staticmethod - def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: - from torch.onnx import symbolic_opset12 - - dropout_p = local_ctx - if isinstance(local_ctx, DropoutContext): - dropout_p = local_ctx.dropout - # StableDropout only calls this function when training. - train = True - # TODO: We should check if the opset_version being used to export - # is > 12 here, but there's no good way to do that. As-is, if the - # opset_version < 12, export will fail with a CheckerError. - # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: - # if opset_version < 12: - # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) - return symbolic_opset12.dropout(g, input, dropout_p, train) - - -# Copied from transformers.models.deberta.modeling_deberta.StableDropout -class StableDropout(nn.Module): - """ - Optimized dropout module for stabilizing the training - - Args: - drop_prob (float): the dropout probabilities - """ - - def __init__(self, drop_prob): - super().__init__() - self.drop_prob = drop_prob - self.count = 0 - self.context_stack = None - - def forward(self, x): - """ - Call the module - - Args: - x (`torch.tensor`): The input tensor to apply dropout - """ - if self.training and self.drop_prob > 0: - return XDropout.apply(x, self.get_context()) - return x - - def clear_context(self): - self.count = 0 - self.context_stack = None - - def init_context(self, reuse_mask=True, scale=1): - if self.context_stack is None: - self.context_stack = [] - self.count = 0 - for c in self.context_stack: - c.reuse_mask = reuse_mask - c.scale = scale - - def get_context(self): - if self.context_stack is not None: - if self.count >= len(self.context_stack): - self.context_stack.append(DropoutContext()) - ctx = self.context_stack[self.count] - ctx.dropout = self.drop_prob - self.count += 1 - return ctx - else: - return self.drop_prob - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm -class DebertaV2SelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2 -class DebertaV2Attention(nn.Module): - def __init__(self, config): - super().__init__() - self.self = DisentangledSelfAttention(config) - self.output = DebertaV2SelfOutput(config) - self.config = config - - def forward( - self, - hidden_states, - attention_mask, - output_attentions=False, - query_states=None, - relative_pos=None, - rel_embeddings=None, - ): - self_output = self.self( - hidden_states, - attention_mask, - output_attentions, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - ) - if output_attentions: - self_output, att_matrix = self_output - if query_states is None: - query_states = hidden_states - attention_output = self.output(self_output, query_states) - - if output_attentions: - return (attention_output, att_matrix) - else: - return attention_output - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2 -class DebertaV2Intermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm -class DebertaV2Output(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2 -class DebertaV2Layer(nn.Module): - def __init__(self, config): - super().__init__() - self.attention = DebertaV2Attention(config) - self.intermediate = DebertaV2Intermediate(config) - self.output = DebertaV2Output(config) - - def forward( - self, - hidden_states, - attention_mask, - query_states=None, - relative_pos=None, - rel_embeddings=None, - output_attentions=False, - ): - attention_output = self.attention( - hidden_states, - attention_mask, - output_attentions=output_attentions, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - ) - if output_attentions: - attention_output, att_matrix = attention_output - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - if output_attentions: - return (layer_output, att_matrix) - else: - return layer_output - - -class ConvLayer(nn.Module): - def __init__(self, config): - super().__init__() - kernel_size = getattr(config, "conv_kernel_size", 3) - groups = getattr(config, "conv_groups", 1) - self.conv_act = getattr(config, "conv_act", "tanh") - self.conv = nn.Conv1d( - config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups - ) - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) - self.config = config - - def forward(self, hidden_states, residual_states, input_mask): - out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous() - rmask = (1 - input_mask).bool() - out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0) - out = ACT2FN[self.conv_act](self.dropout(out)) - - layer_norm_input = residual_states + out - output = self.LayerNorm(layer_norm_input).to(layer_norm_input) - - if input_mask is None: - output_states = output - else: - if input_mask.dim() != layer_norm_input.dim(): - if input_mask.dim() == 4: - input_mask = input_mask.squeeze(1).squeeze(1) - input_mask = input_mask.unsqueeze(2) - - input_mask = input_mask.to(output.dtype) - output_states = output * input_mask - - return output_states - - -class DebertaV2Encoder(nn.Module): - """Modified BertEncoder with relative position bias support""" - - def __init__(self, config): - super().__init__() - - self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)]) - self.relative_attention = getattr(config, "relative_attention", False) - - if self.relative_attention: - self.max_relative_positions = getattr(config, "max_relative_positions", -1) - if self.max_relative_positions < 1: - self.max_relative_positions = config.max_position_embeddings - - self.position_buckets = getattr(config, "position_buckets", -1) - pos_ebd_size = self.max_relative_positions * 2 - - if self.position_buckets > 0: - pos_ebd_size = self.position_buckets * 2 - - self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size) - - self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")] - - if "layer_norm" in self.norm_rel_ebd: - self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True) - - self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None - self.gradient_checkpointing = False - - def get_rel_embedding(self): - rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None - if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd): - rel_embeddings = self.LayerNorm(rel_embeddings) - return rel_embeddings - - def get_attention_mask(self, attention_mask): - if attention_mask.dim() <= 2: - extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) - attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) - elif attention_mask.dim() == 3: - attention_mask = attention_mask.unsqueeze(1) - - return attention_mask - - def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): - if self.relative_attention and relative_pos is None: - q = query_states.size(-2) if query_states is not None else hidden_states.size(-2) - relative_pos = build_relative_position( - q, - hidden_states.size(-2), - bucket_size=self.position_buckets, - max_position=self.max_relative_positions, - device=hidden_states.device, - ) - return relative_pos - - def forward( - self, - hidden_states, - attention_mask, - output_hidden_states=True, - output_attentions=False, - query_states=None, - relative_pos=None, - return_dict=True, - ): - if attention_mask.dim() <= 2: - input_mask = attention_mask - else: - input_mask = attention_mask.sum(-2) > 0 - attention_mask = self.get_attention_mask(attention_mask) - relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) - - all_hidden_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[0] - else: - next_kv = hidden_states - rel_embeddings = self.get_rel_embedding() - output_states = next_kv - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (output_states,) - - if self.gradient_checkpointing and self.training: - output_states = self._gradient_checkpointing_func( - layer_module.__call__, - next_kv, - attention_mask, - query_states, - relative_pos, - rel_embeddings, - output_attentions, - ) - else: - output_states = layer_module( - next_kv, - attention_mask, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - output_attentions=output_attentions, - ) - - if output_attentions: - output_states, att_m = output_states - - if i == 0 and self.conv is not None: - output_states = self.conv(hidden_states, output_states, input_mask) - - if query_states is not None: - query_states = output_states - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None - else: - next_kv = output_states - - if output_attentions: - all_attentions = all_attentions + (att_m,) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (output_states,) +# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaLayerNorm->LayerNorm +class DebertaV2SelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) - if not return_dict: - return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions - ) + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states -def make_log_bucket_position(relative_pos, bucket_size, max_position): +@torch.jit.script +def make_log_bucket_position(relative_pos, bucket_size: int, max_position: int): sign = torch.sign(relative_pos) mid = bucket_size // 2 abs_pos = torch.where( @@ -554,7 +75,7 @@ def make_log_bucket_position(relative_pos, bucket_size, max_position): return bucket_pos -def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None): +def build_relative_position(query_layer, key_layer, bucket_size: int = -1, max_position: int = -1): """ Build relative position according to the query and key @@ -572,9 +93,11 @@ def build_relative_position(query_size, key_size, bucket_size=-1, max_position=- Return: `torch.LongTensor`: A tensor with shape [1, query_size, key_size] """ + query_size = query_layer.size(-2) + key_size = key_layer.size(-2) - q_ids = torch.arange(0, query_size, device=device) - k_ids = torch.arange(0, key_size, device=device) + q_ids = torch.arange(query_size, dtype=torch.long, device=query_layer.device) + k_ids = torch.arange(key_size, dtype=torch.long, device=key_layer.device) rel_pos_ids = q_ids[:, None] - k_ids[None, :] if bucket_size > 0 and max_position > 0: rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position) @@ -602,6 +125,24 @@ def pos_dynamic_expand(pos_index, p2c_att, key_layer): return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) +@torch.jit.script +def scaled_size_sqrt(query_layer: torch.Tensor, scale_factor: int): + return torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + + +@torch.jit.script +def build_rpos(query_layer, key_layer, relative_pos, position_buckets: int, max_relative_positions: int): + if key_layer.size(-2) != query_layer.size(-2): + return build_relative_position( + key_layer, + key_layer, + bucket_size=position_buckets, + max_position=max_relative_positions, + ) + else: + return relative_pos + + class DisentangledSelfAttention(nn.Module): """ Disentangled self-attention module @@ -641,7 +182,7 @@ def __init__(self, config): if self.position_buckets > 0: self.pos_ebd_size = self.position_buckets - self.pos_dropout = StableDropout(config.hidden_dropout_prob) + self.pos_dropout = nn.Dropout(config.hidden_dropout_prob) if not self.share_att_key: if "c2p" in self.pos_att_type: @@ -649,9 +190,9 @@ def __init__(self, config): if "p2c" in self.pos_att_type: self.pos_query_proj = nn.Linear(config.hidden_size, self.all_head_size) - self.dropout = StableDropout(config.attention_probs_dropout_prob) + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - def transpose_for_scores(self, x, attention_heads): + def transpose_for_scores(self, x, attention_heads) -> torch.Tensor: new_x_shape = x.size()[:-1] + (attention_heads, -1) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3).contiguous().view(-1, x.size(1), x.size(-1)) @@ -707,7 +248,7 @@ def forward( scale_factor += 1 if "p2c" in self.pos_att_type: scale_factor += 1 - scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + scale = scaled_size_sqrt(query_layer, scale_factor) attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype)) if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings) @@ -722,8 +263,12 @@ def forward( -1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1) ) + attention_mask = attention_mask.bool() + attention_scores = attention_scores.masked_fill(~(attention_mask), torch.finfo(query_layer.dtype).min) # bsz x height x length x dimension - attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1) + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + attention_probs.masked_fill(attention_mask, 0) + attention_probs = self.dropout(attention_probs) context_layer = torch.bmm( attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer @@ -735,20 +280,17 @@ def forward( ) new_context_layer_shape = context_layer.size()[:-2] + (-1,) context_layer = context_layer.view(new_context_layer_shape) - if output_attentions: - return (context_layer, attention_probs) - else: - return context_layer + if not output_attentions: + return (context_layer, None) + return (context_layer, attention_probs) def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): if relative_pos is None: - q = query_layer.size(-2) relative_pos = build_relative_position( - q, - key_layer.size(-2), + query_layer, + key_layer, bucket_size=self.position_buckets, max_position=self.max_relative_positions, - device=query_layer.device, ) if relative_pos.dim() == 2: relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) @@ -782,7 +324,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ score = 0 # content->position if "c2p" in self.pos_att_type: - scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor) + scale = scaled_size_sqrt(pos_key_layer, scale_factor) c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2)) c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) c2p_att = torch.gather( @@ -794,19 +336,14 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ # position->content if "p2c" in self.pos_att_type: - scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) - if key_layer.size(-2) != query_layer.size(-2): - r_pos = build_relative_position( - key_layer.size(-2), - key_layer.size(-2), - bucket_size=self.position_buckets, - max_position=self.max_relative_positions, - device=query_layer.device, - ) - r_pos = r_pos.unsqueeze(0) - else: - r_pos = relative_pos - + scale = scaled_size_sqrt(pos_query_layer, scale_factor) + r_pos = build_rpos( + query_layer, + key_layer, + relative_pos, + self.max_relative_positions, + self.position_buckets, + ) p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) p2c_att = torch.bmm(key_layer, pos_query_layer.transpose(-1, -2)) p2c_att = torch.gather( @@ -819,7 +356,144 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ return score -# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm +# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->DebertaV2 +class DebertaV2Attention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = DisentangledSelfAttention(config) + self.output = DebertaV2SelfOutput(config) + self.config = config + + def forward( + self, + hidden_states, + attention_mask, + output_attentions: bool = False, + query_states=None, + relative_pos=None, + rel_embeddings=None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + self_output, att_matrix = self.self( + hidden_states, + attention_mask, + output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + if query_states is None: + query_states = hidden_states + attention_output = self.output(self_output, query_states) + + if output_attentions: + return (attention_output, att_matrix) + else: + return (attention_output, None) + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->DebertaV2 +class DebertaV2Intermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm +class DebertaV2Output(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->DebertaV2 +class DebertaV2Layer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = DebertaV2Attention(config) + self.intermediate = DebertaV2Intermediate(config) + self.output = DebertaV2Output(config) + + def forward( + self, + hidden_states, + attention_mask, + query_states=None, + relative_pos=None, + rel_embeddings=None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + attention_output, att_matrix = self.attention( + hidden_states, + attention_mask, + output_attentions=output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + + if output_attentions: + return (layer_output, att_matrix) + else: + return (layer_output, None) + + +class ConvLayer(nn.Module): + def __init__(self, config): + super().__init__() + kernel_size = getattr(config, "conv_kernel_size", 3) + groups = getattr(config, "conv_groups", 1) + self.conv_act = getattr(config, "conv_act", "tanh") + self.conv = nn.Conv1d( + config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups + ) + self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def forward(self, hidden_states, residual_states, input_mask): + out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous() + rmask = (1 - input_mask).bool() + out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0) + out = ACT2FN[self.conv_act](self.dropout(out)) + + layer_norm_input = residual_states + out + output = self.LayerNorm(layer_norm_input).to(layer_norm_input) + + if input_mask is None: + output_states = output + else: + if input_mask.dim() != layer_norm_input.dim(): + if input_mask.dim() == 4: + input_mask = input_mask.squeeze(1).squeeze(1) + input_mask = input_mask.unsqueeze(2) + + input_mask = input_mask.to(output.dtype) + output_states = output * input_mask + + return output_states + + +# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm,Deberta->DebertaV2 class DebertaV2Embeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" @@ -837,63 +511,197 @@ def __init__(self, config): if config.type_vocab_size > 0: self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) + else: + self.token_type_embeddings = None if self.embedding_size != config.hidden_size: self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) + else: + self.embed_proj = None + self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.hidden_dropout_prob) + self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if self.position_embeddings is not None: + position_embeddings = self.position_embeddings(position_ids.long()) + else: + position_embeddings = torch.zeros_like(inputs_embeds) + + embeddings = inputs_embeds + if self.position_biased_input: + embeddings += position_embeddings + if self.token_type_embeddings is not None: + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embeddings += token_type_embeddings + + if self.embed_proj is not None: + embeddings = self.embed_proj(embeddings) + + embeddings = self.LayerNorm(embeddings) + + if mask is not None: + if mask.dim() != embeddings.dim(): + if mask.dim() == 4: + mask = mask.squeeze(1).squeeze(1) + mask = mask.unsqueeze(2) + mask = mask.to(embeddings.dtype) + + embeddings = embeddings * mask + + embeddings = self.dropout(embeddings) + return embeddings + + +class DebertaV2Encoder(nn.Module): + """Modified BertEncoder with relative position bias support""" + + def __init__(self, config): + super().__init__() + + self.layer = nn.ModuleList([DebertaV2Layer(config) for _ in range(config.num_hidden_layers)]) + self.relative_attention = getattr(config, "relative_attention", False) + + if self.relative_attention: + self.max_relative_positions = getattr(config, "max_relative_positions", -1) + if self.max_relative_positions < 1: + self.max_relative_positions = config.max_position_embeddings + + self.position_buckets = getattr(config, "position_buckets", -1) + pos_ebd_size = self.max_relative_positions * 2 + + if self.position_buckets > 0: + pos_ebd_size = self.position_buckets * 2 + + self.rel_embeddings = nn.Embedding(pos_ebd_size, config.hidden_size) + + self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")] + + if "layer_norm" in self.norm_rel_ebd: + self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps, elementwise_affine=True) - def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] + self.conv = ConvLayer(config) if getattr(config, "conv_kernel_size", 0) > 0 else None + self.gradient_checkpointing = False - seq_length = input_shape[1] + def get_rel_embedding(self): + rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None + if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd): + rel_embeddings = self.LayerNorm(rel_embeddings) + return rel_embeddings - if position_ids is None: - position_ids = self.position_ids[:, :seq_length] + def get_attention_mask(self, attention_mask): + if attention_mask.dim() <= 2: + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) + elif attention_mask.dim() == 3: + attention_mask = attention_mask.unsqueeze(1) - if token_type_ids is None: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + return attention_mask - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) + def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): + if self.relative_attention and relative_pos is None: + if query_states is not None: + relative_pos = build_relative_position( + query_states, + hidden_states, + bucket_size=self.position_buckets, + max_position=self.max_relative_positions, + ) + else: + relative_pos = build_relative_position( + hidden_states, + hidden_states, + bucket_size=self.position_buckets, + max_position=self.max_relative_positions, + ) + return relative_pos - if self.position_embeddings is not None: - position_embeddings = self.position_embeddings(position_ids.long()) + def forward( + self, + hidden_states, + attention_mask, + output_hidden_states=True, + output_attentions=False, + query_states=None, + relative_pos=None, + return_dict=True, + ): + if attention_mask.dim() <= 2: + input_mask = attention_mask else: - position_embeddings = torch.zeros_like(inputs_embeds) + input_mask = attention_mask.sum(-2) > 0 + attention_mask = self.get_attention_mask(attention_mask) + relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) - embeddings = inputs_embeds - if self.position_biased_input: - embeddings += position_embeddings - if self.config.type_vocab_size > 0: - token_type_embeddings = self.token_type_embeddings(token_type_ids) - embeddings += token_type_embeddings + all_hidden_states: Optional[Tuple[torch.Tensor]] = (hidden_states,) if output_hidden_states else None + all_attentions = () if output_attentions else None - if self.embedding_size != self.config.hidden_size: - embeddings = self.embed_proj(embeddings) + next_kv = hidden_states + rel_embeddings = self.get_rel_embedding() + for i, layer_module in enumerate(self.layer): + if self.gradient_checkpointing and self.training: + output_states, attn_weights = self._gradient_checkpointing_func( + layer_module.__call__, + next_kv, + attention_mask, + query_states, + relative_pos, + rel_embeddings, + output_attentions, + ) + else: + output_states, attn_weights = layer_module( + next_kv, + attention_mask, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + output_attentions=output_attentions, + ) - embeddings = self.LayerNorm(embeddings) + if output_attentions: + all_attentions = all_attentions + (attn_weights,) - if mask is not None: - if mask.dim() != embeddings.dim(): - if mask.dim() == 4: - mask = mask.squeeze(1).squeeze(1) - mask = mask.unsqueeze(2) - mask = mask.to(embeddings.dtype) + if i == 0 and self.conv is not None: + output_states = self.conv(hidden_states, output_states, input_mask) - embeddings = embeddings * mask + if output_hidden_states: + all_hidden_states = all_hidden_states + (output_states,) - embeddings = self.dropout(embeddings) - return embeddings + if query_states is not None: + query_states = output_states + if isinstance(hidden_states, Sequence): + next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None + else: + next_kv = output_states + + if not return_dict: + return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions + ) # Copied from transformers.models.deberta.modeling_deberta.DebertaPreTrainedModel with Deberta->DebertaV2 @@ -1099,25 +907,126 @@ def forward( ) +# Copied from transformers.models.deberta.modeling_deberta.LegacyDebertaPredictionHeadTransform with Deberta->DebertaV2 +class LegacyDebertaV2PredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.embedding_size = getattr(config, "embedding_size", config.hidden_size) + + self.dense = nn.Linear(config.hidden_size, self.embedding_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class LegacyDebertaV2LMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = LegacyDebertaV2PredictionHeadTransform(config) + + self.embedding_size = getattr(config, "embedding_size", config.hidden_size) + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def _tie_weights(self): + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class LegacyDebertaV2OnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = LegacyDebertaV2LMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class DebertaV2LMPredictionHead(nn.Module): + """https://github.com/microsoft/DeBERTa/blob/master/DeBERTa/deberta/bert.py#L270""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps, elementwise_affine=True) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # note that the input embeddings must be passed as an argument + def forward(self, hidden_states, word_embeddings): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + hidden_states = torch.matmul(hidden_states, word_embeddings.weight.t()) + self.bias + return hidden_states + + +class DebertaV2OnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.lm_head = DebertaV2LMPredictionHead(config) + + # note that the input embeddings must be passed as an argument + def forward(self, sequence_output, word_embeddings): + prediction_scores = self.lm_head(sequence_output, word_embeddings) + return prediction_scores + + @add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) class DebertaV2ForMaskedLM(DebertaV2PreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] + _keys_to_ignore_on_load_unexpected = r"mask_predictions.*" def __init__(self, config): super().__init__(config) - + self.legacy = config.legacy self.deberta = DebertaV2Model(config) - self.cls = DebertaV2OnlyMLMHead(config) - + if self.legacy: + self.cls = LegacyDebertaV2OnlyMLMHead(config) + else: + self._tied_weights_keys = ["lm_predictions.lm_head.weight", "deberta.embeddings.word_embeddings.weight"] + self.lm_predictions = DebertaV2OnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): - return self.cls.predictions.decoder + if self.legacy: + return self.cls.predictions.decoder + else: + return self.lm_predictions.lm_head.dense def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - self.cls.predictions.bias = new_embeddings.bias + if self.legacy: + self.cls.predictions.decoder = new_embeddings + self.cls.predictions.bias = new_embeddings.bias + else: + self.lm_predictions.lm_head.dense = new_embeddings + self.lm_predictions.lm_head.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( @@ -1160,7 +1069,10 @@ def forward( ) sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) + if self.legacy: + prediction_scores = self.cls(sequence_output) + else: + prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings) masked_lm_loss = None if labels is not None: @@ -1179,60 +1091,27 @@ def forward( ) -# Copied from transformers.models.deberta.modeling_deberta.DebertaPredictionHeadTransform with Deberta->DebertaV2 -class DebertaV2PredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - - self.dense = nn.Linear(config.hidden_size, self.embedding_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(self.embedding_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -# Copied from transformers.models.deberta.modeling_deberta.DebertaLMPredictionHead with Deberta->DebertaV2 -class DebertaV2LMPredictionHead(nn.Module): +# Copied from transformers.models.deberta.modeling_deberta.ContextPooler +class ContextPooler(nn.Module): def __init__(self, config): super().__init__() - self.transform = DebertaV2PredictionHeadTransform(config) - - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. - self.decoder = nn.Linear(self.embedding_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def _tie_weights(self): - self.decoder.bias = self.bias + self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) + self.dropout = nn.Dropout(config.pooler_dropout) + self.config = config def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. -# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta -class DebertaV2OnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = DebertaV2LMPredictionHead(config) + context_token = hidden_states[:, 0] + context_token = self.dropout(context_token) + pooled_output = self.dense(context_token) + pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) + return pooled_output - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores + @property + def output_dim(self): + return self.config.hidden_size @add_start_docstrings( @@ -1256,7 +1135,7 @@ def __init__(self, config): self.classifier = nn.Linear(output_dim, num_labels) drop_out = getattr(config, "cls_dropout", None) drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out - self.dropout = StableDropout(drop_out) + self.dropout = nn.Dropout(drop_out) # Initialize weights and apply final processing self.post_init() @@ -1549,7 +1428,7 @@ def __init__(self, config): self.classifier = nn.Linear(output_dim, 1) drop_out = getattr(config, "cls_dropout", None) drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out - self.dropout = StableDropout(drop_out) + self.dropout = nn.Dropout(drop_out) self.init_weights() diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index 7f3db54defc..5cccc0218e6 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -176,7 +176,6 @@ def compute_num_masked_span(input_length): return spec_aug_mask -# Copied from transformers.models.deberta_v2.modeling_deberta_v2.make_log_bucket_position def make_log_bucket_position(relative_pos, bucket_size, max_position): sign = torch.sign(relative_pos) mid = bucket_size // 2 @@ -192,7 +191,6 @@ def make_log_bucket_position(relative_pos, bucket_size, max_position): return bucket_pos -# Copied from transformers.models.deberta_v2.modeling_deberta_v2.build_relative_position def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1, device=None): """ Build relative position according to the query and key @@ -241,7 +239,6 @@ def pos_dynamic_expand(pos_index, p2c_att, key_layer): return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) -# Copied from transformers.models.deberta.modeling_deberta.get_mask def get_mask(input, local_context): if not isinstance(local_context, DropoutContext): dropout = local_context @@ -471,7 +468,6 @@ def __init__(self, config): ) -# Copied from transformers.models.deberta.modeling_deberta.ContextPooler class ContextPooler(nn.Module): def __init__(self, config): super().__init__() @@ -494,7 +490,6 @@ def output_dim(self): return self.config.hidden_size -# Copied from transformers.models.deberta.modeling_deberta.XSoftmax with deberta->deberta_v2 class XSoftmax(torch.autograd.Function): """ Masked Softmax which is optimized for saving memory @@ -558,7 +553,6 @@ def symbolic(g, self, mask, dim): return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool))) -# Copied from transformers.models.deberta.modeling_deberta.DropoutContext class DropoutContext: def __init__(self): self.dropout = 0 @@ -567,7 +561,6 @@ def __init__(self): self.reuse_mask = True -# Copied from transformers.models.deberta.modeling_deberta.XDropout class XDropout(torch.autograd.Function): """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" @@ -607,7 +600,6 @@ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, D return symbolic_opset12.dropout(g, input, dropout_p, train) -# Copied from transformers.models.deberta.modeling_deberta.StableDropout class StableDropout(nn.Module): """ Optimized dropout module for stabilizing the training @@ -657,13 +649,12 @@ def get_context(self): return self.drop_prob -# Copied from transformers.models.deberta.modeling_deberta.DebertaSelfOutput with DebertaV2->SEWD, DebertaLayerNorm->LayerNorm, hidden_dropout_prob->activation_dropout class SEWDSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.activation_dropout) + self.dropout = nn.Dropout(config.activation_dropout) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) @@ -672,7 +663,6 @@ def forward(self, hidden_states, input_tensor): return hidden_states -# Copied from transformers.models.deberta_v2.modeling_deberta_v2.DisentangledSelfAttention with attention_probs_dropout_prob->attention_dropout, hidden_dropout_prob->activation_dropout class DisentangledSelfAttention(nn.Module): """ Disentangled self-attention module @@ -890,7 +880,6 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ return score -# Copied from transformers.models.deberta.modeling_deberta.DebertaAttention with Deberta->SEWD class SEWDAttention(nn.Module): def __init__(self, config): super().__init__() @@ -943,13 +932,12 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return hidden_states -# Copied from transformers.models.deberta.modeling_deberta.DebertaOutput with DebertaLayerNorm->LayerNorm, hidden_dropout_prob->activation_dropout class SEWDOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps) - self.dropout = StableDropout(config.activation_dropout) + self.dropout = nn.Dropout(config.activation_dropout) self.config = config def forward(self, hidden_states, input_tensor): @@ -959,7 +947,6 @@ def forward(self, hidden_states, input_tensor): return hidden_states -# Copied from transformers.models.deberta.modeling_deberta.DebertaLayer with Deberta->SEWD class SEWDLayer(nn.Module): def __init__(self, config): super().__init__() @@ -994,7 +981,6 @@ def forward( return layer_output -# Copied from transformers.models.deberta_v2.modeling_deberta_v2.ConvLayer class ConvLayer(nn.Module): def __init__(self, config): super().__init__() @@ -1031,7 +1017,6 @@ def forward(self, hidden_states, residual_states, input_mask): return output_states -# Copied from transformers.models.deberta_v2.modeling_deberta_v2.DebertaV2Encoder with DebertaV2->SEWD class SEWDTransformerEncoder(nn.Module): """Modified BertEncoder with relative position bias support""" diff --git a/tests/models/deberta/test_modeling_deberta.py b/tests/models/deberta/test_modeling_deberta.py index 4b6f570e9ea..48d8cb67e34 100644 --- a/tests/models/deberta/test_modeling_deberta.py +++ b/tests/models/deberta/test_modeling_deberta.py @@ -277,6 +277,18 @@ def test_model_from_pretrained(self): model = DebertaModel.from_pretrained(model_name) self.assertIsNotNone(model) + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_torch_fx_output_loss(self): + pass + + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_torch_fx(self): + pass + + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_pt_tf_model_equivalence(self): + pass + @require_torch @require_sentencepiece diff --git a/tests/models/deberta/test_modeling_tf_deberta.py b/tests/models/deberta/test_modeling_tf_deberta.py index 14a99ea947e..003c1a9240b 100644 --- a/tests/models/deberta/test_modeling_tf_deberta.py +++ b/tests/models/deberta/test_modeling_tf_deberta.py @@ -270,6 +270,10 @@ def test_model_from_pretrained(self): model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base") self.assertIsNotNone(model) + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_pt_tf_model_equivalence(self): + pass + @require_tf class TFDeBERTaModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/deberta_v2/test_modeling_deberta_v2.py b/tests/models/deberta_v2/test_modeling_deberta_v2.py index 0a9256aaf72..ea26043248d 100644 --- a/tests/models/deberta_v2/test_modeling_deberta_v2.py +++ b/tests/models/deberta_v2/test_modeling_deberta_v2.py @@ -295,6 +295,18 @@ def test_model_from_pretrained(self): model = DebertaV2Model.from_pretrained(model_name) self.assertIsNotNone(model) + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_torch_fx_output_loss(self): + pass + + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_torch_fx(self): + pass + + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_pt_tf_model_equivalence(self): + pass + @require_torch @require_sentencepiece diff --git a/tests/models/deberta_v2/test_modeling_tf_deberta_v2.py b/tests/models/deberta_v2/test_modeling_tf_deberta_v2.py index b46f68525d3..4f2a5bffd07 100644 --- a/tests/models/deberta_v2/test_modeling_tf_deberta_v2.py +++ b/tests/models/deberta_v2/test_modeling_tf_deberta_v2.py @@ -290,6 +290,10 @@ def test_model_from_pretrained(self): model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge") self.assertIsNotNone(model) + @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker") + def test_pt_tf_model_equivalence(self): + pass + @require_tf class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index fe06e223586..f3f326a4ce8 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2539,7 +2539,11 @@ def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, nam tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) - self.assertLessEqual(max_diff, tol, f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}).") + self.assertLessEqual( + max_diff, + tol, + f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}) for {model_class.__name__}", + ) else: raise ValueError( "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got" @@ -2615,7 +2619,7 @@ def test_pt_tf_model_equivalence(self, allow_missing_keys=False): tf_model_class = getattr(transformers, tf_model_class_name) - pt_model = model_class(config) + pt_model = model_class(config).eval() tf_model = tf_model_class(config) pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class) From 1e492afd6169a32b9ccd0659a1321fe00f1df155 Mon Sep 17 00:00:00 2001 From: Raushan Turganbay Date: Mon, 25 Nov 2024 11:20:20 +0100 Subject: [PATCH 29/36] =?UTF-8?q?=F0=9F=94=B4=20Mllama:=20fix=20base=20pre?= =?UTF-8?q?fix=20(#34874)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix base prefix --- .../models/mllama/modeling_mllama.py | 249 +++++++++--------- 1 file changed, 125 insertions(+), 124 deletions(-) diff --git a/src/transformers/models/mllama/modeling_mllama.py b/src/transformers/models/mllama/modeling_mllama.py index 3ce5d0b7aa0..4a1e4aff081 100644 --- a/src/transformers/models/mllama/modeling_mllama.py +++ b/src/transformers/models/mllama/modeling_mllama.py @@ -1065,6 +1065,129 @@ def _init_weights(self, module): nn.init.normal_(module.gate_attn.data, std=std) nn.init.normal_(module.gate_ffn.data, std=std) + # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask + def _update_causal_mask( + self, + attention_mask: torch.Tensor, + input_tensor: torch.Tensor, + cache_position: torch.Tensor, + past_key_values: Cache, + output_attentions: bool, + ): + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in + # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail + # to infer the attention mask. + past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 + using_static_cache = isinstance(past_key_values, StaticCache) + + # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward + if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: + if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, + inputs_embeds=input_tensor, + past_key_values_length=past_seen_tokens, + is_training=self.training, + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + sequence_length = input_tensor.shape[1] + if using_static_cache: + target_length = past_key_values.get_max_cache_shape() + else: + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). + causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( + attention_mask, + sequence_length=sequence_length, + target_length=target_length, + dtype=dtype, + device=device, + cache_position=cache_position, + batch_size=input_tensor.shape[0], + ) + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type == "cuda" + and not output_attentions + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. + # Details: https://github.com/pytorch/pytorch/issues/110213 + min_dtype = torch.finfo(dtype).min + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + @staticmethod + # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position + def _prepare_4d_causal_attention_mask_with_cache_position( + attention_mask: torch.Tensor, + sequence_length: int, + target_length: int, + dtype: torch.dtype, + device: torch.device, + cache_position: torch.Tensor, + batch_size: int, + **kwargs, + ): + """ + Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape + `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. + + Args: + attention_mask (`torch.Tensor`): + A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape + `(batch_size, 1, query_length, key_value_length)`. + sequence_length (`int`): + The sequence length being processed. + target_length (`int`): + The target length: when generating with static cache, the mask should be as long as the static cache, + to account for the 0 padding, the part of the cache that is not filled yet. + dtype (`torch.dtype`): + The dtype to use for the 4D attention mask. + device (`torch.device`): + The device to plcae the 4D attention mask on. + cache_position (`torch.Tensor`): + Indices depicting the position of the input sequence tokens in the sequence. + batch_size (`torch.Tensor`): + Batch size. + """ + if attention_mask is not None and attention_mask.dim() == 4: + # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. + causal_mask = attention_mask + else: + min_dtype = torch.finfo(dtype).min + causal_mask = torch.full( + (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device + ) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] + padding_mask = padding_mask == 0 + causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( + padding_mask, min_dtype + ) + + return causal_mask + MLLAMA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the @@ -1711,129 +1834,6 @@ def forward( attentions=all_self_attns, ) - # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask - def _update_causal_mask( - self, - attention_mask: torch.Tensor, - input_tensor: torch.Tensor, - cache_position: torch.Tensor, - past_key_values: Cache, - output_attentions: bool, - ): - if self.config._attn_implementation == "flash_attention_2": - if attention_mask is not None and 0.0 in attention_mask: - return attention_mask - return None - - # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in - # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail - # to infer the attention mask. - past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 - using_static_cache = isinstance(past_key_values, StaticCache) - - # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward - if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions: - if AttentionMaskConverter._ignore_causal_mask_sdpa( - attention_mask, - inputs_embeds=input_tensor, - past_key_values_length=past_seen_tokens, - is_training=self.training, - ): - return None - - dtype, device = input_tensor.dtype, input_tensor.device - sequence_length = input_tensor.shape[1] - if using_static_cache: - target_length = past_key_values.get_max_cache_shape() - else: - target_length = ( - attention_mask.shape[-1] - if isinstance(attention_mask, torch.Tensor) - else past_seen_tokens + sequence_length + 1 - ) - - # In case the provided `attention` mask is 2D, we generate a causal mask here (4D). - causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position( - attention_mask, - sequence_length=sequence_length, - target_length=target_length, - dtype=dtype, - device=device, - cache_position=cache_position, - batch_size=input_tensor.shape[0], - ) - - if ( - self.config._attn_implementation == "sdpa" - and attention_mask is not None - and attention_mask.device.type == "cuda" - and not output_attentions - ): - # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when - # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. - # Details: https://github.com/pytorch/pytorch/issues/110213 - min_dtype = torch.finfo(dtype).min - causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) - - return causal_mask - - @staticmethod - # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position - def _prepare_4d_causal_attention_mask_with_cache_position( - attention_mask: torch.Tensor, - sequence_length: int, - target_length: int, - dtype: torch.dtype, - device: torch.device, - cache_position: torch.Tensor, - batch_size: int, - **kwargs, - ): - """ - Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape - `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. - - Args: - attention_mask (`torch.Tensor`): - A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape - `(batch_size, 1, query_length, key_value_length)`. - sequence_length (`int`): - The sequence length being processed. - target_length (`int`): - The target length: when generating with static cache, the mask should be as long as the static cache, - to account for the 0 padding, the part of the cache that is not filled yet. - dtype (`torch.dtype`): - The dtype to use for the 4D attention mask. - device (`torch.device`): - The device to plcae the 4D attention mask on. - cache_position (`torch.Tensor`): - Indices depicting the position of the input sequence tokens in the sequence. - batch_size (`torch.Tensor`): - Batch size. - """ - if attention_mask is not None and attention_mask.dim() == 4: - # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing. - causal_mask = attention_mask - else: - min_dtype = torch.finfo(dtype).min - causal_mask = torch.full( - (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device - ) - if sequence_length != 1: - causal_mask = torch.triu(causal_mask, diagonal=1) - causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) - causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) - if attention_mask is not None: - causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit - mask_length = attention_mask.shape[-1] - padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :] - padding_mask = padding_mask == 0 - causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill( - padding_mask, min_dtype - ) - - return causal_mask - @add_start_docstrings( """The Mllama Text Model with a language modeling head on top.""", @@ -1841,7 +1841,8 @@ def _prepare_4d_causal_attention_mask_with_cache_position( ) class MllamaForCausalLM(MllamaPreTrainedModel, GenerationMixin): config_class = MllamaTextConfig - base_model_prefix = "model" + _supports_static_cache = True # only the LLM without cross attn can do compile + base_model_prefix = "language_model" _tied_weights_keys = ["lm_head.weight"] def __init__(self, config): From 4dc1a69349c02bf1c39497e2bcd0c2ac1d80b285 Mon Sep 17 00:00:00 2001 From: kang sheng Date: Mon, 25 Nov 2024 18:27:13 +0800 Subject: [PATCH 30/36] Sum gathered input tokens (#34554) * sum gathered input tokens * ruff line-length is 119, format the code --------- Co-authored-by: kangsheng --- src/transformers/trainer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 46add00b018..ed45624983a 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -2489,7 +2489,9 @@ def _inner_training_loop( else: input_tokens = inputs[main_input_name].numel() input_tokens = torch.tensor(input_tokens, device=self.args.device, dtype=torch.int64) - self.state.num_input_tokens_seen += self.accelerator.gather(input_tokens).cpu().item() + self.state.num_input_tokens_seen += ( + self.accelerator.gather(input_tokens).sum().cpu().item() + ) if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False From a0f4f3174f4aee87dd88ffda95579f7450934fc8 Mon Sep 17 00:00:00 2001 From: VictorAtIfInsurance <143422373+VictorAtIfInsurance@users.noreply.github.com> Date: Mon, 25 Nov 2024 11:36:44 +0100 Subject: [PATCH 31/36] allow unused input parameters passthrough when chunking in asr pipelines (#33889) * allow unused parameter passthrough when chunking in asr pipelines * format code * format * run fixup * update tests * update parameters to pipline in test * updates parametrs in tests * change spelling in gitignore * revert .gitignore to main * add git ignore of devcontainer folder * assert asr output follows expected inference output type * run fixup * Remove .devcontainer from .gitignore * remove compliance check --- .../pipelines/automatic_speech_recognition.py | 2 +- ..._pipelines_automatic_speech_recognition.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/automatic_speech_recognition.py b/src/transformers/pipelines/automatic_speech_recognition.py index f4ffdf64453..09958b5fca1 100644 --- a/src/transformers/pipelines/automatic_speech_recognition.py +++ b/src/transformers/pipelines/automatic_speech_recognition.py @@ -434,7 +434,7 @@ def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): for item in chunk_iter( inputs, self.feature_extractor, chunk_len, stride_left, stride_right, self.torch_dtype ): - yield item + yield {**item, **extra} else: if self.type == "seq2seq_whisper" and inputs.shape[0] > self.feature_extractor.n_samples: processed = self.feature_extractor( diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index b21e8cd25f2..e8cd8febca0 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -1443,6 +1443,25 @@ def test_chunking_fast(self): self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "ZBT ZC") + @require_torch + def test_input_parameter_passthrough(self): + """Test that chunked vs non chunked versions of ASR pipelines returns the same structure for the same inputs.""" + speech_recognizer = pipeline( + task="automatic-speech-recognition", + model="hf-internal-testing/tiny-random-wav2vec2", + ) + + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") + audio = ds[40]["audio"]["array"] + + inputs = {"raw": audio, "sampling_rate": 16_000, "id": 1} + + chunked_output = speech_recognizer(inputs.copy(), chunk_length_s=30) + non_chunked_output = speech_recognizer(inputs.copy()) + assert ( + chunked_output.keys() == non_chunked_output.keys() + ), "The output structure should be the same for chunked vs non-chunked versions of asr pipelines." + @require_torch def test_return_timestamps_ctc_fast(self): speech_recognizer = pipeline( From c50b5675d648d7c4bbe395d763cd468c3c4b56b7 Mon Sep 17 00:00:00 2001 From: Meliksah Turker Date: Mon, 25 Nov 2024 15:51:26 +0300 Subject: [PATCH 32/36] prepare_fa2_from_position_ids function bugfix (#33269) contiguous() is called before view() for key and value within prepare_fa2_from_position_ids function --- src/transformers/modeling_flash_attention_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/modeling_flash_attention_utils.py b/src/transformers/modeling_flash_attention_utils.py index 045d2f6d646..1b9274e21f5 100644 --- a/src/transformers/modeling_flash_attention_utils.py +++ b/src/transformers/modeling_flash_attention_utils.py @@ -163,8 +163,8 @@ def prepare_fa2_from_position_ids(query, key, value, position_ids): Maximum sequence length in batch (`max_seqlen_in_batch_q` for the target sequence i.e. query, `max_seqlen_in_batch_k` for the source sequence i.e. key/value). """ query = query.view(-1, query.size(-2), query.size(-1)) - key = key.view(-1, key.size(-2), key.size(-1)) - value = value.view(-1, value.size(-2), value.size(-1)) + key = key.contiguous().view(-1, key.size(-2), key.size(-1)) + value = value.contiguous().view(-1, value.size(-2), value.size(-1)) position_ids = position_ids.flatten() indices_q = torch.arange(position_ids.size(0), device=position_ids.device, dtype=torch.int32) From 62ab94dea8015440a6617afe0ec28d346976d884 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 12:54:55 +0000 Subject: [PATCH 33/36] Bump tornado from 6.4.1 to 6.4.2 in /examples/research_projects/visual_bert (#34887) Bump tornado in /examples/research_projects/visual_bert Bumps [tornado](https://github.com/tornadoweb/tornado) from 6.4.1 to 6.4.2. - [Changelog](https://github.com/tornadoweb/tornado/blob/v6.4.2/docs/releases.rst) - [Commits](https://github.com/tornadoweb/tornado/compare/v6.4.1...v6.4.2) --- updated-dependencies: - dependency-name: tornado dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/visual_bert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/visual_bert/requirements.txt b/examples/research_projects/visual_bert/requirements.txt index ed9ecaa7bf9..e2778663a53 100644 --- a/examples/research_projects/visual_bert/requirements.txt +++ b/examples/research_projects/visual_bert/requirements.txt @@ -86,7 +86,7 @@ testpath==0.4.4 tokenizers==0.8.1rc2 torch==2.2.0 torchvision==0.7.0 -tornado==6.4.1 +tornado==6.4.2 tqdm==4.66.3 traitlets git+https://github.com/huggingface/transformers.git From 97514a8ba3c4a738546ea13728f463dfbc398c8a Mon Sep 17 00:00:00 2001 From: wanxiangchwng Date: Mon, 25 Nov 2024 21:05:59 +0800 Subject: [PATCH 34/36] chore: fix some typos (#34891) Signed-off-by: wanxiangchwng --- src/transformers/generation/flax_logits_process.py | 2 +- src/transformers/generation/logits_process.py | 2 +- src/transformers/generation/tf_logits_process.py | 2 +- src/transformers/models/jamba/configuration_jamba.py | 2 +- src/transformers/models/jamba/modeling_jamba.py | 2 +- src/transformers/models/mixtral/modeling_mixtral.py | 2 +- src/transformers/models/moshi/modeling_moshi.py | 2 +- src/transformers/models/phimoe/modeling_phimoe.py | 2 +- src/transformers/models/whisper/modeling_tf_whisper.py | 2 +- src/transformers/models/zamba/configuration_zamba.py | 2 +- src/transformers/tokenization_utils_base.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/transformers/generation/flax_logits_process.py b/src/transformers/generation/flax_logits_process.py index 9b2ab5fb1af..d106c32defa 100644 --- a/src/transformers/generation/flax_logits_process.py +++ b/src/transformers/generation/flax_logits_process.py @@ -273,7 +273,7 @@ class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor): r""" [`FlaxLogitsProcessor`] supressing a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the - begining of the generation. + beginning of the generation. Args: begin_suppress_tokens (`List[int]`): diff --git a/src/transformers/generation/logits_process.py b/src/transformers/generation/logits_process.py index 9d244191da8..39a38f9139e 100644 --- a/src/transformers/generation/logits_process.py +++ b/src/transformers/generation/logits_process.py @@ -1782,7 +1782,7 @@ class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor): r""" [`SuppressTokensAtBeginLogitsProcessor`] supresses a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are - not generated at the begining. Originally created for + not generated at the beginning. Originally created for [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper). Examples: diff --git a/src/transformers/generation/tf_logits_process.py b/src/transformers/generation/tf_logits_process.py index 91e20fe02f7..f70655fb7c1 100644 --- a/src/transformers/generation/tf_logits_process.py +++ b/src/transformers/generation/tf_logits_process.py @@ -512,7 +512,7 @@ class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor): r""" [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not - sampled at the begining of the generation. + sampled at the beginning of the generation. """ def __init__(self, begin_suppress_tokens, begin_index): diff --git a/src/transformers/models/jamba/configuration_jamba.py b/src/transformers/models/jamba/configuration_jamba.py index b493db7ed45..3aabe979d8e 100644 --- a/src/transformers/models/jamba/configuration_jamba.py +++ b/src/transformers/models/jamba/configuration_jamba.py @@ -114,7 +114,7 @@ class JambaConfig(PretrainedConfig): mamba_expand (`int`, *optional*, defaults to 2): Expanding factor (relative to hidden_size) used to determine the mamba intermediate size mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`): - Rank of the the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` + Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` mamba_conv_bias (`bool`, *optional*, defaults to `True`): Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block. mamba_proj_bias (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/jamba/modeling_jamba.py b/src/transformers/models/jamba/modeling_jamba.py index 32ae6ea02eb..a185d5ebc6e 100755 --- a/src/transformers/models/jamba/modeling_jamba.py +++ b/src/transformers/models/jamba/modeling_jamba.py @@ -852,7 +852,7 @@ class JambaSparseMoeBlock(nn.Module): This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations - in terms of block-sparse operations to accomodate imbalanced + in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation diff --git a/src/transformers/models/mixtral/modeling_mixtral.py b/src/transformers/models/mixtral/modeling_mixtral.py index de1cd1097a5..0f04ef255c4 100644 --- a/src/transformers/models/mixtral/modeling_mixtral.py +++ b/src/transformers/models/mixtral/modeling_mixtral.py @@ -607,7 +607,7 @@ class MixtralSparseMoeBlock(nn.Module): This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations - in terms of block-sparse operations to accomodate imbalanced + in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation diff --git a/src/transformers/models/moshi/modeling_moshi.py b/src/transformers/models/moshi/modeling_moshi.py index 9975996d21d..82abfa66c2e 100644 --- a/src/transformers/models/moshi/modeling_moshi.py +++ b/src/transformers/models/moshi/modeling_moshi.py @@ -2527,7 +2527,7 @@ def build_delay_pattern_mask( - [ B, -1, -1, -1, -1, -1] - [ B, -1, -1, -1, -1, -1] - [ B, -1, -1, -1, -1, -1] - where B is the begining-of-sentence token, P is the special padding token id and -1 indicates that the token is valid for prediction. If we include + where B is the beginning-of-sentence token, P is the special padding token id and -1 indicates that the token is valid for prediction. If we include a prompt (input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the mask is set to the value in the prompt: - [ a0, a1, -1, -1, -1, P] diff --git a/src/transformers/models/phimoe/modeling_phimoe.py b/src/transformers/models/phimoe/modeling_phimoe.py index f3690e5f686..82763ccea62 100644 --- a/src/transformers/models/phimoe/modeling_phimoe.py +++ b/src/transformers/models/phimoe/modeling_phimoe.py @@ -735,7 +735,7 @@ class PhimoeSparseMoeBlock(nn.Module): This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations - in terms of block-sparse operations to accomodate imbalanced + in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation diff --git a/src/transformers/models/whisper/modeling_tf_whisper.py b/src/transformers/models/whisper/modeling_tf_whisper.py index 18f55dce8a2..a2873037163 100644 --- a/src/transformers/models/whisper/modeling_tf_whisper.py +++ b/src/transformers/models/whisper/modeling_tf_whisper.py @@ -1646,7 +1646,7 @@ def generate( prompt_ids = prompt_ids.tolist() decoder_start_token_id, *text_prompt_ids = prompt_ids # Slicing the text prompt ids in a manner consistent with the OpenAI implementation - # to accomodate context space for the prefix (see https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/decoding.py#L599) + # to accommodate context space for the prefix (see https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/decoding.py#L599) text_prompt_ids = text_prompt_ids[-self.config.max_length // 2 - 1 :] # Set the decoder_start_token_id to <|startofprev|> kwargs.update({"decoder_start_token_id": decoder_start_token_id}) diff --git a/src/transformers/models/zamba/configuration_zamba.py b/src/transformers/models/zamba/configuration_zamba.py index a6764a82608..77aa940141f 100644 --- a/src/transformers/models/zamba/configuration_zamba.py +++ b/src/transformers/models/zamba/configuration_zamba.py @@ -106,7 +106,7 @@ class ZambaConfig(PretrainedConfig): mamba_expand (`int`, *optional*, defaults to 2): Expanding factor (relative to hidden_size) used to determine the mamba intermediate size mamba_dt_rank (`Union[int,str]`, *optional*, defaults to `"auto"`): - Rank of the the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` + Rank of the mamba discretization projection matrix. `"auto"` means that it will default to `math.ceil(self.hidden_size / 16)` time_step_min (`float`, *optional*, defaults to 0.001): Minimum `time_step` used to bound `dt_proj_bias`. time_step_max (`float`, *optional*, defaults to 0.1): diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 03df02d21ff..ca5a3bb9c20 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1000,7 +1000,7 @@ def add_tokens( ) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to - it with indices starting from length of the current vocabulary and and will be isolated before the tokenization + it with indices starting from length of the current vocabulary and will be isolated before the tokenization algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore not treated in the same way. From 74db22f9051dc320e7cbfdd9668a950828fddb66 Mon Sep 17 00:00:00 2001 From: Donald Szeto Date: Mon, 25 Nov 2024 05:35:24 -0800 Subject: [PATCH 35/36] Fix convert_tokens_to_string when decoder is None (#34569) * Fix convert_tokens_to_string when decoder is None * revert unrelated changs --------- Co-authored-by: Arthur Zucker --- src/transformers/tokenization_utils_fast.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/transformers/tokenization_utils_fast.py b/src/transformers/tokenization_utils_fast.py index 5d238a5715f..d1353adfd22 100644 --- a/src/transformers/tokenization_utils_fast.py +++ b/src/transformers/tokenization_utils_fast.py @@ -624,7 +624,7 @@ def _encode_plus( if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { - key: value[0] if len(value) > 0 and isinstance(value[0], list) else value + key: (value[0] if len(value) > 0 and isinstance(value[0], list) else value) for key, value in batched_output.items() }, batched_output.encodings, @@ -635,7 +635,11 @@ def _encode_plus( return batched_output def convert_tokens_to_string(self, tokens: List[str]) -> str: - return self.backend_tokenizer.decoder.decode(tokens) + return ( + self.backend_tokenizer.decoder.decode(tokens) + if self.backend_tokenizer.decoder is not None + else " ".join(tokens) + ) def _decode( self, From 11cc2295c74d75cf76c4e84483224fc3a430e4f5 Mon Sep 17 00:00:00 2001 From: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com> Date: Mon, 25 Nov 2024 15:29:52 +0100 Subject: [PATCH 36/36] [`peft`] Given that `self.active_adapter` is deprecated, avoid using it (#34804) * Given that self.active_adapter is deprecated, avoid using it * Remove misleading comment - `self.active_adapter` is not used (and deprecated) --- src/transformers/integrations/peft.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/integrations/peft.py b/src/transformers/integrations/peft.py index 8afff36eb08..b3352be0f95 100644 --- a/src/transformers/integrations/peft.py +++ b/src/transformers/integrations/peft.py @@ -381,7 +381,7 @@ def enable_adapters(self) -> None: If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT official documentation: https://huggingface.co/docs/peft - Enable adapters that are attached to the model. The model will use `self.active_adapter()` + Enable adapters that are attached to the model. """ check_peft_version(min_version=MIN_PEFT_VERSION) @@ -457,7 +457,7 @@ def get_adapter_state_dict(self, adapter_name: Optional[str] = None) -> dict: from peft import get_peft_model_state_dict if adapter_name is None: - adapter_name = self.active_adapter() + adapter_name = self.active_adapters()[0] adapter_state_dict = get_peft_model_state_dict(self, adapter_name=adapter_name) return adapter_state_dict