From 78e15d03737909059e62edb9116fc24f2b0fc7f9 Mon Sep 17 00:00:00 2001
From: Andrei Ivanov
Date: Thu, 9 Jan 2025 16:54:16 -0800
Subject: [PATCH] Fixing warning triggered by
 `torch.cuda.reset_max_memory_allocated()` usage

---
 examples/llm/molecule_gpt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/llm/molecule_gpt.py b/examples/llm/molecule_gpt.py
index 8f6c6024014d..6f11d87969a4 100644
--- a/examples/llm/molecule_gpt.py
+++ b/examples/llm/molecule_gpt.py
@@ -167,7 +167,7 @@ def adjust_learning_rate(param_group, LR, epoch):
         f'moleculegpt_epoch{best_epoch}_val_loss{best_val_loss:4f}_ckpt.pt'  # noqa: E501
     )
     torch.cuda.empty_cache()
-    torch.cuda.reset_max_memory_allocated()
+    torch.cuda.reset_peak_memory_stats()
     print(f"Total Training Time: {time.time() - start_time:2f}s")
 
     # Test
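
Note: `torch.cuda.reset_max_memory_allocated()` is deprecated in favor of `torch.cuda.reset_peak_memory_stats()`, which resets the peak trackers that `torch.cuda.max_memory_allocated()` reports, so the swap is a drop-in replacement. Below is a minimal usage sketch, assuming a CUDA-capable device; the GPU work and variable names are illustrative only and not taken from the example script.

import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    # Replaces the deprecated torch.cuda.reset_max_memory_allocated():
    # clears the peak counters behind torch.cuda.max_memory_allocated().
    torch.cuda.reset_peak_memory_stats()

    # Illustrative GPU work.
    x = torch.randn(1024, 1024, device='cuda')
    y = x @ x

    peak_mib = torch.cuda.max_memory_allocated() / 1024**2
    print(f"Peak memory since reset: {peak_mib:.1f} MiB")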