From 4702912a31d5187492bdb54372a9d83fa506dbcb Mon Sep 17 00:00:00 2001
From: AdrianAbeyta
Date: Wed, 8 Nov 2023 23:52:51 +0000
Subject: [PATCH] Modify test file

---
 examples/pytorch/language-modeling/run_clm.py | 1 -
 tests/models/ibert/test_modeling_ibert.py     | 6 +++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py
index 24b9e150f04..15c9261be48 100755
--- a/examples/pytorch/language-modeling/run_clm.py
+++ b/examples/pytorch/language-modeling/run_clm.py
@@ -57,7 +57,6 @@
 
 # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
 check_min_version("4.35.0.dev0")
 
-print("Modify file test for CI")
 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
 
diff --git a/tests/models/ibert/test_modeling_ibert.py b/tests/models/ibert/test_modeling_ibert.py
index 096a55169a0..9a1b6abf175 100644
--- a/tests/models/ibert/test_modeling_ibert.py
+++ b/tests/models/ibert/test_modeling_ibert.py
@@ -519,7 +519,7 @@ def test_int_gelu(self):
         gelu_q = IntGELU(quant_mode=True)
         gelu_dq = nn.GELU()
 
-        x_int = torch.range(-10000, 10000, 1)
+        x_int = torch.arange(-10000, 10001, 1)
         x_scaling_factor = torch.tensor(0.001)
         x = x_int * x_scaling_factor
 
@@ -534,7 +534,7 @@ def test_int_gelu(self):
         self.assertTrue(torch.allclose(q_int, q_int.round(), atol=1e-4))
 
     def test_force_dequant_gelu(self):
-        x_int = torch.range(-10000, 10000, 1)
+        x_int = torch.arange(-10000, 10001, 1)
         x_scaling_factor = torch.tensor(0.001)
         x = x_int * x_scaling_factor
 
@@ -565,7 +565,7 @@ def test_int_softmax(self):
         softmax_q = IntSoftmax(output_bit, quant_mode=True)
         softmax_dq = nn.Softmax()
 
-        # x_int = torch.range(-10000, 10000, 1)
+
         def _test(array):
             x_int = torch.tensor(array)
             x_scaling_factor = torch.tensor(0.1)
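Note for reviewers on the torch.range -> torch.arange substitution above (illustrative aside, not part of the patch): torch.range(start, end, step) included the endpoint in its output and has long been deprecated in PyTorch, while torch.arange works on the half-open interval [start, end), which is why the end value is bumped from 10000 to 10001 to keep the test input identical. A minimal sketch of the equivalence, assuming any recent PyTorch where torch.arange is available:

    import torch

    # The old call, torch.range(-10000, 10000, 1), produced 20001 values,
    # *including* the endpoint 10000, and emits a deprecation warning.
    # torch.arange uses [start, end), so the end must grow by one step
    # to cover the same set of integers.
    x_int = torch.arange(-10000, 10001, 1)

    assert x_int.numel() == 20001       # same element count as torch.range gave
    assert x_int[0].item() == -10000    # first value unchanged
    assert x_int[-1].item() == 10000    # endpoint still included

One behavioral difference worth noting: with integer arguments torch.arange returns an int64 tensor, whereas torch.range returned the default float dtype; in these tests x_int is immediately multiplied by a float x_scaling_factor, so the result is promoted to float either way.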