From 8815beaea8347ddd4daf9d50e292dcaf82242da4 Mon Sep 17 00:00:00 2001
From: Jirka
Date: Wed, 21 Feb 2024 09:45:34 +0100
Subject: [PATCH] test: unify flaky tests using ``@pytest.mark.flaky(...)``

Replace the ``flaky`` test dependency with ``pytest-retry`` and switch
every ``@flaky(...)`` / ``@pytest.mark.flaky(max_runs=..., min_passes=...)``
decorator to the ``pytest-retry`` form ``@pytest.mark.flaky(retries=..., delay=...)``.
---
 requirements/requirements-test.txt                      |  4 ++--
 test/ext/naive_2/test_predictors.py                     |  3 +--
 test/mx/distribution/test_distribution_sampling.py      |  7 +++----
 test/mx/distribution/test_mx_distribution_inference.py  |  8 ++++----
 test/mx/model/gpvar/test_gpvar.py                       |  3 +--
 test/mx/model/simple_feedforward/test_model.py          |  2 +-
 test/mx/model/transformer/test_model.py                 |  2 +-
 .../torch/modules/test_torch_distribution_inference.py  | 10 +++++-----
 8 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/requirements/requirements-test.txt b/requirements/requirements-test.txt
index 88205bcaf8..060009b9a1 100644
--- a/requirements/requirements-test.txt
+++ b/requirements/requirements-test.txt
@@ -1,9 +1,9 @@
 pandas>=1.1
-flaky~=3.6
+pytest>=6.0
 pytest-cov==2.6.*
 pytest-timeout~=1.3
 pytest-xdist~=1.27
-pytest>=6.0
+pytest-retry~=1.6
 ujson
 orjson
 requests
diff --git a/test/ext/naive_2/test_predictors.py b/test/ext/naive_2/test_predictors.py
index 225a895f29..be889cb8f9 100644
--- a/test/ext/naive_2/test_predictors.py
+++ b/test/ext/naive_2/test_predictors.py
@@ -17,7 +17,6 @@
 import numpy as np
 import pandas as pd
 import pytest
-from flaky import flaky

 from gluonts.dataset.artificial import constant_dataset
 from gluonts.dataset.common import Dataset
@@ -106,7 +105,7 @@ def test_predictor(make_predictor, freq: str):
 CONSTANT_DATASET_PREDICTION_LENGTH = dataset_info.prediction_length


-@flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize(
     "predictor, accuracy",
     [
diff --git a/test/mx/distribution/test_distribution_sampling.py b/test/mx/distribution/test_distribution_sampling.py
index 80b61882ce..603164c028 100644
--- a/test/mx/distribution/test_distribution_sampling.py
+++ b/test/mx/distribution/test_distribution_sampling.py
@@ -14,7 +14,6 @@
 import mxnet as mx
 import numpy as np
 import pytest
-from flaky import flaky

 from gluonts.core.serde import dump_json, load_json
 from gluonts.mx.model.tpp.distribution import Loglogistic, Weibull
@@ -149,7 +148,7 @@

 @pytest.mark.parametrize("distr_class, params", test_cases)
 @pytest.mark.parametrize("serialize_fn", serialize_fn_list)
-@flaky
+@pytest.mark.flaky(retries=3, delay=1)
 def test_sampling(distr_class, params, serialize_fn) -> None:
     distr = distr_class(**params)
     distr = serialize_fn(distr)
@@ -205,7 +204,7 @@ def test_sampling(distr_class, params, serialize_fn) -> None:
 ]


-@flaky(min_passes=1, max_runs=3)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("distr, params, dim", test_cases_multivariate)
 @pytest.mark.parametrize("serialize_fn", serialize_fn_list)
 def test_multivariate_sampling(distr, params, dim, serialize_fn) -> None:
@@ -261,7 +260,7 @@ def test_piecewise_linear_sampling(distr, params, serialize_fn):
     assert samples.shape == (num_samples, 2)


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("alpha, beta", [(0.3, 0.9), (1.5, 1.7)])
 @pytest.mark.parametrize("zero_probability, one_probability", [(0.1, 0.2)])
 def test_inflated_beta_sampling(
diff --git a/test/mx/distribution/test_mx_distribution_inference.py b/test/mx/distribution/test_mx_distribution_inference.py
index d347d0b35c..a9279dfa19 100644
--- a/test/mx/distribution/test_mx_distribution_inference.py
+++ b/test/mx/distribution/test_mx_distribution_inference.py
@@ -536,7 +536,7 @@ def test_dirichlet_multinomial(hybridize: bool) -> None:
     ), f"Covariance did not match: cov = {cov}, cov_hat = {cov_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("hybridize", [True, False])
 @pytest.mark.parametrize("rank", [0, 1])
 def test_lowrank_multivariate_gaussian(hybridize: bool, rank: int) -> None:
@@ -604,7 +604,7 @@ def test_lowrank_multivariate_gaussian(hybridize: bool, rank: int) -> None:
     ), f"sigma did not match: sigma = {Sigma}, sigma_hat = {Sigma_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("hybridize", [True, False])
 def test_empirical_distribution(hybridize: bool) -> None:
     r"""
@@ -1243,7 +1243,7 @@ def test_genpareto_likelihood(xi: float, beta: float, hybridize: bool) -> None:


 @pytest.mark.timeout(120)
-@pytest.mark.flaky(max_runs=6, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("rate", [50.0])
 @pytest.mark.parametrize("zero_probability", [0.8, 0.2, 0.01])
 @pytest.mark.parametrize("hybridize", [False, True])
@@ -1291,7 +1291,7 @@ def test_inflated_poisson_likelihood(


 @pytest.mark.timeout(150)
-@pytest.mark.flaky(max_runs=6, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("mu", [5.0])
 @pytest.mark.parametrize("alpha", [0.05])
 @pytest.mark.parametrize("zero_probability", [0.3])
diff --git a/test/mx/model/gpvar/test_gpvar.py b/test/mx/model/gpvar/test_gpvar.py
index 26714bff7b..845cc95b0d 100644
--- a/test/mx/model/gpvar/test_gpvar.py
+++ b/test/mx/model/gpvar/test_gpvar.py
@@ -14,7 +14,6 @@

 import mxnet as mx
 import pytest
-from flaky import flaky

 from gluonts.dataset.artificial import constant_dataset
 from gluonts.dataset.common import TrainDatasets
@@ -93,7 +92,7 @@ def test_gpvar_proj():
     assert distr.mean.shape == (batch, dim)


-@flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("hybridize", [True, False])
 @pytest.mark.parametrize("target_dim_sample", [None, 2])
 @pytest.mark.parametrize("use_marginal_transformation", [True, False])
diff --git a/test/mx/model/simple_feedforward/test_model.py b/test/mx/model/simple_feedforward/test_model.py
index ba51109991..63946f9a40 100644
--- a/test/mx/model/simple_feedforward/test_model.py
+++ b/test/mx/model/simple_feedforward/test_model.py
@@ -31,7 +31,7 @@ def hyperparameters():
     )


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("hybridize", [True, False])
 @pytest.mark.parametrize("sampling", [True, False])
 def test_accuracy(accuracy_test, hyperparameters, hybridize, sampling):
diff --git a/test/mx/model/transformer/test_model.py b/test/mx/model/transformer/test_model.py
index 0c115a8bf3..d34c9f7b68 100644
--- a/test/mx/model/transformer/test_model.py
+++ b/test/mx/model/transformer/test_model.py
@@ -32,7 +32,7 @@ def hyperparameters():
     )


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("hybridize", [True, False])
 def test_accuracy(accuracy_test, hyperparameters, hybridize):
     hyperparameters.update(num_batches_per_epoch=80, hybridize=hybridize)
diff --git a/test/torch/modules/test_torch_distribution_inference.py b/test/torch/modules/test_torch_distribution_inference.py
index 2a6b7a64e7..0d760fc144 100644
--- a/test/torch/modules/test_torch_distribution_inference.py
+++ b/test/torch/modules/test_torch_distribution_inference.py
@@ -119,7 +119,7 @@ def compare_logits(
     ).all(), f"logits did not match: logits_true = {param_true}, logits_hat = {param_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("concentration1, concentration0", [(3.75, 1.25)])
 def test_beta_likelihood(concentration1: float, concentration0: float) -> None:
     """
@@ -158,7 +158,7 @@ def test_beta_likelihood(concentration1: float, concentration0: float) -> None:
     ), f"concentration0 did not match: concentration0 = {concentration0}, concentration0_hat = {concentration0_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("concentration, rate", [(3.75, 1.25)])
 def test_gamma_likelihood(concentration: float, rate: float) -> None:
     """
@@ -193,7 +193,7 @@ def test_gamma_likelihood(concentration: float, rate: float) -> None:
     ), f"rate did not match: rate = {rate}, rate_hat = {rate_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("loc, scale,", [(1.0, 0.1)])
 def test_normal_likelihood(loc: float, scale: float):
     locs = torch.zeros((NUM_SAMPLES,)) + loc
@@ -223,7 +223,7 @@ def test_normal_likelihood(loc: float, scale: float):
     ), f"scale did not match: scale = {scale}, scale_hat = {scale_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("df, loc, scale,", [(6.0, 2.3, 0.7)])
 def test_studentT_likelihood(df: float, loc: float, scale: float):
     dfs = torch.zeros((NUM_SAMPLES,)) + df
@@ -258,7 +258,7 @@ def test_studentT_likelihood(df: float, loc: float, scale: float):
     ), f"scale did not match: scale = {scale}, scale_hat = {scale_hat}"


-@pytest.mark.flaky(max_runs=3, min_passes=1)
+@pytest.mark.flaky(retries=3, delay=1)
 @pytest.mark.parametrize("rate", [1.0])
 def test_poisson(rate: float) -> None:
     """
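
For reference, a minimal sketch of the decorator style this patch standardizes
on, i.e. the ``flaky`` marker registered by the ``pytest-retry`` plugin added
in ``requirements-test.txt``. The test name and body below are illustrative
only and not part of the change:

    import random

    import pytest


    # Re-run a failing test up to 3 more times, waiting 1 second between
    # attempts; the ``flaky`` marker with ``retries``/``delay`` keyword
    # arguments is provided by the pytest-retry plugin.
    @pytest.mark.flaky(retries=3, delay=1)
    def test_sometimes_flaky():
        # Illustrative nondeterministic assertion (passes ~90% of the time).
        assert random.random() < 0.9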