From 58796e1a272f83382a5c7f5b7a07180f9ba694f0 Mon Sep 17 00:00:00 2001
From: VNMabus
Date: Sat, 31 Aug 2024 19:15:34 +0200
Subject: [PATCH] Allow use of NumPy 2.

All tests are now executed with NumPy 2. This is important because the
representation of arrays (and thus the expected doctest output) changed
in that version.
---
 pyproject.toml                                 |  3 ++-
 skfda/_utils/_utils.py                         | 20 ++++++++--------
 .../outliers/_directional_outlyingness.py      |  2 +-
 skfda/misc/_math.py                            |  6 ++---
 skfda/misc/covariances.py                      |  2 +-
 .../_linear_differential_operator.py           | 16 ++++++-------
 .../classification/_centroid_classifiers.py    |  4 ++--
 skfda/ml/classification/_depth_classifiers.py  | 18 +++++++-------
 .../classification/_neighbors_classifiers.py   |  4 ++--
 skfda/ml/classification/_qda.py                |  6 ++---
 skfda/ml/clustering/_hierarchical.py           |  2 +-
 .../ml/regression/_historical_linear_model.py  | 22 ++++++++---------
 .../recursive_maxima_hunting.py                |  4 ++--
 .../_function_transformers.py                  |  2 +-
 .../feature_construction/_functions.py         |  2 +-
 .../_per_class_transformer.py                  |  4 ++--
 .../preprocessing/registration/validation.py   | 12 +++++-----
 skfda/preprocessing/smoothing/validation.py    |  2 +-
 skfda/representation/basis/_fdatabasis.py      |  4 ++--
 skfda/representation/grid.py                   |  4 ++--
 skfda/representation/irregular.py              | 24 +++++++++----------
 skfda/tests/test_pandas_fdatabasis.py          |  2 +-
 skfda/tests/test_pandas_fdatagrid.py           |  2 +-
 23 files changed, 84 insertions(+), 83 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 83c27fae0..66a3a6c34 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -37,7 +37,7 @@ dependencies = [
     "lazy_loader",
     "matplotlib",
     "multimethod>=1.5, !=1.11, != 1.11.1",
-    "numpy>=1.16, <2",
+    "numpy>=1.16",
     "pandas>=1.0",
     "rdata",
     "scikit-datasets[cran]>=0.2.2",
@@ -61,6 +61,7 @@ docs = [
   "sphinxcontrib-bibtex",
 ]
 test = [
+  "numpy>=2", # Changes in array representation.
   "pytest",
   "pytest-env",
   "pytest-subtests",
diff --git a/skfda/_utils/_utils.py b/skfda/_utils/_utils.py
index aebb5189f..a7bbdd278 100644
--- a/skfda/_utils/_utils.py
+++ b/skfda/_utils/_utils.py
@@ -201,22 +201,22 @@ def _cartesian_product(  # noqa: WPS234
         >>> from skfda._utils import _cartesian_product
         >>> axes = [[0,1],[2,3]]
         >>> _cartesian_product(axes)
-        array([[0, 2],
-               [0, 3],
-               [1, 2],
-               [1, 3]])
+        array([[ 0, 2],
+               [ 0, 3],
+               [ 1, 2],
+               [ 1, 3]])

         >>> axes = [[0,1],[2,3],[4]]
         >>> _cartesian_product(axes)
-        array([[0, 2, 4],
-               [0, 3, 4],
-               [1, 2, 4],
-               [1, 3, 4]])
+        array([[ 0, 2, 4],
+               [ 0, 3, 4],
+               [ 1, 2, 4],
+               [ 1, 3, 4]])

         >>> axes = [[0,1]]
         >>> _cartesian_product(axes)
-        array([[0],
-               [1]])
+        array([[ 0],
+               [ 1]])

     """
     cartesian = np.stack(np.meshgrid(*axes, indexing='ij'), -1)
diff --git a/skfda/exploratory/outliers/_directional_outlyingness.py b/skfda/exploratory/outliers/_directional_outlyingness.py
index a0860e1b1..a61512d98 100644
--- a/skfda/exploratory/outliers/_directional_outlyingness.py
+++ b/skfda/exploratory/outliers/_directional_outlyingness.py
@@ -316,7 +316,7 @@ class MSPlotOutlierDetector(  # noqa: WPS230
         >>> fd = skfda.FDataGrid(data_matrix, grid_points)
         >>> out_detector = MSPlotOutlierDetector()
         >>> out_detector.fit_predict(fd)
-        array([1, 1, 1, 1])
+        array([ 1, 1, 1, 1])

     References:
         Dai, Wenlin, and Genton, Marc G. "Multivariate functional data
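Nearly all of the hunks that follow only update expected doctest output. The main driver is NEP 51: starting with NumPy 2, NumPy scalars are printed with an explicit type instead of as bare numbers. A minimal sketch of the difference, assuming NumPy >= 2 is installed (the NumPy 1.x output is given in the comments for contrast):

    import numpy as np

    value = np.float64(0.5)

    # NumPy >= 2 (NEP 51): repr() spells out the scalar type.
    print(repr(value))  # np.float64(0.5)  -- NumPy 1.x printed: 0.5

    # str() is unchanged, so explicitly printed values still match.
    print(str(value))   # 0.5 on both major versions

This is why bare doctest results such as 32 or 0.9746318461970762 become np.int64(32) and np.float64(0.9746318461970762) in skfda/misc/_math.py below.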
"Multivariate functional data diff --git a/skfda/misc/_math.py b/skfda/misc/_math.py index 5d63f9100..91f1487c7 100644 --- a/skfda/misc/_math.py +++ b/skfda/misc/_math.py @@ -257,14 +257,14 @@ def inner_product( >>> array1 = np.array([1, 2, 3]) >>> array2 = np.array([4, 5, 6]) >>> inner_product(array1, array2) - 32 + np.int64(32) If the arrays contain more than one sample >>> array1 = np.array([[1, 2, 3], [2, 3, 4]]) >>> array2 = np.array([[4, 5, 6], [1, 1, 1]]) >>> inner_product(array1, array2) - array([32, 9]) + array([ 32, 9]) The inner product of the :math:`f(x) = x` and the constant :math:`y=1` defined over the interval :math:`[0,1]` is the area of @@ -592,7 +592,7 @@ def cosine_similarity( >>> array1 = np.array([1, 2, 3]) >>> array2 = np.array([4, 5, 6]) >>> cosine_similarity(array1, array2) - 0.9746318461970762 + np.float64(0.9746318461970762) If the arrays contain more than one sample diff --git a/skfda/misc/covariances.py b/skfda/misc/covariances.py index 298fcf64d..f16ed8e27 100644 --- a/skfda/misc/covariances.py +++ b/skfda/misc/covariances.py @@ -29,7 +29,7 @@ def _squared_norms(x: NDArrayFloat, y: NDArrayFloat) -> NDArrayFloat: def _transform_to_2d(t: ArrayLike) -> NDArrayFloat: """Transform 1d arrays in column vectors.""" - t = np.asfarray(t) + t = np.asarray(t, dtype=np.float64) dim = t.ndim assert dim <= 2 diff --git a/skfda/misc/operators/_linear_differential_operator.py b/skfda/misc/operators/_linear_differential_operator.py index 84fdbe70c..ddd834fbc 100644 --- a/skfda/misc/operators/_linear_differential_operator.py +++ b/skfda/misc/operators/_linear_differential_operator.py @@ -616,14 +616,14 @@ def _optimized_operator_evaluation_in_grid( [ 0., 0., 0., 0., 1., -2., 1., 4.], [ 0., 0., 0., 0., 0., 1., -2., -5.], [ 0., 0., 0., 0., 0., 0., 1., 2.]]), - array([[0, 1], - [0, 2], - [0, 3], - [0, 4], - [3, 7], - [4, 7], - [5, 7], - [6, 7]])) + array([[ 0, 1], + [ 0, 2], + [ 0, 3], + [ 0, 4], + [ 3, 7], + [ 4, 7], + [ 5, 7], + [ 6, 7]])) Explanation: Each row of the first array contains the values of the linear operator diff --git a/skfda/ml/classification/_centroid_classifiers.py b/skfda/ml/classification/_centroid_classifiers.py index 4bac8ad9e..83b421100 100644 --- a/skfda/ml/classification/_centroid_classifiers.py +++ b/skfda/ml/classification/_centroid_classifiers.py @@ -63,7 +63,7 @@ class and return a :class:`FData` object with only one sample We can predict the class of new samples >>> neigh.predict(fd[::2]) # Predict labels for even samples - array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]) + array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]) See also: :class:`~skfda.ml.classification.DTMClassifier` @@ -164,7 +164,7 @@ class DTMClassifier(NearestCentroid[Input, Target]): We can predict the class of new samples >>> clf.predict(X_test) # Predict labels for test samples - array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, + array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1]) Finally, we calculate the mean accuracy for the test data diff --git a/skfda/ml/classification/_depth_classifiers.py b/skfda/ml/classification/_depth_classifiers.py index c258d608b..8f0ded049 100644 --- a/skfda/ml/classification/_depth_classifiers.py +++ b/skfda/ml/classification/_depth_classifiers.py @@ -106,8 +106,8 @@ class DDClassifier( We can predict the class of new samples >>> clf.predict(X_test) - array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, - 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1]) + array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1]) 
@@ -278,8 +278,8 @@ class DDGClassifier(
         We can predict the class of new samples

         >>> clf.predict(X_test)
-        array([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
-               1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
+        array([ 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
+                1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])

         Finally, we calculate the mean accuracy for the test data
@@ -299,8 +299,8 @@ class DDGClassifier(
         >>> clf.fit(X_train, y_train)
         DDGClassifier(...)
         >>> clf.predict(X_test)
-        array([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
-               1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
+        array([ 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
+                1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
         >>> clf.score(X_test, y_test)
         0.875

@@ -475,7 +475,7 @@ class _ArgMaxClassifier(
         We can predict the class of new samples

         >>> clf.predict(X)  # Predict labels for test samples
-        array([1, 0, 0])
+        array([ 1, 0, 0])

     """

     def fit(self, X: NDArrayFloat, y: Target) -> _ArgMaxClassifier[Target]:
@@ -543,8 +543,8 @@ class MaximumDepthClassifier(DDGClassifier[Input, Target]):
         We can predict the class of new samples

         >>> clf.predict(X_test)  # Predict labels for test samples
-        array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
-               1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
+        array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+                1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])

         Finally, we calculate the mean accuracy for the test data
diff --git a/skfda/ml/classification/_neighbors_classifiers.py b/skfda/ml/classification/_neighbors_classifiers.py
index 4827eaae5..6a4391b97 100644
--- a/skfda/ml/classification/_neighbors_classifiers.py
+++ b/skfda/ml/classification/_neighbors_classifiers.py
@@ -89,7 +89,7 @@ class KNeighborsClassifier(
         We can predict the class of new samples

         >>> neigh.predict(fd[::2])  # Predict labels for even samples
-        array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
+        array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])

         And the estimated probabilities.
@@ -254,7 +254,7 @@ class RadiusNeighborsClassifier(
         We can predict the class of new samples.

         >>> neigh.predict(fd[::2])  # Predict labels for even samples
-        array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
+        array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])

     See also:
         :class:`~skfda.ml.classification.KNeighborsClassifier`
diff --git a/skfda/ml/classification/_qda.py b/skfda/ml/classification/_qda.py
index f1fca3f4c..ffee147ee 100644
--- a/skfda/ml/classification/_qda.py
+++ b/skfda/ml/classification/_qda.py
@@ -95,9 +95,9 @@ class QuadraticDiscriminantAnalysis(
         We can predict the class of new samples.

-        >>> list(qda.predict(X_test))
-        [0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1,
-        1, 0, 1, 0, 1, 0, 1, 1]
+        >>> qda.predict(X_test)
+        array([ 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1,
+                1, 0, 1, 0, 1, 0, 1, 1], dtype=int8)

         Finally, we calculate the mean accuracy for the test data.
diff --git a/skfda/ml/clustering/_hierarchical.py b/skfda/ml/clustering/_hierarchical.py
index 75e06e8f3..d8a403407 100644
--- a/skfda/ml/clustering/_hierarchical.py
+++ b/skfda/ml/clustering/_hierarchical.py
@@ -144,7 +144,7 @@ class AgglomerativeClustering(  # noqa: WPS230
         >>> clustering.fit(X)
         AgglomerativeClustering(...)
         >>> clustering.labels_.astype(np.int_)
-        array([0, 0, 1, 0, 0, 1])
+        array([ 0, 0, 1, 0, 0, 1])

     """

     LinkageCriterion = LinkageCriterion
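The next hunk casts randomly drawn integer data to float before using it in a doctest. A plausible motivation, beyond the scalar repr change already discussed, is that integer and float arrays print differently, so fixing the dtype fixes the printed form. A sketch, assuming NumPy is installed (exact spacing also depends on the active print options):

    import numpy as np

    rng = np.random.RandomState(0)
    ints = rng.choice(10, size=6)

    print(repr(ints))                # e.g. array([5, 0, 3, 3, 7, 9])
    print(repr(ints.astype(float)))  # e.g. array([5., 0., 3., 3., 7., 9.])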
diff --git a/skfda/ml/regression/_historical_linear_model.py b/skfda/ml/regression/_historical_linear_model.py
index a26ce4257..4067d2359 100644
--- a/skfda/ml/regression/_historical_linear_model.py
+++ b/skfda/ml/regression/_historical_linear_model.py
@@ -264,19 +264,19 @@ class HistoricalLinearRegression(
         >>> import scipy.integrate
         >>> random_state = np.random.RandomState(0)
-        >>> data_matrix = random_state.choice(10, size=(8, 6))
+        >>> data_matrix = random_state.choice(10, size=(8, 6)).astype(float)
         >>> data_matrix
-        array([[5, 0, 3, 3, 7, 9],
-               [3, 5, 2, 4, 7, 6],
-               [8, 8, 1, 6, 7, 7],
-               [8, 1, 5, 9, 8, 9],
-               [4, 3, 0, 3, 5, 0],
-               [2, 3, 8, 1, 3, 3],
-               [3, 7, 0, 1, 9, 9],
-               [0, 4, 7, 3, 2, 7]])
-        >>> intercept = random_state.choice(10, size=(1, 6))
+        array([[ 5., 0., 3., 3., 7., 9.],
+               [ 3., 5., 2., 4., 7., 6.],
+               [ 8., 8., 1., 6., 7., 7.],
+               [ 8., 1., 5., 9., 8., 9.],
+               [ 4., 3., 0., 3., 5., 0.],
+               [ 2., 3., 8., 1., 3., 3.],
+               [ 3., 7., 0., 1., 9., 9.],
+               [ 0., 4., 7., 3., 2., 7.]])
+        >>> intercept = random_state.choice(10, size=(1, 6)).astype(float)
         >>> intercept
-        array([[2, 0, 0, 4, 5, 5]])
+        array([[ 2., 0., 0., 4., 5., 5.]])
         >>> y_data = scipy.integrate.cumulative_trapezoid(
         ...     data_matrix,
         ...     initial=0,
diff --git a/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py b/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py
index 2a66ffa19..ef43ebd46 100644
--- a/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py
+++ b/skfda/preprocessing/dim_reduction/variable_selection/recursive_maxima_hunting.py
@@ -44,7 +44,7 @@ def _transform_to_2d(t: ArrayLike) -> NDArrayFloat:
-    t = np.asfarray(t)
+    t = np.asarray(t, dtype=np.float64)

     dim = t.ndim
     assert dim <= 2
@@ -907,7 +907,7 @@ def fit(  # type: ignore[override] # noqa: D102
         """Recursive maxima hunting algorithm."""
         self.features_shape_ = X.data_matrix.shape[1:]

-        y = np.asfarray(y)
+        y = np.asarray(y, dtype=np.float64)

         self.correction_ = (
             UniformCorrection()
diff --git a/skfda/preprocessing/feature_construction/_function_transformers.py b/skfda/preprocessing/feature_construction/_function_transformers.py
index 07d99d9b9..4e2c1bf30 100644
--- a/skfda/preprocessing/feature_construction/_function_transformers.py
+++ b/skfda/preprocessing/feature_construction/_function_transformers.py
@@ -226,7 +226,7 @@ class NumberCrossingsTransformer(
         FDataGrid created.

         >>> tf = NumberCrossingsTransformer(levels=0, direction="up")
         >>> tf.fit_transform(fd_grid)
-        array([[2]])
+        array([[ 2]])

     """

     def __init__(
diff --git a/skfda/preprocessing/feature_construction/_functions.py b/skfda/preprocessing/feature_construction/_functions.py
index 2ac6d8428..33716893d 100644
--- a/skfda/preprocessing/feature_construction/_functions.py
+++ b/skfda/preprocessing/feature_construction/_functions.py
@@ -349,7 +349,7 @@ def number_crossings(
         FDataGrid created.

         >>> number_crossings(fd_grid, levels=0, direction="up")
-        array([[2]])
+        array([[ 2]])

     """
     # This is only defined for univariate functions
     check_fdata_dimensions(fd, dim_domain=1, dim_codomain=1)
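The covariances.py and recursive_maxima_hunting.py hunks above replace np.asfarray, which NumPy 2.0 removed from the main namespace, with an explicit np.asarray call. For the default-dtype usage in this patch the two spellings are equivalent:

    import numpy as np

    t = [0, 1, 2]

    # Before NumPy 2: t = np.asfarray(t)  (raises AttributeError on NumPy 2)
    t = np.asarray(t, dtype=np.float64)
    print(t.dtype)  # float64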
diff --git a/skfda/preprocessing/feature_construction/_per_class_transformer.py b/skfda/preprocessing/feature_construction/_per_class_transformer.py
index 2a08ae5b4..eb8c1fd2d 100644
--- a/skfda/preprocessing/feature_construction/_per_class_transformer.py
+++ b/skfda/preprocessing/feature_construction/_per_class_transformer.py
@@ -107,7 +107,7 @@ class PerClassTransformer(TransformerMixin[Input, Output, NDArrayInt]):
         Finally we can predict and check the score:

         >>> neigh1.predict(X_test1)
-        array([0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
+        array([ 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
                1, 1, 1], dtype=int8)

         >>> round(neigh1.score(X_test1, y_test1), 3)
@@ -145,7 +145,7 @@ class PerClassTransformer(TransformerMixin[Input, Output, NDArrayInt]):
         >>> neigh2 = KNeighborsClassifier()
         >>> neigh2 = neigh2.fit(X_train2, y_train2)
         >>> neigh2.predict(X_test2)
-        array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
+        array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
                1, 1, 1], dtype=int8)

         >>> round(neigh2.score(X_test2, y_test2), 3)
diff --git a/skfda/preprocessing/registration/validation.py b/skfda/preprocessing/registration/validation.py
index a948d1350..e4e2d8e6f 100644
--- a/skfda/preprocessing/registration/validation.py
+++ b/skfda/preprocessing/registration/validation.py
@@ -103,10 +103,10 @@ class AmplitudePhaseDecompositionStats():
     :func:`mse_r_squared`, returned when `return_stats` is `True`.

     Args:
-        r_square (float): Squared correlation index :math:`R^2`.
-        mse_amplitude (float): Mean square error of amplitude
+        r_square: Squared correlation index :math:`R^2`.
+        mse_amplitude: Mean square error of amplitude
             :math:`\text{MSE}_{amp}`.
-        mse_phase (float): Mean square error of phase :math:`\text{MSE}_{pha}`.
+        mse_phase: Mean square error of phase :math:`\text{MSE}_{pha}`.
         c_r (float): Constant :math:`C_R`.

     """
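The stats() hunk just below swaps .item() and bare NumPy expressions for float() wrappers. Either spelling yields a plain Python float, but float() covers scalars and zero-dimensional arrays uniformly, and a plain float prints the same under every NumPy version. A sketch of the effect:

    import numpy as np

    total = np.sum(np.array([1.0, 2.0]))

    print(repr(total))         # np.float64(3.0) under NumPy 2
    print(repr(float(total)))  # 3.0 on any NumPy version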
""" @@ -297,10 +297,10 @@ def stats( X_mean = X.mean() y_mean = y.mean() - c_r = np.sum(l2_norm(X)**2) / np.sum(l2_norm(y)**2) + c_r = float(np.sum(l2_norm(X)**2) / np.sum(l2_norm(y)**2)) - mse_amplitude = c_r * np.mean(l2_distance(y, y.mean())**2) - mse_phase = (c_r * l2_norm(y_mean)**2 - l2_norm(X_mean)**2).item() + mse_amplitude = float(c_r * np.mean(l2_distance(y, y.mean())**2)) + mse_phase = float(c_r * l2_norm(y_mean)**2 - l2_norm(X_mean)**2) # Should be equal to np.mean(l2_distance(X, X_mean)**2) mse_total = mse_amplitude + mse_phase diff --git a/skfda/preprocessing/smoothing/validation.py b/skfda/preprocessing/smoothing/validation.py index 1bb0bf2e8..10c628ae8 100644 --- a/skfda/preprocessing/smoothing/validation.py +++ b/skfda/preprocessing/smoothing/validation.py @@ -210,7 +210,7 @@ class SmoothingParameterSearch( >>> np.array(grid.cv_results_['mean_test_score']).round(2) array([-11.67, -12.37]) >>> round(grid.best_score_, 2) - -11.67 + np.float64(-11.67) >>> grid.best_params_['kernel_estimator__n_neighbors'] 2 >>> grid.best_estimator_.hat_matrix().round(2) diff --git a/skfda/representation/basis/_fdatabasis.py b/skfda/representation/basis/_fdatabasis.py index b943f7932..b528e3098 100644 --- a/skfda/representation/basis/_fdatabasis.py +++ b/skfda/representation/basis/_fdatabasis.py @@ -435,7 +435,7 @@ def sum( # noqa: WPS125 if min_count > 0: valid = ~np.isnan(self.coefficients) n_valid = np.sum(valid, axis=0) - coefs[n_valid < min_count] = np.NaN + coefs[n_valid < min_count] = np.nan return self.copy( coefficients=coefs, @@ -1026,7 +1026,7 @@ def construct_array_type(cls) -> Type[FDataBasis]: # noqa: D102 def _na_repr(self) -> FDataBasis: return FDataBasis( basis=self.basis, - coefficients=((np.NaN,) * self.basis.n_basis,), + coefficients=((np.nan,) * self.basis.n_basis,), ) def __eq__(self, other: Any) -> bool: diff --git a/skfda/representation/grid.py b/skfda/representation/grid.py index 292062204..50bb96169 100644 --- a/skfda/representation/grid.py +++ b/skfda/representation/grid.py @@ -591,7 +591,7 @@ def sum( # noqa: WPS125 if min_count > 0: valid = ~np.isnan(self.data_matrix) n_valid = np.sum(valid, axis=0) - data[n_valid < min_count] = np.NaN + data[n_valid < min_count] = np.nan return self.copy( data_matrix=data, @@ -1518,7 +1518,7 @@ def _na_repr(self) -> FDataGrid: + (self.dim_codomain,) ) - data_matrix = np.full(shape=shape, fill_value=np.NaN) + data_matrix = np.full(shape=shape, fill_value=np.nan) return FDataGrid( grid_points=self.grid_points, diff --git a/skfda/representation/irregular.py b/skfda/representation/irregular.py index 07cce5e77..cf19c8cae 100644 --- a/skfda/representation/irregular.py +++ b/skfda/representation/irregular.py @@ -197,17 +197,17 @@ class FDataIrregular(FData): # noqa: WPS214 >>> values = [[1], [2], [3], [4], [5]] >>> FDataIrregular(indices, arguments, values) FDataIrregular( - start_indices=array([0, 2]), - points=array([[1], - [2], - [3], - [4], - [5]]), - values=array([[1], - [2], - [3], - [4], - [5]]), + start_indices=array([ 0, 2]), + points=array([[ 1], + [ 2], + [ 3], + [ 4], + [ 5]]), + values=array([[ 1], + [ 2], + [ 3], + [ 4], + [ 5]]), domain_range=((1.0, 5.0),), ...) 
@@ -963,7 +963,7 @@ def concatenate(self: T, *others: T, as_coordinates: bool = False) -> T:
         >>> fd_2 = FDataIrregular(indices, arguments_2, values_2)
         >>> fd.concatenate(fd_2)
         FDataIrregular(
-            start_indices=array([0, 2, 5, 7]),
+            start_indices=array([ 0, 2, 5, 7]),
             points=array([[ 0.],
                 [ 1.],
                 [ 2.],
diff --git a/skfda/tests/test_pandas_fdatabasis.py b/skfda/tests/test_pandas_fdatabasis.py
index 32c735dbb..e9c2bf0ea 100644
--- a/skfda/tests/test_pandas_fdatabasis.py
+++ b/skfda/tests/test_pandas_fdatabasis.py
@@ -62,7 +62,7 @@ def data_missing(basis: Basis) -> FDataBasis:
         2 * basis.n_basis,
         dtype=np.float64,
     ).reshape(2, basis.n_basis)
-    coef_matrix[0, :] = np.NaN
+    coef_matrix[0, :] = np.nan

     return FDataBasis(basis=basis, coefficients=coef_matrix)

diff --git a/skfda/tests/test_pandas_fdatagrid.py b/skfda/tests/test_pandas_fdatagrid.py
index 00dde5c6a..0d5078eff 100644
--- a/skfda/tests/test_pandas_fdatagrid.py
+++ b/skfda/tests/test_pandas_fdatagrid.py
@@ -68,7 +68,7 @@ def data_missing() -> ExtensionArray:
         2 * 10 * 10 * 3,
         dtype=np.float64,
     ).reshape(2, 10, 10, 3)
-    data_matrix[0, ...] = np.NaN
+    data_matrix[0, ...] = np.nan

     grid_points = [
         np.arange(10),
         np.arange(10) / 10,