Merge pull request #632 from GAA-UAM/feature/numpy2
Allow use of NumPy 2.
vnmabus authored Aug 31, 2024
2 parents 2f1fb20 + 58796e1 commit d0bc82f
Showing 23 changed files with 84 additions and 83 deletions.
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -37,7 +37,7 @@ dependencies = [
"lazy_loader",
"matplotlib",
"multimethod>=1.5, !=1.11, != 1.11.1",
"numpy>=1.16, <2",
"numpy>=1.16",
"pandas>=1.0",
"rdata",
"scikit-datasets[cran]>=0.2.2",
@@ -61,6 +61,7 @@ docs = [
"sphinxcontrib-bibtex",
]
test = [
"numpy>=2", # Changes in array representation.
"pytest",
"pytest-env",
"pytest-subtests",
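Note on the `numpy>=2` test pin: NumPy 2.0 changed how scalar values are printed, and the doctest outputs below were regenerated against the new reprs. A minimal illustration, assuming NumPy >= 2.0 with default print options:

>>> import numpy as np
>>> np.int64(32)
np.int64(32)
>>> np.float64(0.5)
np.float64(0.5)

Under NumPy 1.x the same expressions print as plain 32 and 0.5; np.set_printoptions(legacy="1.25") restores the old scalar style where needed.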
20 changes: 10 additions & 10 deletions skfda/_utils/_utils.py
@@ -201,22 +201,22 @@ def _cartesian_product( # noqa: WPS234
>>> from skfda._utils import _cartesian_product
>>> axes = [[0,1],[2,3]]
>>> _cartesian_product(axes)
array([[0, 2],
[0, 3],
[1, 2],
[1, 3]])
array([[ 0, 2],
[ 0, 3],
[ 1, 2],
[ 1, 3]])
>>> axes = [[0,1],[2,3],[4]]
>>> _cartesian_product(axes)
array([[0, 2, 4],
[0, 3, 4],
[1, 2, 4],
[1, 3, 4]])
array([[ 0, 2, 4],
[ 0, 3, 4],
[ 1, 2, 4],
[ 1, 3, 4]])
>>> axes = [[0,1]]
>>> _cartesian_product(axes)
array([[0],
[1]])
array([[ 0],
[ 1]])
"""
cartesian = np.stack(np.meshgrid(*axes, indexing='ij'), -1)

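For reference, the core construction used by `_cartesian_product` can be sketched in a couple of lines; this is a simplified stand-in (the library function may handle extra options), with output shown under NumPy's default print options, which can differ in spacing from the doctest configuration used by the test suite:

>>> import numpy as np
>>> axes = [[0, 1], [2, 3]]
>>> np.stack(np.meshgrid(*axes, indexing='ij'), -1).reshape(-1, len(axes))
array([[0, 2],
       [0, 3],
       [1, 2],
       [1, 3]])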
2 changes: 1 addition & 1 deletion skfda/exploratory/outliers/_directional_outlyingness.py
@@ -316,7 +316,7 @@ class MSPlotOutlierDetector( # noqa: WPS230
>>> fd = skfda.FDataGrid(data_matrix, grid_points)
>>> out_detector = MSPlotOutlierDetector()
>>> out_detector.fit_predict(fd)
array([1, 1, 1, 1])
array([ 1, 1, 1, 1])
References:
Dai, Wenlin, and Genton, Marc G. "Multivariate functional data
6 changes: 3 additions & 3 deletions skfda/misc/_math.py
@@ -257,14 +257,14 @@ def inner_product(
>>> array1 = np.array([1, 2, 3])
>>> array2 = np.array([4, 5, 6])
>>> inner_product(array1, array2)
32
np.int64(32)
If the arrays contain more than one sample
>>> array1 = np.array([[1, 2, 3], [2, 3, 4]])
>>> array2 = np.array([[4, 5, 6], [1, 1, 1]])
>>> inner_product(array1, array2)
array([32, 9])
array([ 32, 9])
The inner product of the :math:`f(x) = x` and the constant
:math:`y=1` defined over the interval :math:`[0,1]` is the area of
@@ -592,7 +592,7 @@ def cosine_similarity(
>>> array1 = np.array([1, 2, 3])
>>> array2 = np.array([4, 5, 6])
>>> cosine_similarity(array1, array2)
0.9746318461970762
np.float64(0.9746318461970762)
If the arrays contain more than one sample
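The updated expected values can be reproduced with plain NumPy (variable names here are illustrative):

>>> import numpy as np
>>> a, b = np.array([1, 2, 3]), np.array([4, 5, 6])
>>> int(np.sum(a * b))
32
>>> float(np.sum(a * b) / (np.linalg.norm(a) * np.linalg.norm(b)))
0.9746318461970762

Wrapping the results in int/float keeps the output identical across NumPy versions; without the wrappers, NumPy 2 prints np.int64(32) and np.float64(0.9746318461970762), matching the updated doctests.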
2 changes: 1 addition & 1 deletion skfda/misc/covariances.py
@@ -29,7 +29,7 @@ def _squared_norms(x: NDArrayFloat, y: NDArrayFloat) -> NDArrayFloat:

def _transform_to_2d(t: ArrayLike) -> NDArrayFloat:
"""Transform 1d arrays in column vectors."""
t = np.asfarray(t)
t = np.asarray(t, dtype=np.float64)

dim = t.ndim
assert dim <= 2
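np.asfarray was removed from the NumPy namespace in 2.0, hence the explicit spelling above; the replacement behaves the same for the default float64 case and works on both NumPy 1.x and 2.x:

>>> import numpy as np
>>> np.asarray([1, 2, 3], dtype=np.float64)
array([1., 2., 3.])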
16 changes: 8 additions & 8 deletions skfda/misc/operators/_linear_differential_operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -616,14 +616,14 @@ def _optimized_operator_evaluation_in_grid(
[ 0., 0., 0., 0., 1., -2., 1., 4.],
[ 0., 0., 0., 0., 0., 1., -2., -5.],
[ 0., 0., 0., 0., 0., 0., 1., 2.]]),
array([[0, 1],
[0, 2],
[0, 3],
[0, 4],
[3, 7],
[4, 7],
[5, 7],
[6, 7]]))
array([[ 0, 1],
[ 0, 2],
[ 0, 3],
[ 0, 4],
[ 3, 7],
[ 4, 7],
[ 5, 7],
[ 6, 7]]))
Explanation:
Each row of the first array contains the values of the linear operator
4 changes: 2 additions & 2 deletions skfda/ml/classification/_centroid_classifiers.py
@@ -63,7 +63,7 @@ class and return a :class:`FData` object with only one sample
We can predict the class of new samples
>>> neigh.predict(fd[::2]) # Predict labels for even samples
array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
See also:
:class:`~skfda.ml.classification.DTMClassifier`
@@ -164,7 +164,7 @@ class DTMClassifier(NearestCentroid[Input, Target]):
We can predict the class of new samples
>>> clf.predict(X_test) # Predict labels for test samples
array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
Finally, we calculate the mean accuracy for the test data
18 changes: 9 additions & 9 deletions skfda/ml/classification/_depth_classifiers.py
@@ -106,8 +106,8 @@ class DDClassifier(
We can predict the class of new samples
>>> clf.predict(X_test)
array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
Finally, we calculate the mean accuracy for the test data
@@ -278,8 +278,8 @@ class DDGClassifier(
We can predict the class of new samples
>>> clf.predict(X_test)
array([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
array([ 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
Finally, we calculate the mean accuracy for the test data
@@ -299,8 +299,8 @@ class DDGClassifier(
>>> clf.fit(X_train, y_train)
DDGClassifier(...)
>>> clf.predict(X_test)
array([1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
array([ 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
>>> clf.score(X_test, y_test)
0.875
@@ -475,7 +475,7 @@ class _ArgMaxClassifier(
We can predict the class of new samples
>>> clf.predict(X) # Predict labels for test samples
array([1, 0, 0])
array([ 1, 0, 0])
"""

def fit(self, X: NDArrayFloat, y: Target) -> _ArgMaxClassifier[Target]:
@@ -543,8 +543,8 @@ class MaximumDepthClassifier(DDGClassifier[Input, Target]):
We can predict the class of new samples
>>> clf.predict(X_test) # Predict labels for test samples
array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1])
Finally, we calculate the mean accuracy for the test data
4 changes: 2 additions & 2 deletions skfda/ml/classification/_neighbors_classifiers.py
@@ -89,7 +89,7 @@ class KNeighborsClassifier(
We can predict the class of new samples
>>> neigh.predict(fd[::2]) # Predict labels for even samples
array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
And the estimated probabilities.
@@ -254,7 +254,7 @@ class RadiusNeighborsClassifier(
We can predict the class of new samples.
>>> neigh.predict(fd[::2]) # Predict labels for even samples
array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
array([ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1])
See also:
:class:`~skfda.ml.classification.KNeighborsClassifier`
6 changes: 3 additions & 3 deletions skfda/ml/classification/_qda.py
@@ -95,9 +95,9 @@ class QuadraticDiscriminantAnalysis(
We can predict the class of new samples.
>>> list(qda.predict(X_test))
[0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1,
1, 0, 1, 0, 1, 0, 1, 1]
>>> qda.predict(X_test)
array([ 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1,
1, 0, 1, 0, 1, 0, 1, 1], dtype=int8)
Finally, we calculate the mean accuracy for the test data.
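The QDA doctest now shows the array directly instead of converting it to a list, so the repr includes the non-default dtype. A standalone illustration:

>>> import numpy as np
>>> np.array([0, 1, 1], dtype=np.int8)
array([0, 1, 1], dtype=int8)

Only dtypes other than the platform default (typically int64) are spelled out in the repr, on NumPy 1.x and 2.x alike.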
2 changes: 1 addition & 1 deletion skfda/ml/clustering/_hierarchical.py
@@ -144,7 +144,7 @@ class AgglomerativeClustering( # noqa: WPS230
>>> clustering.fit(X)
AgglomerativeClustering(...)
>>> clustering.labels_.astype(np.int_)
array([0, 0, 1, 0, 0, 1])
array([ 0, 0, 1, 0, 0, 1])
"""

LinkageCriterion = LinkageCriterion
22 changes: 11 additions & 11 deletions skfda/ml/regression/_historical_linear_model.py
@@ -264,19 +264,19 @@ class HistoricalLinearRegression(
>>> import scipy.integrate
>>> random_state = np.random.RandomState(0)
>>> data_matrix = random_state.choice(10, size=(8, 6))
>>> data_matrix = random_state.choice(10, size=(8, 6)).astype(float)
>>> data_matrix
array([[5, 0, 3, 3, 7, 9],
[3, 5, 2, 4, 7, 6],
[8, 8, 1, 6, 7, 7],
[8, 1, 5, 9, 8, 9],
[4, 3, 0, 3, 5, 0],
[2, 3, 8, 1, 3, 3],
[3, 7, 0, 1, 9, 9],
[0, 4, 7, 3, 2, 7]])
>>> intercept = random_state.choice(10, size=(1, 6))
array([[ 5., 0., 3., 3., 7., 9.],
[ 3., 5., 2., 4., 7., 6.],
[ 8., 8., 1., 6., 7., 7.],
[ 8., 1., 5., 9., 8., 9.],
[ 4., 3., 0., 3., 5., 0.],
[ 2., 3., 8., 1., 3., 3.],
[ 3., 7., 0., 1., 9., 9.],
[ 0., 4., 7., 3., 2., 7.]])
>>> intercept = random_state.choice(10, size=(1, 6)).astype(float)
>>> intercept
array([[2, 0, 0, 4, 5, 5]])
array([[ 2., 0., 0., 4., 5., 5.]])
>>> y_data = scipy.integrate.cumulative_trapezoid(
... data_matrix,
... initial=0,
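Two details in this update: the integer samples are cast with .astype(float) so every expected array prints in float style, and scipy.integrate.cumulative_trapezoid is the current name of the routine formerly known as cumtrapz. A small self-contained example of the call pattern (made-up data):

>>> import numpy as np
>>> import scipy.integrate
>>> data_matrix = np.arange(6).reshape(2, 3).astype(float)
>>> scipy.integrate.cumulative_trapezoid(data_matrix, initial=0, axis=-1)
array([[0. , 0.5, 2. ],
       [0. , 3.5, 8. ]])

Passing initial=0 prepends the starting value so the output keeps the same shape as the input.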
@@ -44,7 +44,7 @@


def _transform_to_2d(t: ArrayLike) -> NDArrayFloat:
t = np.asfarray(t)
t = np.asarray(t, dtype=np.float64)

dim = t.ndim
assert dim <= 2
@@ -907,7 +907,7 @@ def fit( # type: ignore[override] # noqa: D102
"""Recursive maxima hunting algorithm."""
self.features_shape_ = X.data_matrix.shape[1:]

y = np.asfarray(y)
y = np.asarray(y, dtype=np.float64)

self.correction_ = (
UniformCorrection()
@@ -226,7 +226,7 @@ class NumberCrossingsTransformer(
FDataGrid created.
>>> tf = NumberCrossingsTransformer(levels=0, direction="up")
>>> tf.fit_transform(fd_grid)
array([[2]])
array([[ 2]])
"""

def __init__(
2 changes: 1 addition & 1 deletion skfda/preprocessing/feature_construction/_functions.py
@@ -349,7 +349,7 @@ def number_crossings(
FDataGrid created.
>>> number_crossings(fd_grid, levels=0, direction="up")
array([[2]])
array([[ 2]])
"""
# This is only defined for univariate functions
check_fdata_dimensions(fd, dim_domain=1, dim_codomain=1)
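As a rough sketch of what an up-crossing count means here (a hypothetical standalone version, not the library's implementation):

>>> import numpy as np
>>> values = np.array([1, -1, 2, -2, 3])  # dips below zero and comes back up twice
>>> above = values > 0
>>> int(np.sum(~above[:-1] & above[1:]))
2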
@@ -107,7 +107,7 @@ class PerClassTransformer(TransformerMixin[Input, Output, NDArrayInt]):
Finally we can predict and check the score:
>>> neigh1.predict(X_test1)
array([0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
array([ 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1], dtype=int8)
>>> round(neigh1.score(X_test1, y_test1), 3)
@@ -145,7 +145,7 @@ class PerClassTransformer(TransformerMixin[Input, Output, NDArrayInt]):
>>> neigh2 = KNeighborsClassifier()
>>> neigh2 = neigh2.fit(X_train2, y_train2)
>>> neigh2.predict(X_test2)
array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
array([ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1], dtype=int8)
>>> round(neigh2.score(X_test2, y_test2), 3)
12 changes: 6 additions & 6 deletions skfda/preprocessing/registration/validation.py
@@ -103,10 +103,10 @@ class AmplitudePhaseDecompositionStats():
:func:`mse_r_squared`, returned when `return_stats` is `True`.
Args:
r_square (float): Squared correlation index :math:`R^2`.
mse_amplitude (float): Mean square error of amplitude
r_square: Squared correlation index :math:`R^2`.
mse_amplitude: Mean square error of amplitude
:math:`\text{MSE}_{amp}`.
mse_phase (float): Mean square error of phase :math:`\text{MSE}_{pha}`.
mse_phase: Mean square error of phase :math:`\text{MSE}_{pha}`.
c_r (float): Constant :math:`C_R`.
"""
@@ -297,10 +297,10 @@ def stats(
X_mean = X.mean()
y_mean = y.mean()

c_r = np.sum(l2_norm(X)**2) / np.sum(l2_norm(y)**2)
c_r = float(np.sum(l2_norm(X)**2) / np.sum(l2_norm(y)**2))

mse_amplitude = c_r * np.mean(l2_distance(y, y.mean())**2)
mse_phase = (c_r * l2_norm(y_mean)**2 - l2_norm(X_mean)**2).item()
mse_amplitude = float(c_r * np.mean(l2_distance(y, y.mean())**2))
mse_phase = float(c_r * l2_norm(y_mean)**2 - l2_norm(X_mean)**2)

# Should be equal to np.mean(l2_distance(X, X_mean)**2)
mse_total = mse_amplitude + mse_phase
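The float(...) wrappers make the returned statistics plain Python floats rather than zero-dimensional NumPy scalars, so their reprs no longer depend on the NumPy version. Both extraction idioms behave the same here:

>>> import numpy as np
>>> x = np.array([1.0, 2.0])
>>> float(np.sum(x ** 2))
5.0
>>> np.sum(x ** 2).item()
5.0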
2 changes: 1 addition & 1 deletion skfda/preprocessing/smoothing/validation.py
@@ -210,7 +210,7 @@ class SmoothingParameterSearch(
>>> np.array(grid.cv_results_['mean_test_score']).round(2)
array([-11.67, -12.37])
>>> round(grid.best_score_, 2)
-11.67
np.float64(-11.67)
>>> grid.best_params_['kernel_estimator__n_neighbors']
2
>>> grid.best_estimator_.hat_matrix().round(2)
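The best_score_ doctest changed because round() applied to a NumPy scalar returns another NumPy scalar, whose repr under NumPy 2 carries the type. With an illustrative value:

>>> import numpy as np
>>> round(np.float64(-11.6712), 2)
np.float64(-11.67)

Converting with float() first would keep the old-style -11.67 output.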
4 changes: 2 additions & 2 deletions skfda/representation/basis/_fdatabasis.py
@@ -435,7 +435,7 @@ def sum( # noqa: WPS125
if min_count > 0:
valid = ~np.isnan(self.coefficients)
n_valid = np.sum(valid, axis=0)
coefs[n_valid < min_count] = np.NaN
coefs[n_valid < min_count] = np.nan

return self.copy(
coefficients=coefs,
@@ -1026,7 +1026,7 @@ def construct_array_type(cls) -> Type[FDataBasis]: # noqa: D102
def _na_repr(self) -> FDataBasis:
return FDataBasis(
basis=self.basis,
coefficients=((np.NaN,) * self.basis.n_basis,),
coefficients=((np.nan,) * self.basis.n_basis,),
)

def __eq__(self, other: Any) -> bool:
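The np.NaN alias (like np.Inf) was removed in NumPy 2.0; lowercase np.nan exists on both major versions, so the substitution is a pure rename. A quick sanity check:

>>> import numpy as np
>>> coefficients = np.full((2, 3), np.nan)
>>> bool(np.isnan(coefficients).all())
True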
4 changes: 2 additions & 2 deletions skfda/representation/grid.py
@@ -591,7 +591,7 @@ def sum( # noqa: WPS125
if min_count > 0:
valid = ~np.isnan(self.data_matrix)
n_valid = np.sum(valid, axis=0)
data[n_valid < min_count] = np.NaN
data[n_valid < min_count] = np.nan

return self.copy(
data_matrix=data,
@@ -1518,7 +1518,7 @@ def _na_repr(self) -> FDataGrid:
+ (self.dim_codomain,)
)

data_matrix = np.full(shape=shape, fill_value=np.NaN)
data_matrix = np.full(shape=shape, fill_value=np.nan)

return FDataGrid(
grid_points=self.grid_points,