Backports v0.13.1 (#2886)
* Speedup is_uniform for PandasDataset. (#2878)

* Fix docstrings in torch lightning modules (#2880)

* Fix default scale in torch DeepAR (#2885)

---------

Co-authored-by: Jasper <[email protected]>
lostella and Jasper authored May 24, 2023
1 parent 2730601 commit 33cffc2
Showing 10 changed files with 41 additions and 46 deletions.
6 changes: 3 additions & 3 deletions src/gluonts/dataset/pandas.py
@@ -15,7 +15,7 @@
 
 import logging
 from dataclasses import dataclass, field, InitVar
-from typing import Any, Iterable, Optional, Type, Union
+from typing import Any, Iterable, Optional, Type, Union, cast
 
 import numpy as np
 import pandas as pd
@@ -350,5 +350,5 @@ def is_uniform(index: pd.PeriodIndex) -> bool:
     >>> is_uniform(pd.DatetimeIndex(ts).to_period("2H"))
     False
     """
-    other = pd.period_range(index[0], periods=len(index), freq=index.freq)
-    return (other == index).all()
+
+    return cast(bool, np.all(np.diff(index.asi8) == index.freq.n))
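
For context, a small sketch of how the faster check behaves. The new implementation compares the differences of consecutive period ordinals (index.asi8) against index.freq.n, so a single missing period breaks uniformity. The timestamps below are made up for illustration.

import pandas as pd

from gluonts.dataset.pandas import is_uniform

# Evenly spaced 2-hourly index: consecutive ordinals all differ by freq.n (= 2).
even = pd.period_range("2021-07-01 00:00", periods=4, freq="2H")
print(is_uniform(even))  # True

# 04:00 is missing, so one ordinal difference is 4 instead of 2.
ts = ["2021-07-01 00:00", "2021-07-01 02:00", "2021-07-01 06:00"]
print(is_uniform(pd.DatetimeIndex(ts).to_period("2H")))  # False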
11 changes: 5 additions & 6 deletions src/gluonts/torch/model/d_linear/lightning_module.py
@@ -30,15 +30,14 @@ class DLinearLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        ``DLinearModel`` to be trained.
+    model_kwargs
+        Keyword arguments to construct the ``DLinearModel`` to be trained.
     loss
-        Loss function to be used for training,
-        default: ``NegativeLogLikelihood()``.
+        Loss function to be used for training.
     lr
-        Learning rate, default: ``1e-3``.
+        Learning rate.
     weight_decay
-        Weight decay regularization parameter, default: ``1e-8``.
+        Weight decay regularization parameter.
     """
 
     @validated()
2 changes: 1 addition & 1 deletion src/gluonts/torch/model/deepar/estimator.py
@@ -166,7 +166,7 @@ def __init__(
         distr_output: DistributionOutput = StudentTOutput(),
         loss: DistributionLoss = NegativeLogLikelihood(),
         scaling: bool = True,
-        default_scale: float = 0.0,
+        default_scale: Optional[float] = None,
         lags_seq: Optional[List[int]] = None,
         time_features: Optional[List[TimeFeature]] = None,
         num_parallel_samples: int = 100,
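
At the estimator level, the change only affects series with no usable scale information: with default_scale=None the scaler falls back to its own internal default instead of a hard-coded 0.0. A minimal sketch; freq and prediction_length are the usual required DeepAREstimator arguments and are unrelated to this change.

from gluonts.torch.model.deepar import DeepAREstimator

# Illustrative values only; with the fix, default_scale no longer needs to be
# passed explicitly to avoid a 0.0 fallback scale.
estimator = DeepAREstimator(
    freq="H",
    prediction_length=24,
    scaling=True,
    default_scale=None,  # new default; previously 0.0
)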
13 changes: 6 additions & 7 deletions src/gluonts/torch/model/deepar/lightning_module.py
@@ -33,17 +33,16 @@ class DeepARLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        ``DeepARModel`` to be trained.
+    model_kwargs
+        Keyword arguments to construct the ``DeepARModel`` to be trained.
     loss
-        Loss function to be used for training,
-        default: ``NegativeLogLikelihood()``.
+        Loss function to be used for training.
     lr
-        Learning rate, default: ``1e-3``.
+        Learning rate.
     weight_decay
-        Weight decay regularization parameter, default: ``1e-8``.
+        Weight decay regularization parameter.
     patience
-        Patience parameter for learning rate scheduler, default: ``10``.
+        Patience parameter for learning rate scheduler.
     """
 
     @validated()
2 changes: 1 addition & 1 deletion src/gluonts/torch/model/deepar/module.py
@@ -105,7 +105,7 @@ def __init__(
         distr_output: DistributionOutput = StudentTOutput(),
         lags_seq: Optional[List[int]] = None,
         scaling: bool = True,
-        default_scale: float = 0.0,
+        default_scale: Optional[float] = None,
         num_parallel_samples: int = 100,
     ) -> None:
         super().__init__()
11 changes: 5 additions & 6 deletions src/gluonts/torch/model/lag_tst/lightning_module.py
@@ -30,15 +30,14 @@ class LagTSTLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        ``LagTSTModel`` to be trained.
+    model_kwargs
+        Keyword arguments to construct the ``LagTSTModel`` to be trained.
     loss
-        Loss function to be used for training,
-        default: ``NegativeLogLikelihood()``.
+        Loss function to be used for training.
     lr
-        Learning rate, default: ``1e-3``.
+        Learning rate.
     weight_decay
-        Weight decay regularization parameter, default: ``1e-8``.
+        Weight decay regularization parameter.
     """
 
     @validated()
10 changes: 5 additions & 5 deletions src/gluonts/torch/model/mqf2/lightning_module.py
@@ -35,14 +35,14 @@ class MQF2MultiHorizonLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        An MQF2MultiHorizonModel instance
+    model_kwargs
+        Keyword arguments to construct the ``MQF2MultiHorizonModel`` to be trained.
     loss
-        Distribution loss
+        Distribution loss.
     lr
-        Learning rate
+        Learning rate.
     weight_decay
-        Weight decay during training
+        Weight decay during training.
     patience
         Patience parameter for learning rate scheduler, default: ``10``.
     """
11 changes: 5 additions & 6 deletions src/gluonts/torch/model/patch_tst/lightning_module.py
@@ -30,15 +30,14 @@ class PatchTSTLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        ``PatchTSTModel`` to be trained.
+    model_kwargs
+        Keyword arguments to construct the ``PatchTSTModel`` to be trained.
     loss
-        Loss function to be used for training,
-        default: ``NegativeLogLikelihood()``.
+        Loss function to be used for training.
     lr
-        Learning rate, default: ``1e-3``.
+        Learning rate.
     weight_decay
-        Weight decay regularization parameter, default: ``1e-8``.
+        Weight decay regularization parameter.
     """
 
     @validated()
11 changes: 5 additions & 6 deletions src/gluonts/torch/model/simple_feedforward/lightning_module.py
@@ -30,15 +30,14 @@ class SimpleFeedForwardLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        ``SimpleFeedForwardModel`` to be trained.
+    model_kwargs
+        Keyword arguments to construct the ``SimpleFeedForwardModel`` to be trained.
     loss
-        Loss function to be used for training,
-        default: ``NegativeLogLikelihood()``.
+        Loss function to be used for training.
     lr
-        Learning rate, default: ``1e-3``.
+        Learning rate.
     weight_decay
-        Weight decay regularization parameter, default: ``1e-8``.
+        Weight decay regularization parameter.
     """
 
     @validated()
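
The docstring edits above reflect that these lightning modules are built from model_kwargs rather than a prebuilt model. A hedged sketch for the simple feed-forward case; the keys inside model_kwargs are assumptions about SimpleFeedForwardModel's constructor and are not taken from this diff.

from gluonts.torch.model.simple_feedforward.lightning_module import (
    SimpleFeedForwardLightningModule,
)

# model_kwargs is forwarded to the SimpleFeedForwardModel constructor;
# the keys below are assumed for illustration.
module = SimpleFeedForwardLightningModule(
    model_kwargs={
        "prediction_length": 24,
        "context_length": 48,
    },
    lr=1e-3,
    weight_decay=1e-8,
)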
10 changes: 5 additions & 5 deletions src/gluonts/torch/model/tft/lightning_module.py
@@ -31,14 +31,14 @@ class TemporalFusionTransformerLightningModule(pl.LightningModule):
     Parameters
     ----------
-    model
-        ``TemporalFusionTransformerModel`` to be trained.
+    model_kwargs
+        Keyword arguments to construct the ``TemporalFusionTransformerModel`` to be trained.
     lr
-        Learning rate, default: ``1e-3``.
+        Learning rate.
     weight_decay
-        Weight decay regularization parameter, default: ``1e-8``.
+        Weight decay regularization parameter.
     patience
-        Patience parameter for learning rate scheduler, default: ``10``.
+        Patience parameter for learning rate scheduler.
     """
 
     @validated()
