[python-package][PySpark] Expose Training and Validation Metrics #11133
@@ -0,0 +1,43 @@
"""Xgboost training summary integration submodule."""

from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class XGBoostTrainingSummary:
    """
    A class that holds the training and validation objective history
    of an XGBoost model during its training process.
    """

    train_objective_history: Dict[str, List[float]] = field(default_factory=dict)
    validation_objective_history: Dict[str, List[float]] = field(default_factory=dict)

    @staticmethod
    def from_metrics(
        metrics: Dict[str, Dict[str, List[float]]]
    ) -> "XGBoostTrainingSummary":
        """
        Create an XGBoostTrainingSummary instance from a nested dictionary of metrics.

        Parameters
        ----------
        metrics : dict of str to dict of str to list of float
            A dictionary containing training and validation metrics.
            Example format:
                {
                    "training": {"logloss": [0.1, 0.08]},
                    "validation": {"logloss": [0.12, 0.1]}
                }

        Returns
        -------
        A new instance of XGBoostTrainingSummary.
        """
        train_objective_history = metrics.get("training", {})
        validation_objective_history = metrics.get("validation", {})
        return XGBoostTrainingSummary(
            train_objective_history, validation_objective_history
        )
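For illustration, here is a minimal usage sketch of the class above; the metrics dict mirrors the example format given in the docstring:

    metrics = {
        "training": {"logloss": [0.1, 0.08]},
        "validation": {"logloss": [0.12, 0.1]},
    }
    summary = XGBoostTrainingSummary.from_metrics(metrics)

    # Each history maps a metric name to one value per boosting round.
    assert summary.train_objective_history == {"logloss": [0.1, 0.08]}
    assert summary.validation_objective_history == {"logloss": [0.12, 0.1]}

    # Missing keys fall back to empty dicts, e.g. when no validation set was used.
    assert not XGBoostTrainingSummary.from_metrics({}).validation_objective_history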
@@ -0,0 +1,233 @@
import logging
Review comment: I'm wondering if we could put the tests in this file into the existing test_spark_local.py and reuse the existing test data?

Author reply: Yes, I can move them there without much effort; let me know if you'd like me to proceed with that. If we decide to keep the tests in this file, I can either leave the example data here as it is or, as you suggested, import it from test_spark_local.py for better modularity and data reuse. Another option is to store all shared data in a separate file, allowing both test_spark_local.py and test_xgboost_summary.py to import what they need from it. Let me know what you think; I have no strong opinion on this.

Review comment: Yeah, that's a good point. Originally I wanted to separate the tests per estimator (XGBoostClassifier/Regressor/Ranker) rather than per feature, so the same dataset can be shared across different features.
from typing import Union

import pytest
from pyspark.ml.linalg import Vectors
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import lit

from xgboost import testing as tm
from xgboost.spark import (
    SparkXGBClassifier,
    SparkXGBClassifierModel,
    SparkXGBRanker,
    SparkXGBRankerModel,
    SparkXGBRegressor,
    SparkXGBRegressorModel,
)

from .test_spark_local import spark as spark_local

logging.getLogger("py4j").setLevel(logging.INFO)
Review comment: Is this for debugging?

Author reply: Yes, and since it is also set in test_spark_local.py, I kept it. Do you prefer that we remove it?
pytestmark = [tm.timeout(60), pytest.mark.skipif(**tm.no_spark())]


@pytest.fixture
def clf_and_reg_df(spark_local: SparkSession) -> DataFrame:
    """
    Fixture to create a DataFrame with example data.
    """
    data = [
        (Vectors.dense([1.0, 2.0, 3.0]), 1),
        (Vectors.dense([4.0, 5.0, 6.0]), 1),
        (Vectors.dense([9.0, 4.0, 8.0]), 0),
        (Vectors.dense([6.0, 2.0, 2.0]), 1),
        (Vectors.dense([5.0, 4.0, 3.0]), 0),
    ]
    columns = ["features", "label"]
    return spark_local.createDataFrame(data, schema=columns)


@pytest.fixture
def clf_and_reg_df_with_validation(clf_and_reg_df: DataFrame) -> DataFrame:
    """
    Fixture to split the example DataFrame into training and validation sets,
    add a validation indicator column, and merge them back together.
    """
    # Split the data into training and validation sets (80-20 split)
    train_df, validation_df = clf_and_reg_df.randomSplit([0.8, 0.2], seed=42)

    # Add a column to indicate validation rows
    train_df = train_df.withColumn("validation_indicator_col", lit(False))
    validation_df = validation_df.withColumn("validation_indicator_col", lit(True))
    return train_df.union(validation_df)
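For context, the estimators consume this fixture through their validation_indicator_col parameter, exactly as the tests below do; rows where the flag is True form the evaluation set. A minimal sketch, assuming the fixture's DataFrame is available as df:

    # Hypothetical usage: df is the DataFrame produced by the fixture above.
    model = SparkXGBClassifier(
        eval_metric="logloss",
        validation_indicator_col="validation_indicator_col",
        n_estimators=10,
    ).fit(df)

    # The summary exposed by this PR then carries both objective histories.
    train_history = model.training_summary.train_objective_history["logloss"]
    valid_history = model.training_summary.validation_objective_history["logloss"]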
@pytest.fixture
def ranker_df(spark_local: SparkSession) -> DataFrame:
    """
    Fixture to create a DataFrame with sample data for ranking tasks.
    """
    data = [
        (Vectors.dense([1.0, 2.0, 3.0]), 0, 0),
        (Vectors.dense([4.0, 5.0, 6.0]), 1, 0),
        (Vectors.dense([9.0, 4.0, 8.0]), 0, 0),
        (Vectors.dense([6.0, 2.0, 2.0]), 1, 0),
        (Vectors.dense([5.0, 4.0, 3.0]), 0, 0),
    ]
    columns = ["features", "label", "qid"]
    return spark_local.createDataFrame(data, schema=columns)


@pytest.fixture
def ranker_df_with_validation(ranker_df: DataFrame) -> DataFrame:
    """
    Fixture to split the ranking DataFrame into training and validation sets,
    add a validation indicator, and merge them back into a single DataFrame.
    """
    # Split the data into training and validation sets (80-20 split)
    train_df, validation_df = ranker_df.randomSplit([0.8, 0.2], seed=42)

    # Add a column to indicate whether the row is from the validation set
    train_df = train_df.withColumn("validation_indicator_col", lit(False))
    validation_df = validation_df.withColumn("validation_indicator_col", lit(True))

    # Union the training and validation DataFrames
    return train_df.union(validation_df)


class TestXGBoostTrainingSummary:
    @staticmethod
    def assert_empty_validation_objective_history(
        xgb_model: Union[
            SparkXGBClassifierModel, SparkXGBRankerModel, SparkXGBRegressorModel
        ]
    ) -> None:
        assert hasattr(xgb_model.training_summary, "validation_objective_history")
        assert isinstance(xgb_model.training_summary.validation_objective_history, dict)
        assert not xgb_model.training_summary.validation_objective_history
Review comment: I'm wondering if we could get the evaluation results from XGBoost itself and the training summary from xgboost-pyspark on the same dataset, and then check that they are equal? You can see some tests in test_spark_local.py doing the same comparison.

Author reply: Yes, absolutely, thank you for pointing this out! I tested this on a simple DataFrame locally, and the results matched perfectly. We should definitely add such tests; I'll take care of that! (A sketch of such a comparison appears after the diff below.)

    @staticmethod
    def assert_non_empty_training_objective_history(
        xgb_model: Union[
            SparkXGBClassifierModel, SparkXGBRankerModel, SparkXGBRegressorModel
        ],
        metric: str,
        n_estimators: int,
    ) -> None:
        assert hasattr(xgb_model.training_summary, "train_objective_history")
        assert isinstance(xgb_model.training_summary.train_objective_history, dict)

        assert metric in xgb_model.training_summary.train_objective_history
        assert (
            len(xgb_model.training_summary.train_objective_history[metric])
            == n_estimators
        )

        for (
            training_metric,
            loss_evolution,
        ) in xgb_model.training_summary.train_objective_history.items():
            assert isinstance(training_metric, str)
            assert len(loss_evolution) == n_estimators
            assert all(isinstance(value, float) for value in loss_evolution)

    @staticmethod
    def assert_non_empty_validation_objective_history(
        xgb_model: Union[
            SparkXGBClassifierModel, SparkXGBRankerModel, SparkXGBRegressorModel
        ],
        metric: str,
        n_estimators: int,
    ) -> None:
        assert hasattr(xgb_model.training_summary, "validation_objective_history")
        assert isinstance(xgb_model.training_summary.validation_objective_history, dict)

        assert metric in xgb_model.training_summary.validation_objective_history
        assert (
            len(xgb_model.training_summary.validation_objective_history[metric])
            == n_estimators
        )

        for (
            validation_metric,
            loss_evolution,
        ) in xgb_model.training_summary.validation_objective_history.items():
            assert isinstance(validation_metric, str)
            assert len(loss_evolution) == n_estimators
            assert all(isinstance(value, float) for value in loss_evolution)
    @pytest.mark.parametrize(
        "spark_xgb_estimator, metric",
        [
            (SparkXGBClassifier, "logloss"),
            (SparkXGBClassifier, "error"),
            (SparkXGBRegressor, "rmse"),
            (SparkXGBRegressor, "mae"),
        ],
    )
    def test_xgb_summary_classification_regression(
        self,
        clf_and_reg_df: DataFrame,
        spark_xgb_estimator: Union[SparkXGBClassifier, SparkXGBRegressor],
        metric: str,
    ) -> None:
        n_estimators = 10
        spark_xgb_model = spark_xgb_estimator(
            eval_metric=metric, n_estimators=n_estimators
        ).fit(clf_and_reg_df)
        self.assert_non_empty_training_objective_history(
            spark_xgb_model, metric, n_estimators
        )
        self.assert_empty_validation_objective_history(spark_xgb_model)

    @pytest.mark.parametrize(
        "spark_xgb_estimator, metric",
        [
            (SparkXGBClassifier, "logloss"),
            (SparkXGBClassifier, "error"),
            (SparkXGBRegressor, "rmse"),
            (SparkXGBRegressor, "mae"),
        ],
    )
    def test_xgb_summary_classification_regression_with_validation(
        self,
        clf_and_reg_df_with_validation: DataFrame,
        spark_xgb_estimator: Union[SparkXGBClassifier, SparkXGBRegressor],
        metric: str,
    ) -> None:
        n_estimators = 10
        spark_xgb_model = spark_xgb_estimator(
            eval_metric=metric,
            validation_indicator_col="validation_indicator_col",
            n_estimators=n_estimators,
        ).fit(clf_and_reg_df_with_validation)

        self.assert_non_empty_training_objective_history(
            spark_xgb_model, metric, n_estimators
        )
        self.assert_non_empty_validation_objective_history(
            spark_xgb_model, metric, n_estimators
        )

    @pytest.mark.parametrize("metric", ["ndcg", "map"])
    def test_xgb_summary_ranker(self, ranker_df: DataFrame, metric: str) -> None:
        n_estimators = 10
        xgb_ranker = SparkXGBRanker(
            qid_col="qid", eval_metric=metric, n_estimators=n_estimators
        )
        xgb_ranker_model = xgb_ranker.fit(ranker_df)

        self.assert_non_empty_training_objective_history(
            xgb_ranker_model, metric, n_estimators
        )
        self.assert_empty_validation_objective_history(xgb_ranker_model)

    @pytest.mark.parametrize("metric", ["ndcg", "map"])
    def test_xgb_summary_ranker_with_validation(
        self, ranker_df_with_validation: DataFrame, metric: str
    ) -> None:
        n_estimators = 10
        xgb_ranker_model = SparkXGBRanker(
            qid_col="qid",
            validation_indicator_col="validation_indicator_col",
            eval_metric=metric,
            n_estimators=n_estimators,
        ).fit(ranker_df_with_validation)

        self.assert_non_empty_training_objective_history(
            xgb_ranker_model, metric, n_estimators
        )
        self.assert_non_empty_validation_objective_history(
            xgb_ranker_model, metric, n_estimators
        )
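Following the review suggestion above, here is a sketch of a cross-check between native XGBoost and xgboost-pyspark on the same data. It assumes the data fits in a single partition (num_workers=1) so that distributed training reproduces single-node results, and the helper name is illustrative, not part of this PR:

    import numpy as np
    import xgboost

    # Same toy rows as the clf_and_reg_df fixture.
    X = np.array(
        [
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [9.0, 4.0, 8.0],
            [6.0, 2.0, 2.0],
            [5.0, 4.0, 3.0],
        ]
    )
    y = np.array([1, 1, 0, 1, 0])

    # Native XGBoost: record the per-round training logloss via evals_result().
    clf = xgboost.XGBClassifier(n_estimators=10, eval_metric="logloss")
    clf.fit(X, y, eval_set=[(X, y)])
    native_history = clf.evals_result()["validation_0"]["logloss"]

    def assert_matches_native(spark_model) -> None:
        """Hypothetical helper: compare the Spark summary to the native history."""
        spark_history = spark_model.training_summary.train_objective_history["logloss"]
        np.testing.assert_allclose(spark_history, native_history, rtol=1e-6)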
Review comment: @trivialfis, could you check whether it is OK to enable this by default?