-
Notifications
You must be signed in to change notification settings - Fork 59
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Adds a standardized benchmark: categorizes existing datasets into 9 categories, spanning three task types (binary classification, multi-class classification, regression) and three scales (small, medium, large). Usage is as follows: ```python dataset = DataFrameBenchmark(root, task_type=TaskType.BINARY_CLASSIFICATION, scale = "medium", idx = 2) # Get fixed split train_dataset = dataset.get_split_dataset('train') val_dataset = dataset.get_split_dataset('val') test_dataset = dataset.get_split_dataset('test') ``` Dataset documentation [here](https://pyg-team-pytorch-frame--107.com.readthedocs.build/en/107/generated/torch_frame.datasets.DataFrameBenchmark.html#torch_frame.datasets.DataFrameBenchmark).
- Loading branch information
Showing
12 changed files
with
1,034 additions
and
18 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,134 @@ | ||
import pytest | ||
|
||
from torch_frame.datasets import DataFrameBenchmark | ||
from torch_frame.typing import TaskType | ||
|
||
|
||
@pytest.mark.parametrize('scale', ["small", "medium", "large"])
@pytest.mark.parametrize('task_type', [
    TaskType.BINARY_CLASSIFICATION, TaskType.MULTICLASS_CLASSIFICATION,
    TaskType.REGRESSION
])
def test_data_frame_benchmark_match(task_type, scale):
    """Make sure each (task_type, scale, idx) triple maps to a fixed
    underlying dataset.

    New datasets can be appended to a category, but the existing mapping
    needs to be preserved — so only the known prefix of each category is
    checked, not the full length (except where a category is expected to
    be empty).
    """
    # Frozen mapping from (task_type, scale) to the expected prefix of
    # (dataset class name, init kwargs) pairs. Append new entries at the
    # end of a list; never reorder or remove existing ones.
    expected_map = {
        (TaskType.BINARY_CLASSIFICATION, 'small'): [
            ('AdultCensusIncome', {}),
            ('Mushroom', {}),
            ('BankMarketing', {}),
            ('TabularBenchmark', {'name': 'MagicTelescope'}),
            ('TabularBenchmark', {'name': 'bank-marketing'}),
            ('TabularBenchmark', {'name': 'california'}),
            ('TabularBenchmark', {'name': 'credit'}),
            ('TabularBenchmark', {'name': 'default-of-credit-card-clients'}),
            ('TabularBenchmark', {'name': 'electricity'}),
            ('TabularBenchmark', {'name': 'eye_movements'}),
            ('TabularBenchmark', {'name': 'heloc'}),
            ('TabularBenchmark', {'name': 'house_16H'}),
            ('TabularBenchmark', {'name': 'pol'}),
            ('Yandex', {'name': 'adult'}),
        ],
        (TaskType.BINARY_CLASSIFICATION, 'medium'): [
            ('Dota2', {}),
            ('KDDCensusIncome', {}),
            ('TabularBenchmark', {'name': 'Diabetes130US'}),
            ('TabularBenchmark', {'name': 'MiniBooNE'}),
            ('TabularBenchmark', {'name': 'albert'}),
            ('TabularBenchmark', {'name': 'covertype'}),
            ('TabularBenchmark', {'name': 'jannis'}),
            ('TabularBenchmark', {'name': 'road-safety'}),
            ('Yandex', {'name': 'higgs_small'}),
        ],
        (TaskType.BINARY_CLASSIFICATION, 'large'): [
            ('TabularBenchmark', {'name': 'Higgs'}),
        ],
        # No small-scale multi-class datasets are registered yet.
        (TaskType.MULTICLASS_CLASSIFICATION, 'small'): [],
        (TaskType.MULTICLASS_CLASSIFICATION, 'medium'): [
            ('Yandex', {'name': 'aloi'}),
            ('Yandex', {'name': 'helena'}),
            ('Yandex', {'name': 'jannis'}),
        ],
        (TaskType.MULTICLASS_CLASSIFICATION, 'large'): [
            ('ForestCoverType', {}),
            ('PokerHand', {}),
            ('Yandex', {'name': 'covtype'}),
        ],
        (TaskType.REGRESSION, 'small'): [
            ('TabularBenchmark', {'name': 'Bike_Sharing_Demand'}),
            ('TabularBenchmark', {'name': 'Brazilian_houses'}),
            ('TabularBenchmark', {'name': 'cpu_act'}),
            ('TabularBenchmark', {'name': 'elevators'}),
            ('TabularBenchmark', {'name': 'house_sales'}),
            ('TabularBenchmark', {'name': 'houses'}),
            ('TabularBenchmark', {'name': 'sulfur'}),
            ('TabularBenchmark', {'name': 'superconduct'}),
            ('TabularBenchmark', {'name': 'topo_2_1'}),
            ('TabularBenchmark', {'name': 'visualizing_soil'}),
            ('TabularBenchmark', {'name': 'wine_quality'}),
            ('TabularBenchmark', {'name': 'yprop_4_1'}),
            ('Yandex', {'name': 'california_housing'}),
        ],
        (TaskType.REGRESSION, 'medium'): [
            ('TabularBenchmark', {'name': 'Allstate_Claims_Severity'}),
            ('TabularBenchmark', {'name': 'SGEMM_GPU_kernel_performance'}),
            ('TabularBenchmark', {'name': 'diamonds'}),
            ('TabularBenchmark', {'name': 'medical_charges'}),
            ('TabularBenchmark', {'name': 'particulate-matter-ukair-2017'}),
            ('TabularBenchmark', {'name': 'seattlecrime6'}),
        ],
        (TaskType.REGRESSION, 'large'): [
            ('TabularBenchmark', {'name': 'Airlines_DepDelay_1M'}),
            ('TabularBenchmark', {'name': 'delays_zurich_transport'}),
            ('TabularBenchmark', {'name': 'nyc-taxi-green-dec-2016'}),
            ('Yandex', {'name': 'microsoft'}),
            ('Yandex', {'name': 'yahoo'}),
            ('Yandex', {'name': 'year'}),
        ],
    }

    datasets = DataFrameBenchmark.datasets_available(task_type=task_type,
                                                     scale=scale)
    expected = expected_map[(task_type, scale)]
    if not expected:
        # An empty category must stay empty until a dataset is registered
        # here first.
        assert len(datasets) == 0
    # Element-wise comparison gives a clear failure message pointing at the
    # first diverging index.
    for idx, pair in enumerate(expected):
        assert datasets[idx] == pair
|
||
|
||
def test_data_frame_benchmark_object(tmp_path):
    """Instantiating a benchmark resolves idx to the right dataset class and
    exposes its row count; materialization runs without error."""
    dataset = DataFrameBenchmark(tmp_path, TaskType.BINARY_CLASSIFICATION,
                                 'small', 1)
    expected_repr = ("DataFrameBenchmark(\n"
                     "  task_type=binary_classification,\n"
                     "  scale=small,\n"
                     "  idx=1,\n"
                     "  cls=Mushroom()\n"
                     ")")
    assert str(dataset) == expected_repr
    assert dataset.num_rows == 8124
    dataset.materialize()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,21 @@ | ||
import numpy as np | ||
|
||
from torch_frame.utils.split import SPLIT_TO_NUM, generate_random_split | ||
|
||
|
||
def test_generate_random_split():
    """A seeded random split honors the requested ratios and is
    reproducible: the exact assignment for seed=42 is pinned below."""
    num_data = 20
    ratios = {'train': 0.8, 'val': 0.1, 'test': 0.1}

    split = generate_random_split(num_data, seed=42,
                                  train_ratio=ratios['train'],
                                  val_ratio=ratios['val'])

    # Each split must contain exactly its share of the rows.
    for split_name, ratio in ratios.items():
        count = (split == SPLIT_TO_NUM[split_name]).sum()
        assert count == int(num_data * ratio)

    # The assignment itself is deterministic for a fixed seed.
    expected = np.array(
        [0, 1, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0])
    assert np.allclose(split, expected)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.