Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update fip #112

Merged
merged 2 commits into from
Nov 26, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "causica"
version = "0.4.3"
version = "0.4.4"
description = ""
readme = "README.md"
authors = ["Microsoft Research - Causica"]
Expand Down Expand Up @@ -64,13 +64,15 @@ known_first_party = ["causica"]
treat_comments_as_code = ["# %%"]

[tool.pytest.ini_options]
norecursedirs = "integration"
addopts = "--durations=200"
junit_family = "xunit1"

[tool.mypy]
ignore_missing_imports = true
exclude = [
"research_experiments/avid"
"research_experiments/avid",
"research_experiments/fip",
]

[tool.pylint.main]
Expand Down
5,870 changes: 4,185 additions & 1,685 deletions research_experiments/fip/poetry.lock

Large diffs are not rendered by default.

30 changes: 28 additions & 2 deletions research_experiments/fip/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "fip"
version = "0.1.0"
version = "0.1.1"
description = "A Fixed-Point Approach for Causal Generative Modeling"
readme = "README.md"
authors = ["Meyer Scetbon", "Joel Jennings", "Agrin Hilmkil", "Cheng Zhang", "Chao Ma"]
Expand All @@ -11,8 +11,34 @@ license = "MIT"

[tool.poetry.dependencies]
python = "~3.10"
causica = "0.4.1"
causica = "0.4.2"

[tool.poetry.group.dev.dependencies]
black = {version="^22.6.0", extras=["jupyter"]}
isort = "^5.10.1"
jupyter = "^1.0.0"
jupytext = "^1.13.8"
mypy = "^1.0.0"
pre-commit = "^2.19.0"
pylint = "^2.14.4"
pytest = "^7.1.2"
pytest-cov = "^3.0.0"
seaborn = "^0.12.2"
types-python-dateutil = "^2.8.18"
types-requests = "^2.31.0.10"


[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.black]
line-length = 120

[tool.isort]
line_length = 120
profile = "black"
py_version = 310
known_first_party = ["fip"]
# Keep import sorts by code jupytext percent block (https://github.com/PyCQA/isort/issues/1338)
treat_comments_as_code = ["# %%"]
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
seed_everything: 234

model:
class_path: fip.tasks.amortization.leaf_prediction.LeafPrediction
init_args:
Expand All @@ -14,6 +15,7 @@ model:
num_to_keep_training: 10
distributed: false
elimination_type: "self"

trainer:
max_epochs: 2000
accelerator: gpu
Expand All @@ -22,16 +24,19 @@ trainer:
profiler: "simple"
devices: 1
accumulate_grad_batches: 1

best_checkpoint_callback:
dirpath: "./outputs/"
filename: "best_model"
save_top_k: 1
mode: "min"
monitor: "val_loss"
every_n_epochs: 1

last_checkpoint_callback:
save_last: true
save_top_k: 0 # only the last checkpoint is saved

early_stopping_callback:
monitor: "val_loss"
min_delta: 0.0001
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
seed_everything: 234

model:
class_path: fip.tasks.scm_learning_with_ground_truth.scm_learning_true_graph.SCMLearningTrueGraph
init_args:
Expand All @@ -9,13 +10,14 @@ model:
num_heads: 8
d_feedforward: 128
total_nodes: 4
total_layers: 1
dropout_prob: 0.
mask_type: "none"
attn_type: "causal"
cost_type: "dot_product"
learnable_loss: false
distributed: false

trainer:
max_epochs: 1000
accelerator: gpu
Expand All @@ -24,19 +26,22 @@ trainer:
log_every_n_steps: 10
inference_mode: false
profiler: "simple"

early_stopping_callback:
monitor: "val_loss"
min_delta: 0.0001
patience: 500
verbose: False
mode: "min"

best_checkpoint_callback:
dirpath: "./outputs/"
filename: "best_model"
save_top_k: 1
mode: "min"
monitor: "val_loss"
every_n_epochs: 1

last_checkpoint_callback:
save_last: true
save_top_k: 0 # only the last checkpoint is saved
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
seed_everything: 5000

model:
class_path: fip.tasks.scm_learning_with_ground_truth.scm_learning_true_perm.SCMLearningTruePerm
init_args:
Expand All @@ -16,6 +17,7 @@ model:
cost_type: "dot_product"
learnable_loss: false
distributed: false

trainer:
max_epochs: 1000
accelerator: gpu
Expand All @@ -24,19 +26,22 @@ trainer:
log_every_n_steps: 10
inference_mode: false
profiler: "simple"

early_stopping_callback:
monitor: "val_loss"
min_delta: 0.0001
patience: 500
verbose: False
mode: "min"

best_checkpoint_callback:
dirpath: "./outputs/"
filename: "best_model"
save_top_k: 1
mode: "min"
monitor: "val_loss"
every_n_epochs: 1

last_checkpoint_callback:
save_last: true
save_top_k: 0 # only the last checkpoint is saved
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
seed_everything: 234

model:
class_path: fip.tasks.scm_learning_with_predicted_truth.scm_learning_predicted_leaf.SCMLearningPredLeaf
init_args:
Expand All @@ -18,26 +19,30 @@ model:
cost_type: "dot_product"
learnable_loss: false
distributed: false

trainer:
max_epochs: 1000
accelerator: gpu
devices: 1
check_val_every_n_epoch: 1
log_every_n_steps: 10
inference_mode: false

early_stopping_callback:
monitor: "val_loss"
min_delta: 0.0001
patience: 500
verbose: False
mode: "min"

best_checkpoint_callback:
dirpath: "./outputs/"
filename: "best_model"
save_top_k: 1
mode: "min"
monitor: "val_loss"
every_n_epochs: 1

last_checkpoint_callback:
save_last: true
save_top_k: 0 # only the last checkpoint is saved
Loading
Loading