-
Notifications
You must be signed in to change notification settings - Fork 184
/
Copy path5.6-part2-logging-tensorboard.py
38 lines (29 loc) · 1.14 KB
/
5.6-part2-logging-tensorboard.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
# Unit 5.6. The Benefits of Logging Your Model Training
# Part 2. Logging with TensorBoard
import os

import lightning as L
import torch
from watermark import watermark

from shared_utilities import LightningModel, MNISTDataModule, PyTorchMLP
if __name__ == "__main__":
    # Report the library versions and CUDA availability for reproducibility.
    print(watermark(packages="torch,lightning", python=True))
    print("Torch CUDA available?", torch.cuda.is_available())

    # Seed globally; combined with deterministic=True below this makes the
    # run repeatable.
    torch.manual_seed(123)

    dm = MNISTDataModule()
    pytorch_model = PyTorchMLP(num_features=784, num_classes=10)
    lightning_model = LightningModel(model=pytorch_model, learning_rate=0.05)

    trainer = L.Trainer(
        max_epochs=10,
        accelerator="cpu",
        devices="auto",
        deterministic=True,
        # Expand "~" explicitly: a literal "~/Desktop" string is a shell
        # convention and is not expanded by Python's file APIs, so without
        # expanduser the TensorBoard logs can end up in a directory literally
        # named "~" (notably on Windows).
        default_root_dir=os.path.expanduser("~/Desktop"),  # New !!!
    )

    trainer.fit(model=lightning_model, datamodule=dm)

    # validate()/test() each return a list with one metric dict per dataloader;
    # running validate() over the train loader reuses the validation metrics
    # (keyed "val_acc") to measure training accuracy without a separate loop.
    train_acc = trainer.validate(dataloaders=dm.train_dataloader())[0]["val_acc"]
    val_acc = trainer.validate(datamodule=dm)[0]["val_acc"]
    test_acc = trainer.test(datamodule=dm)[0]["test_acc"]
    print(
        f"Train Acc {train_acc*100:.2f}%"
        f" | Val Acc {val_acc*100:.2f}%"
        f" | Test Acc {test_acc*100:.2f}%"
    )