Skip to content

Commit

Permalink
fixes issues
Browse files Browse the repository at this point in the history
  • Loading branch information
ThomasHelfer committed Jun 24, 2024
1 parent af4deae commit 2cf2c2d
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 7 deletions.
9 changes: 5 additions & 4 deletions configs/config.yaml
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
ADAMsteps: 200
n_steps: 1
res_level: 7
scaling_factor: 0.00005
n_steps: 210
res_level: 5
scaling_factor: 0.001
# rescale factor
factor: 2
# Data Source
filenamesX: "/home/thelfer1/scr4_tedwar42/thelfer1/high_end_data/outputXdata_level{res_level}_step*.dat"
# Restarting
restart: False
file_path: "model_epoch_counter_0000000049_data_time_1718669354.pth"
file_path: "model_epoch_counter_0000000049_data_time_1718669354.pth"
lambda_fac: 0.0
16 changes: 13 additions & 3 deletions learn_error.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,13 @@ def load_config(config_path):
return yaml.safe_load(file)


def copy_config_file(source, destination):
    """Copy the configuration file *source* into the directory *destination*.

    Creates *destination* (including intermediate directories) if it does
    not already exist, then copies the file there, preserving its basename.

    Args:
        source: Path to the configuration file to copy.
        destination: Directory the file is copied into.
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # `if not os.path.exists(...)` guard and is a no-op if the dir exists.
    os.makedirs(destination, exist_ok=True)
    shutil.copy(source, destination)


def main():
default_job_id = "local_run"
# Parse the arguments
Expand Down Expand Up @@ -81,6 +88,9 @@ def main():
# Load configuration
config = load_config(args.config)

# Copy the configuration file to the tracking directory
copy_config_file(args.config, folder_name)

# Access configuration variables
ADAMsteps = config["ADAMsteps"]
n_steps = config["n_steps"]
Expand All @@ -91,6 +101,7 @@ def main():
restart = config["restart"]
file_path = config["file_path"]
lambda_fac = config["lambda_fac"]
print(f"lambda_fac {type(scaling_factor)}")

num_varsX = 25
dataX = get_box_format(filenamesX, num_varsX)
Expand Down Expand Up @@ -152,8 +163,8 @@ def forward(self, x):

x = self.interpolation(x)
tmp = x.clone()
x = x + self.encoder(tmp) * self.scaling_factor

x = x + self.encoder(tmp) * self.scaling_factor
return x, tmp

# Instantiate the model
Expand Down Expand Up @@ -228,7 +239,7 @@ def forward(self, x):
class Hamiltonian_loss:
def __init__(self, oneoverdx: float, lambda_fac: float = 0):
self.oneoverdx = oneoverdx
self.lambda_fac = float(lambda_fac)
self.lambda_fac = lambda_fac

def __call__(
self, output: torch.tensor, y_interp: torch.tensor
Expand Down Expand Up @@ -315,7 +326,6 @@ def closure():
total_loss_train += loss_train.item()
# Calculate the average training loss
average_loss_train = total_loss_train / len(train_loader)
print(average_loss_train)
# Log the average training loss
writer.add_scalar("loss/train", average_loss_train, counter)
losses_train.append(average_loss_train)
Expand Down

0 comments on commit 2cf2c2d

Please sign in to comment.