Fix seed of simulation
yura-hb committed Feb 24, 2024
1 parent a38b620 commit 27c7ba8
Showing 15 changed files with 199 additions and 86 deletions.
6 changes: 5 additions & 1 deletion diploma_thesis/breakdown/dynamic.py
@@ -1,4 +1,4 @@

import torch

from environment import Breakdown, Machine
from typing import Dict
@@ -18,6 +18,10 @@ def __init__(self, configuration: Configuration):

self.configuration = configuration

def connect(self, generator: torch.Generator):
self.configuration.arrival.connect(generator)
self.configuration.duration.connect(generator)

def sample_next_breakdown_time(self, machine: Machine):
return self.configuration.arrival.sample((1,))

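
The new connect hook forwards a single torch.Generator to the arrival and duration samplers, so breakdown timing depends only on the configured seed rather than on torch's global RNG state. A minimal usage sketch, assuming a Dynamic instance built from a configuration that carries both samplers; the configuration and machine objects here are placeholders, not repository code:

import torch

# Illustrative only: one seeded generator is shared with the breakdown model,
# so every arrival/duration draw comes from the same reproducible stream.
generator = torch.Generator()
generator.manual_seed(42)

breakdown = Dynamic(configuration)        # placeholder construction
breakdown.connect(generator)              # seeds the arrival and duration samplers
next_failure = breakdown.sample_next_breakdown_time(machine)   # placeholder machine
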
4 changes: 4 additions & 0 deletions diploma_thesis/breakdown/no.py
@@ -1,10 +1,14 @@
import torch

from environment import Breakdown, Machine
from typing import Dict


class No(Breakdown):

def connect(self, generator: torch.Generator):
pass

def sample_next_breakdown_time(self, machine: Machine):
return float('inf')

201 changes: 132 additions & 69 deletions diploma_thesis/configuration/jsp.yml
@@ -1,50 +1,144 @@
jsp: &jsp
name: 'model'
output_dir: 'tmp/jsp'
log_stdout: False

machine_agent:
kind: 'mod'
parameters:
base_path: 'configuration/mods/machine_agent/model.yml'
mods: [ ]

work_center_agent:
kind: 'static'
parameters:
model:
kind: 'static'
parameters:
rule: 'et'
encoder:
kind: 'plain'

tape:
machine_reward:
kind: 'global_tardiness'
parameters:
span: 256

work_center_reward:
kind: 'no'

simulator:
kind: 'td'

run:
kind: 'mod'
parameters:
base_path: 'configuration/mods/run/jsp/run.yml'
mods: [ ]

rl_agents: &rl_agents
[
'rl/trainer/dqn.yml',
'rl/trainer/ddqn.yml'
]

memory: &memory
[
'memory/plain.yml',
'memory/prioritized.yml'
]

loss: &loss
[
# 'loss/huber.yml',
'loss/mse.yml',
# 'loss/smooth_l1.yml'
]

agent: &agent
[
'rl/agent/single.yml',
'rl/agent/multi_agent.yml',
'rl/agent/centralized.yml'
]

optimizer: &optimizer
[
'optimizer/adam.yml',
# 'optimizer/sgd.yml'
]

action_set: &action_set
[
'model/rules/all.yml',
'model/rules/marl.yml'
]

reward: &reward
- kind: 'global_tardiness'
- kind: 'global_decomposed_tardiness'
- kind: 'surrogate_tardiness'

run: &run
mods:
# - [ 'multi.yml' ]
- [ 'multi.yml', 'concurrent.yml' ]
# - [ 'util_70.yml' ]
# - [ 'util_80.yml' ]
# - [ 'util_90.yml' ]

task:
kind: 'multi_task'
n_workers: 8
debug: False

tasks:
# AS
- kind: 'multi_value'
parameters:
base:
name: 'model'
output_dir: 'tmp/jsp'
log_stdout: False

*jsp
values:
machine_agent:
kind: 'mod'
parameters:
base_path: 'configuration/mods/machine_agent/model.yml'
mods: [ ]

work_center_agent:
kind: 'static'
parameters:
model:
kind: 'static'
parameters:
rule: 'et'
encoder:
kind: 'plain'
mods:
# Reserved keyword used by GridFactory to build the resulting list
__factory__:
# RL Trainers
- *rl_agents
# RL Agent Kind
- *agent
# Memory
- *memory
# Loss
- *loss
# Optimizer
- *optimizer
# State Encoding
- [
'encoding/indirect.yml',
]
# Action Set
- *action_set
# Model
- [
'model/model/marl_as.yml'
]

tape:
machine_reward:
kind: 'global_tardiness'
parameters:
span: 256

work_center_reward:
kind: 'no'

simulator:
kind: 'td'
*reward

run:
kind: 'mod'
parameters:
base_path: 'configuration/mods/run/jsp/run.yml'
mods: [ ]
parameters:
*run

# MR
- kind: 'multi_value'
parameters:
base:
*jsp

values:
machine_agent:
@@ -53,61 +147,30 @@ task:
# Reserved keyword used by GridFactory to build the resulting list
__factory__:
# RL Trainers
- [
'rl/trainer/dqn.yml',
'rl/trainer/ddqn.yml'
]
- *rl_agents
# RL Agent Kind
- [
'rl/agent/single.yml',
# 'rl/agent/multi_agent.yml',
# 'rl/agent/centralized.yml'
]
- *agent
# Memory
- [
'memory/plain.yml',
'memory/prioritized.yml'
]
- *memory
# Loss
- [
'loss/huber.yml',
'loss/mse.yml',
'loss/smooth_l1.yml'
]
- *loss
# Optimizer
- [
'optimizer/adam.yml',
'optimizer/sgd.yml'
]
- *optimizer
# State Encoding
- [
'encoding/indirect.yml',
'encoding/mr.yml'
]
# Action Set
- [
'model/rules/all.yml',
'model/rules/marl.yml'
]
- *action_set
# Model
- [
'model/model/marl_as.yml',
'model/model/marl_mr.yml'
]

tape:
machine_reward:
- kind: 'global_tardiness'
- kind: 'global_decomposed_tardiness'
- kind: 'surrogate_tardiness'
*reward

run:
parameters:
mods:
# - ['test.yml']
- [ 'multi.yml' ]
# - [ 'multi.yml', 'concurrent.yml' ]
# - [ 'util_70.yml' ]
# - [ 'util_80.yml' ]
# - [ 'util_90.yml' ]

*run
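
The __factory__ lists above appear to describe a grid: one entry is picked from each group, and the combinations form the set of generated configurations. A self-contained sketch of that expansion, assuming a plain Cartesian product; the actual GridFactory in the repository may differ:

from itertools import product

# Illustrative grid expansion over a few of the groups referenced above.
factory = [
    ['rl/trainer/dqn.yml', 'rl/trainer/ddqn.yml'],      # RL trainers
    ['memory/plain.yml', 'memory/prioritized.yml'],     # memory
    ['model/rules/all.yml', 'model/rules/marl.yml'],    # action set
]

mod_sets = [list(mods) for mods in product(*factory)]
print(len(mod_sets))  # 2 * 2 * 2 = 8 combinations of mod files
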
7 changes: 6 additions & 1 deletion diploma_thesis/environment/breakdown.py
@@ -1,7 +1,12 @@
import torch
from abc import ABCMeta
import environment


class Breakdown:
class Breakdown(metaclass=ABCMeta):

def connect(self, generator: torch.Generator):
pass

def sample_next_breakdown_time(self, machine: 'environment.Machine') -> float:
pass
4 changes: 4 additions & 0 deletions diploma_thesis/environment/job_sampler.py
@@ -14,6 +14,10 @@ def __init__(self, problem: Configuration, environment: simpy.Environment):
self.problem = problem
self.environment = environment

def connect(self, generator: torch.Generator):
pass

@abstractmethod
def number_of_jobs(self):
return

6 changes: 5 additions & 1 deletion diploma_thesis/environment/shop_floor.py
@@ -107,7 +107,11 @@ def __init__(self, id: int, name: str, configuration: Configuration, logger: log
self.did_finish_simulation_event = configuration.environment.event()

def simulate(self):
torch.manual_seed(self.configuration.problem.seed)
generator = torch.Generator()
generator.manual_seed(self.configuration.problem.seed)

self.configuration.breakdown.connect(generator)
self.configuration.sampler.connect(generator)

self.history.with_started_at(self.configuration.environment.now)
self.delegate.did_start_simulation(context=self.__make_context__())
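
Replacing the global torch.manual_seed call with a per-simulation torch.Generator keeps each simulation's random stream isolated from any other torch code running in the process. A self-contained illustration of the difference, not repository code:

import torch

def draws(seed: int) -> torch.Tensor:
    # A dedicated generator: the stream depends only on the seed passed in.
    generator = torch.Generator()
    generator.manual_seed(seed)
    return torch.rand(3, generator=generator)

first = draws(17)
torch.rand(100)                       # unrelated global RNG usage elsewhere
second = draws(17)
assert torch.equal(first, second)     # the local stream is unaffected
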
1 change: 0 additions & 1 deletion diploma_thesis/environment/statistics.py
@@ -7,7 +7,6 @@

import pandas as pd
import torch
import torchsnapshot
from tensordict import TensorDict

import environment
4 changes: 4 additions & 0 deletions diploma_thesis/job_sampler/dynamic/builder.py
@@ -22,6 +22,7 @@ def sample(initial_work_center_idx: int):
return sampler.sample([self.problem.work_center_count])

self.job_sampler._step_sampler = sample
self.job_sampler.store(sampler)

return self

@@ -33,6 +34,7 @@ def sample(shape: Tuple[int]) -> torch.FloatTensor:
return times

self.job_sampler._processing_time_sampler = sample
self.job_sampler.store(sampler)

return self

@@ -47,6 +49,7 @@ def sample(job: Job, moment: torch.FloatTensor) -> torch.FloatTensor:
return torch.round(weight * tightness + moment)

self.job_sampler._due_time_sampler = sample
self.job_sampler.store(sampler)

return self

@@ -56,6 +59,7 @@ def sample() -> torch.FloatTensor:

self.job_sampler.arrival_time_sampler = sample
self.job_sampler._number_of_jobs = n_jobs
self.job_sampler.store(sampler)

return self

9 changes: 9 additions & 0 deletions diploma_thesis/job_sampler/dynamic/job_sampler.py
@@ -5,6 +5,7 @@
import torch

from environment import Configuration, Job, JobSampler as JSampler
from sampler import Sampler


class JobSampler(JSampler):
@@ -16,8 +17,13 @@ def __init__(self, problem: Configuration, environment: simpy.Environment):
self._processing_time_sampler: Callable[[Tuple[int]], torch.FloatTensor] = None
self._step_sampler: Callable[[int], torch.LongTensor] = None
self._due_time_sampler: Callable[['Job', int], torch.FloatTensor] = None
self.samplers: [Sampler] = []
self.arrival_time_sampler = None

def connect(self, generator: torch.Generator):
for sampler in self.samplers:
sampler.connect(generator)

def number_of_jobs(self):
return self._number_of_jobs

@@ -39,3 +45,6 @@ def sample(self, job_id: int, initial_work_center_idx: int, moment: torch.FloatT

def sample_next_arrival_time(self) -> torch.FloatTensor:
return self.arrival_time_sampler()

def store(self, sampler: Sampler):
self.samplers += [sampler]
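
connect assumes that each stored Sampler keeps the generator it is given and uses it for later draws; the builder registers every composed sampler via store so all of them are seeded at once. The Sampler base class itself is not part of this diff, so the following is a minimal sketch of the expected contract with an illustrative uniform sampler; names and the distribution are assumptions, not the repository's implementation:

import torch

class Sampler:
    # Assumed contract: hold a generator and draw from it once connected.
    def __init__(self):
        self.generator = None

    def connect(self, generator: torch.Generator):
        self.generator = generator

class Uniform(Sampler):
    def __init__(self, low: float, high: float):
        super().__init__()
        self.low, self.high = low, high

    def sample(self, shape) -> torch.Tensor:
        # Falls back to torch's default RNG if connect was never called.
        return self.low + (self.high - self.low) * torch.rand(shape, generator=self.generator)
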
2 changes: 2 additions & 0 deletions diploma_thesis/sampler/constant.py
@@ -6,6 +6,8 @@
class Constant(NumericSampler):

def __init__(self, value: float):
super().__init__()

self.value = value

def sample(self, shape):