Skip to content

Commit

Permalink
Merge branch 'main' into feature/parallel_sgs
Browse files Browse the repository at this point in the history
# Conflicts:
#	sampo/scheduler/genetic/operators.py
#	sampo/scheduler/genetic/schedule_builder.py
#	tests/scheduler/genetic/converter_test.py
  • Loading branch information
StannisMod committed Oct 26, 2023
2 parents 9fe3c25 + 78d2b66 commit 0955439
Show file tree
Hide file tree
Showing 8 changed files with 176 additions and 192 deletions.
12 changes: 5 additions & 7 deletions sampo/generator/pipeline/project.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
MAX_BOREHOLES_PER_BLOCK, BRANCHING_PROBABILITY
from sampo.generator.pipeline.cluster import get_cluster_works, _add_addition_work
from sampo.generator.pipeline.types import SyntheticGraphType, StageType
from sampo.generator.utils.graph_node_operations import count_node_ancestors
from sampo.generator.utils.graph_node_operations import count_ancestors
from sampo.schemas.graph import GraphNode, WorkGraph, EdgeType
from sampo.schemas.utils import uuid_str
from sampo.schemas.works import WorkUnit
Expand Down Expand Up @@ -109,18 +109,16 @@ def get_graph(mode: SyntheticGraphType | None = SyntheticGraphType.GENERAL,

root_stage = get_root_stage(stages, branching_probability, rand)
checkpoints, roads = _get_cluster_graph(root_stage, f'{cluster_name_prefix}{masters_clusters_ind}',
addition_cluster_probability=addition_cluster_probability, rand=rand)
tmp_finish = get_finish_stage(checkpoints)
count_works = count_node_ancestors(tmp_finish, root_stage)
addition_cluster_probability=addition_cluster_probability, rand=rand)

if 0 < top_border < (count_works + works_generated):
break
count_works = count_ancestors(checkpoints, root_stage)

stages += [(c, roads) for c in checkpoints]
masters_clusters_ind += 1
works_generated += count_works

if 0 < bottom_border <= works_generated or 0 < cluster_counts <= (len(stages) - 1):
if (0 < bottom_border <= works_generated or top_border < (count_works + works_generated)
or 0 < cluster_counts <= (len(stages) - 1)):
break

if len(stages) == 1:
Expand Down
17 changes: 7 additions & 10 deletions sampo/generator/utils/graph_node_operations.py
Original file line number Diff line number Diff line change
@@ -1,28 +1,25 @@
import queue

from sampo.schemas.graph import GraphNode


def count_ancestors(first_ancestors: list[GraphNode], root: GraphNode) -> int:
    """
    Count the distinct ancestors of the graph reachable from `first_ancestors`.

    Performs a breadth-first traversal upward through `node.parents`,
    starting from every node in `first_ancestors`. The starting nodes
    themselves are included in the count; `root` is excluded.

    :param first_ancestors: Nodes whose ancestors (and themselves) are counted.
    :param root: The root node of the graph; it terminates the traversal
        and is never counted.
    :return: Number of distinct counted nodes.
    """
    # Mark the start nodes (and root) as visited up front so a node that is
    # both a start node and a parent of another node is not counted twice.
    used = {root, *first_ancestors}
    queue_ = list(first_ancestors)
    count = len(first_ancestors)
    while queue_:
        node = queue_.pop()
        for parent in node.parents:
            if parent in used:
                continue
            used.add(parent)
            # Traversal order does not affect the count, so an O(1) append
            # is used instead of an O(n) insert at the front of the list.
            queue_.append(parent)
            count += 1

    return count
6 changes: 3 additions & 3 deletions sampo/scheduler/genetic/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from sampo.scheduler.base import Scheduler, SchedulerType
from sampo.scheduler.genetic.operators import FitnessFunction, TimeFitness
from sampo.scheduler.genetic.schedule_builder import build_schedule
from sampo.scheduler.genetic.converter import ChromosomeType
from sampo.scheduler.heft.base import HEFTScheduler, HEFTBetweenScheduler
from sampo.scheduler.heft.prioritization import prioritization
from sampo.scheduler.resource.average_req import AverageReqResourceOptimizer
Expand Down Expand Up @@ -38,7 +39,7 @@ def __init__(self,
seed: Optional[float or None] = None,
n_cpu: int = 1,
weights: list[int] = None,
fitness_constructor: Callable[[Time | None], FitnessFunction] = TimeFitness,
fitness_constructor: Callable[[Callable[[list[ChromosomeType]], list[Schedule]]], FitnessFunction] = TimeFitness,
scheduler_type: SchedulerType = SchedulerType.Genetic,
resource_optimizer: ResourceOptimizer = IdentityResourceOptimizer(),
work_estimator: WorkTimeEstimator = DefaultWorkEstimator(),
Expand Down Expand Up @@ -214,7 +215,6 @@ def schedule_with_cache(self,

mutate_order, mutate_resources, mutate_zones, size_of_population = self.get_params(wg.vertex_count)
worker_pool = get_worker_contractor_pool(contractors)
fitness_object = self.fitness_constructor(self._deadline)
deadline = None if self._optimize_resources else self._deadline

scheduled_works, schedule_start_time, timeline, order_nodes = build_schedule(wg,
Expand All @@ -229,7 +229,7 @@ def schedule_with_cache(self,
self.rand,
spec,
landscape,
fitness_object,
self.fitness_constructor,
self.work_estimator,
self._n_cpu,
assigned_parent_time,
Expand Down
6 changes: 2 additions & 4 deletions sampo/scheduler/genetic/converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,7 @@
ChromosomeType = tuple[np.ndarray, np.ndarray, np.ndarray, ScheduleSpec, np.ndarray]


def convert_schedule_to_chromosome(wg: WorkGraph,
work_id2index: dict[str, int],
def convert_schedule_to_chromosome(work_id2index: dict[str, int],
worker_name2index: dict[str, int],
contractor2index: dict[str, int],
contractor_borders: np.ndarray,
Expand All @@ -32,7 +31,6 @@ def convert_schedule_to_chromosome(wg: WorkGraph,
"""
Receive a result of scheduling algorithm and transform it to chromosome
:param wg:
:param work_id2index:
:param worker_name2index:
:param contractor2index:
Expand All @@ -45,7 +43,7 @@ def convert_schedule_to_chromosome(wg: WorkGraph,
"""

order: list[GraphNode] = order if order is not None else [work for work in schedule.works
if not wg[work.work_unit.id].is_inseparable_son()]
if work.work_unit.id in work_id2index]

# order works part of chromosome
order_chromosome: np.ndarray = np.array([work_id2index[work.work_unit.id] for work in order])
Expand Down
Loading

0 comments on commit 0955439

Please sign in to comment.