diff --git a/examples/field_development/field_development_scheduling.py b/examples/field_development/field_development_scheduling.py
index f61b4fd2..a9fb01eb 100644
--- a/examples/field_development/field_development_scheduling.py
+++ b/examples/field_development/field_development_scheduling.py
@@ -7,7 +7,6 @@
 from sampo.schemas.contractor import Contractor
 from sampo.schemas.graph import WorkGraph
 from sampo.structurator.base import graph_restructuring
-from sampo.utilities.schedule import remove_service_tasks
 from sampo.utilities.visualization.base import VisualizationMode
 from sampo.utilities.visualization.schedule import schedule_gant_chart_fig
 from sampo.utilities.visualization.work_graph import work_graph_fig
@@ -48,7 +47,7 @@
 # Schedule field development tasks
 schedule = scheduler_type.schedule(structured_wg, contractors, validate=True)
 
-schedule_df = remove_service_tasks(schedule.merged_stages_datetime_df(start_date))
+schedule_df = schedule.merged_stages_datetime_df(start_date)
 
 # Schedule's gant chart visualization
 gant_fig = schedule_gant_chart_fig(schedule_df,
diff --git a/examples/simple_synthetic_graph_scheduling.py b/examples/simple_synthetic_graph_scheduling.py
index 7a52e64d..1a61d5c2 100644
--- a/examples/simple_synthetic_graph_scheduling.py
+++ b/examples/simple_synthetic_graph_scheduling.py
@@ -5,8 +5,6 @@
 from sampo.utilities.visualization.schedule import schedule_gant_chart_fig
 
-from sampo.utilities.schedule import remove_service_tasks
-
 from sampo.generator.base import SimpleSynthetic
 from sampo.scheduler.heft.base import HEFTScheduler
 from sampo.schemas.time import Time
@@ -47,7 +45,7 @@
 # Schedule works
 schedule = scheduler.schedule(wg, contractors)
 
-schedule_df = remove_service_tasks(schedule.merged_stages_datetime_df(start_date))
+schedule_df = schedule.merged_stages_datetime_df(start_date)
 
 # Schedule's gant chart visualization
 gant_fig = schedule_gant_chart_fig(schedule_df, fig_file_name=gant_chart_filename,
diff --git a/experiments/algo_performance_comparison.py b/experiments/algo_performance_comparison.py
index 8bdf6eac..f769c4dd 100644
--- a/experiments/algo_performance_comparison.py
+++ b/experiments/algo_performance_comparison.py
@@ -2,7 +2,7 @@
 
 from pathos.multiprocessing import ProcessingPool
 
-from sampo.generator import SimpleSynthetic
+from sampo.generator.base import SimpleSynthetic
 from sampo.generator.types import SyntheticGraphType
 from sampo.scheduler.base import SchedulerType
 from sampo.scheduler.generate import generate_schedule
diff --git a/experiments/algorithms_2_multi_agency.py b/experiments/algorithms_2_multi_agency.py
index 057c8917..d4233bcc 100644
--- a/experiments/algorithms_2_multi_agency.py
+++ b/experiments/algorithms_2_multi_agency.py
@@ -2,7 +2,7 @@
 from random import Random
 from typing import IO
 
-from sampo.generator import SimpleSynthetic
+from sampo.generator.base import SimpleSynthetic
 from sampo.scheduler.heft.base import HEFTScheduler, HEFTBetweenScheduler
 from sampo.scheduler.multi_agency.block_generator import SyntheticBlockGraphType, generate_block_graph
 from sampo.scheduler.multi_agency.multi_agency import Agent, Manager
diff --git a/experiments/algorithms_efficiency.py b/experiments/algorithms_efficiency.py
index e6d26573..ca1b9598 100644
--- a/experiments/algorithms_efficiency.py
+++ b/experiments/algorithms_efficiency.py
@@ -4,7 +4,7 @@
 
 from pathos.multiprocessing import ProcessingPool
 
-from sampo.generator import SimpleSynthetic
+from sampo.generator.base import SimpleSynthetic
 from sampo.scheduler.heft.base import HEFTBetweenScheduler
 from sampo.scheduler.heft.base import HEFTScheduler
 from sampo.scheduler.multi_agency.block_generator import SyntheticBlockGraphType, generate_block_graph
diff --git a/experiments/genetic2baseline.py b/experiments/genetic2baseline.py
index 64a990fd..1c66cd4b 100644
--- a/experiments/genetic2baseline.py
+++ b/experiments/genetic2baseline.py
@@ -1,6 +1,6 @@
 from uuid import uuid4
 
-from sampo.generator import SimpleSynthetic
+from sampo.generator.base import SimpleSynthetic
 from sampo.scheduler.genetic.base import GeneticScheduler
 from sampo.schemas.contractor import Contractor
 from sampo.schemas.graph import WorkGraph
diff --git a/experiments/genetic_2_multi_agency.py b/experiments/genetic_2_multi_agency.py
index 75daaffb..456a86dc 100644
--- a/experiments/genetic_2_multi_agency.py
+++ b/experiments/genetic_2_multi_agency.py
@@ -1,6 +1,6 @@
 from random import Random
 
-from sampo.generator import SimpleSynthetic
+from sampo.generator.base import SimpleSynthetic
 from sampo.scheduler.genetic.base import GeneticScheduler
 from sampo.scheduler.multi_agency.block_generator import SyntheticBlockGraphType, generate_block_graph
 from sampo.scheduler.multi_agency.multi_agency import Agent, Manager
diff --git a/experiments/modular_examples/multi_agency_comparison.py b/experiments/modular_examples/multi_agency_comparison.py
index 94f933e4..53a762fb 100644
--- a/experiments/modular_examples/multi_agency_comparison.py
+++ b/experiments/modular_examples/multi_agency_comparison.py
@@ -2,7 +2,7 @@
 from random import Random
 from typing import IO
 
-from sampo.generator import SimpleSynthetic
+from sampo.generator.base import SimpleSynthetic
 from sampo.scheduler.base import Scheduler
 from sampo.scheduler.genetic.base import GeneticScheduler
 from sampo.scheduler.heft.base import HEFTBetweenScheduler
diff --git a/pyproject.toml b/pyproject.toml
index 4bea5169..f5b9c098 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sampo"
-version = "0.1.1.225"
+version = "0.1.1.231"
 description = "Open-source framework for adaptive manufacturing processes scheduling"
 authors = ["iAirLab "]
 license = "BSD-3-Clause"
@@ -11,16 +11,15 @@
 
 [tool.poetry.dependencies]
 python = ">=3.10,<3.11"
 sortedcontainers = ">=2.4.0,<2.5.0"
-numpy = ">=1.23.5,<1.24.0"
-pandas = ">=1.5.2,<1.6.0"
+numpy = ">=1.23.5"
+pandas = ">=2.0.0"
 scipy = ">=1.9.3,<1.10.0"
 toposort = ">=1.7,<2.0"
 deap = ">=1.3.3,<1.4.0"
 seaborn = ">=0.12.1,<0.13.0"
 matplotlib = ">=3.6.2,<3.7.0"
-plotly = ">=5.11.0,<5.12.0"
+plotly = ">=5.17.0,<5.18.0"
 pytest = ">=7.2.0,<7.3.0"
-pathos = ">=0.3.0,<0.3.1"
 
 [build-system]
diff --git a/requirements.txt b/requirements.txt
index 3347bce5..5e9ef996 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,12 @@
 sortedcontainers~=2.4.0
 numpy~=1.23.5
-pandas~=1.5.2
+pandas~=2.0.0
 scipy~=1.9.3
 toposort~=1.7
 deap~=1.3.3
 seaborn~=0.12.1
 matplotlib~=3.6.2
-plotly~=5.11.0
+plotly~=5.17.0
 pytest~=7.2.0
 pytest-xdist~=3.1.0
 pathos~=0.3.0
diff --git a/sampo/schemas/schedule.py b/sampo/schemas/schedule.py
index 4bd2f4c5..6c9baa19 100644
--- a/sampo/schemas/schedule.py
+++ b/sampo/schemas/schedule.py
@@ -106,7 +106,7 @@ def merged_stages_datetime_df(self, offset: Union[datetime, str]) -> DataFrame:
         :param offset: Start of schedule, to add as an offset.
         :return: Shifted schedule DataFrame with merged tasks.
         """
-        result = fix_split_tasks(self.offset_schedule(offset))
+        result = self.offset_schedule(offset)
         return result
 
     def offset_schedule(self, offset: Union[datetime, str]) -> DataFrame:
@@ -160,7 +160,7 @@ def sed(time1, time2) -> tuple:
                        ) for i, w in enumerate(works)]
 
         data_frame = DataFrame.from_records(data_frame, columns=Schedule._columns)
-        data_frame = data_frame.set_index('idx')
+        data_frame = data_frame.set_index('idx', drop=False)
 
         if ordered_task_ids:
             data_frame.task_id = data_frame.task_id.astype('category')
diff --git a/sampo/schemas/time_estimator.py b/sampo/schemas/time_estimator.py
index 441bdfcd..9f05dcfd 100644
--- a/sampo/schemas/time_estimator.py
+++ b/sampo/schemas/time_estimator.py
@@ -6,6 +6,7 @@
 
 import numpy.random
 
+from sampo.schemas.requirements import WorkerReq
 from sampo.schemas.resources import Worker
 from sampo.schemas.resources import WorkerProductivityMode
 from sampo.schemas.time import Time
@@ -33,7 +34,8 @@ def set_productivity_mode(self, mode: WorkerProductivityMode = WorkerProductivit
         ...
 
     @abstractmethod
-    def find_work_resources(self, work_name: str, work_volume: float, resource_name: list[str] = None) -> dict[str, int]:
+    def find_work_resources(self, work_name: str, work_volume: float, resource_name: list[str] | None = None) \
+            -> list[WorkerReq]:
         ...
 
     @abstractmethod
@@ -51,10 +53,14 @@ def __init__(self,
         self._productivity_mode = WorkerProductivityMode.Static
 
     def find_work_resources(self, work_name: str, work_volume: float, resource_name: list[str] | None = None) \
-            -> dict[str, int]:
+            -> list[WorkerReq]:
         if resource_name is None:
             resource_name = ['driver', 'fitter', 'manager', 'handyman', 'electrician', 'engineer']
-        return dict((name, numpy.random.poisson(work_volume ** 0.5, 1)[0]) for name in resource_name)
+        return [WorkerReq(kind=name,
+                          volume=work_volume * numpy.random.poisson(work_volume ** 0.5, 1)[0],
+                          min_count=numpy.random.poisson(work_volume ** 0.2, 1)[0],
+                          max_count=numpy.random.poisson(work_volume * 3, 1)[0])
+                for name in resource_name]
 
     def set_estimation_mode(self, use_idle: bool = True, mode: WorkEstimationMode = WorkEstimationMode.Realistic):
         self._use_idle = use_idle
diff --git a/sampo/userinput/parser/csv_parser.py b/sampo/userinput/parser/csv_parser.py
index 915e4a33..c9222bf4 100644
--- a/sampo/userinput/parser/csv_parser.py
+++ b/sampo/userinput/parser/csv_parser.py
@@ -125,7 +125,8 @@ def work_graph_and_contractors(works_info: pd.DataFrame,
         works_info.activity_name = works_info.activity_name.apply(lambda name: unique_work_names_mapper[name])
 
         if contractor_info is None:
-            resources = [work_resource_estimator.find_work_resources(w[0], float(w[1]))
+            resources = [dict((worker_req.name, int(worker_req.volume))
+                              for worker_req in work_resource_estimator.find_work_resources(w[0], float(w[1])))
                          for w in works_info.loc[:, ['activity_name', 'volume']].to_numpy()]
             contractors = [get_contractor_for_resources_schedule(resources,
                                                                  contractor_capacity=contractor_types[i],
                            for i in range(contractors_number)]
         elif isinstance(contractor_info, list):
             contractors = contractor_info
-            resources = [work_resource_estimator.find_work_resources(w[0], float(w[1]))
+            resources = [dict((worker_req.name, int(worker_req.volume))
+                              for worker_req in work_resource_estimator.find_work_resources(w[0], float(w[1])))
                          for w in works_info.loc[:, ['activity_name', 'volume']].to_numpy()]
         else:  # if contractor info is given or contractor info and work resource estimator are received simultaneously
@@ -152,11 +154,8 @@ def work_graph_and_contractors(works_info: pd.DataFrame,
                                   equipments=dict())
                 )
             resource_names = contractor_df.columns[1:].to_list()
-            if len(contractors) == 0 and isinstance(work_resource_estimator, DefaultWorkEstimator):
-                raise InputDataException(
-                    'you have neither info about contractors nor work resource estimator.'
-                )
-            resources = [work_resource_estimator.find_work_resources(w[0], float(w[1]), resource_names)
+            resources = [dict((worker_req.name, int(worker_req.volume))
+                              for worker_req in work_resource_estimator.find_work_resources(w[0], float(w[1]), resource_names))
                          for w in works_info.loc[:, ['activity_name', 'volume']].to_numpy()]
 
             unique_res = list(set(chain(*[r.keys() for r in resources])))
diff --git a/sampo/utilities/schedule.py b/sampo/utilities/schedule.py
index 9ed1451f..cec51e67 100644
--- a/sampo/utilities/schedule.py
+++ b/sampo/utilities/schedule.py
@@ -20,7 +20,7 @@ def fix_split_tasks(baps_schedule_df: pd.DataFrame) -> pd.DataFrame:
         task_stages_df = baps_schedule_df.loc[
             baps_schedule_df.task_id.str.startswith(f'{task_id}{STAGE_SEP}')
             | (baps_schedule_df.task_id == task_id)
-        ]
+            ]
         task_series = merge_split_stages(task_stages_df.reset_index(drop=True))
         df.loc[df.shape[0]] = task_series  # append
 
@@ -37,14 +37,11 @@ def merge_split_stages(task_df: pd.DataFrame) -> pd.Series:
     :param task_df: pd.DataFrame: one real task's stages dataframe, sorted by start time
    :return: pd.Series with the full information about the task
     """
-    if len(task_df) == 1:
-        df = task_df.copy()
-        df['successors'] = [[tuple([x[0].split(STAGE_SEP)[0], x[1]]) for x in df.loc[0, 'successors']]]
-        return df.loc[0, :]
-    else:
+    if len(task_df) > 1:
         task_df = task_df.sort_values(by='task_name_mapped')
         task_df = task_df.reset_index(drop=True)
         df = task_df.copy()
+        df = df.iloc[-1:].reset_index(drop=True)
 
         for column in ['task_id', 'task_name']:
             df.loc[0, column] = df.loc[0, column].split(STAGE_SEP)[0]  # fix task id and name
@@ -53,46 +50,11 @@ def merge_split_stages(task_df: pd.DataFrame) -> pd.Series:
 
         df.loc[0, 'volume'] = sum(task_df.loc[:, 'volume'])
         df.loc[0, 'workers'] = task_df.loc[0, 'workers']
-        # fix connections through all stages
-        fixed_connections_lst = []
-        for connections_lst in task_df.loc[:, 'successors']:
-            for connection in connections_lst:
-                if connection[1] != 'IFS':
-                    fixed_connections_lst.append(tuple([connection[0].split('_')[0], connection[1]]))
-        fixed_connections_lst = list(set(fixed_connections_lst))
-        df.loc[:, 'successors'] = [fixed_connections_lst]
-
         # fix task's start time and duration
         df.loc[0, 'start'] = task_df.loc[0, 'start']
         df.loc[0, 'finish'] = task_df.loc[len(task_df) - 1, 'finish']
         df.loc[0, 'duration'] = (df.loc[0, 'finish'] - df.loc[0, 'start']).days + 1
+    else:
+        df = task_df.copy()
 
-        return df.loc[0, :]
-
-
-def remove_service_tasks(service_schedule_df: pd.DataFrame) -> pd.DataFrame:
-    """
-    Remove 'start', 'finish' and milestone tasks from the schedule
-
-    :param service_schedule_df: pd.DataFrame: schedule (with merges stages in the case of baps) with service tasks
-    :return: pd.DataFrame: schedule without information about service tasks
-    """
-    schedule_df = service_schedule_df.copy()
-
-    service_df = schedule_df.loc[:, 'task_name'].str.contains('start|finish')
-
-    # Prepare list with service tasks ids
-    service_tasks_ids = set(schedule_df.loc[service_df].loc[:, 'task_id'])
-
-    # Remove rows with service tasks from DataFrame
-    schedule_df = schedule_df.loc[~service_df]
-
-    # Fix connections linked to the service tasks
-    fixed_connections_lst = []
-    for connections_lst in schedule_df.loc[:, 'successors']:
-        fixed_connections_lst.append([])
-        for connection in connections_lst:
-            if connection[0] not in service_tasks_ids:
-                fixed_connections_lst[-1].append(connection)
-    schedule_df.loc[:, 'successors'] = pd.Series(fixed_connections_lst)
-    return schedule_df
+    return df.loc[0, :]
diff --git a/sampo/utilities/visualization/schedule.py b/sampo/utilities/visualization/schedule.py
index 1725cade..edb68bff 100644
--- a/sampo/utilities/visualization/schedule.py
+++ b/sampo/utilities/visualization/schedule.py
@@ -32,37 +32,41 @@ def schedule_gant_chart_fig(schedule_dataframe: pd.DataFrame,
     visualization_start_delta = timedelta(days=2)
     visualization_finish_delta = timedelta(days=(schedule_finish - schedule_start).days // 3)
 
-    def create_zone_row(i, work_name, zone_names, zone) -> dict:
+    def create_zone_row(i, zone_names, zone) -> dict:
         return {'idx': i,
                 'contractor': 'Access cards',
                 'cost': 0,
                 'volume': 0,
                 'duration': 0,
                 'measurement': 'unit',
-                'successors': [],
                 'workers_dict': '',
                 'workers': '',
                 'task_name_mapped': zone_names,
                 'task_name': '',
-                'start': timedelta(int(zone.start_time)) + schedule_start - visualization_start_delta + timedelta(1),
-                'finish': timedelta(int(zone.end_time)) + schedule_start - visualization_start_delta + timedelta(1)}
+                'zone_information': '',
+                'start': timedelta(int(zone.start_time)) + schedule_start,
+                'finish': timedelta(int(zone.end_time)) + schedule_start}
 
     sworks = schedule_dataframe['scheduled_work_object'].copy()
     idx = schedule_dataframe['idx'].copy()
 
     def get_zone_usage_info(swork) -> str:
-        return '<br>' + '<br>'.join([f'{zone.kind}: {zone.required_status}' for zone in swork.work_unit.zone_reqs])
+        return '<br>' + '<br>'.join([f'{zone.name}: {zone.to_status}' for zone in swork.zones_pre])
 
     schedule_dataframe['zone_information'] = sworks.apply(get_zone_usage_info)
 
+    access_cards = []
+
     # create zone information
     for i, swork in zip(idx, sworks):
         zone_names = '<br>' + '<br>'.join([zone.name for zone in swork.zones_pre])
         for zone in swork.zones_pre:
-            schedule_dataframe = schedule_dataframe.append(create_zone_row(i, swork.work_unit.name, zone_names, zone), ignore_index=True)
+            access_cards.append(create_zone_row(i, zone_names, zone))
         zone_names = '<br>' + '<br>'.join([zone.name for zone in swork.zones_post])
         for zone in swork.zones_post:
-            schedule_dataframe = schedule_dataframe.append(create_zone_row(i, swork.work_unit.name, zone_names, zone), ignore_index=True)
+            access_cards.append(create_zone_row(i, zone_names, zone))
+
+    schedule_dataframe = pd.concat([schedule_dataframe, pd.DataFrame.from_records(access_cards)])
 
     schedule_dataframe['color'] = schedule_dataframe[['task_name', 'contractor']] \
         .apply(lambda r: 'Defect' if ':' in r['task_name'] else r['contractor'], axis=1)
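Not part of the patch: a minimal usage sketch of the reworked estimator API changed above, where find_work_resources() now returns a list of WorkerReq objects instead of a dict[str, int]. The work name and volume are made-up demo values, and DefaultWorkEstimator is assumed to be constructible with its default arguments; the dict flattening at the end is similar to what csv_parser.py now does in this diff.

# usage_sketch.py -- illustrative only, not shipped with this change
from sampo.schemas.time_estimator import DefaultWorkEstimator

estimator = DefaultWorkEstimator()

# Each WorkerReq carries the worker kind, the required volume and min/max crew counts
worker_reqs = estimator.find_work_resources(work_name='excavation', work_volume=120.0)

# Rebuild the old {kind: count} view for callers that still expect the pre-change shape
resources = {req.kind: int(req.volume) for req in worker_reqs}
print(resources)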