
Neural network multi-agent system #58

Merged
merged 48 commits from feature/nn into main
Nov 1, 2023
Changes from 1 commit
Commits (48)
1a2386a
The test NeuralNetwork with target architecture
StannisMod Aug 22, 2023
9db1442
First version of dataset generator
StannisMod Aug 23, 2023
13eacd8
Merge branch 'main' into feature/nn
StannisMod Aug 23, 2023
ea5c01b
Made dataset loader
StannisMod Aug 23, 2023
b8d0bb9
Received first nn results
StannisMod Aug 23, 2023
ede54f9
Added neural network to multi-agency comparison
StannisMod Aug 23, 2023
285827a
Made NeuralNet module
StannisMod Aug 24, 2023
ca4f8a3
New graph metrics, fully configurable nn
StannisMod Aug 24, 2023
d585699
Added new features
vanoha01 Aug 30, 2023
5494722
Parallel generation introduced
vanoha01 Aug 30, 2023
23d545f
Parallel generation introduced
vanoha01 Aug 31, 2023
6b126f7
Parallel generation introduced
vanoha01 Aug 31, 2023
e1724fd
Parallel generation introduced
vanoha01 Aug 31, 2023
21e084e
Merge remote-tracking branch 'origin/main' into feature/nn
vanoha01 Sep 1, 2023
8a8eefd
Merge remote-tracking branch 'origin/main' into feature/nn
StannisMod Sep 1, 2023
3cd083b
WorkGraph and GraphNode delete function introduced
vanoha Sep 3, 2023
c3c1fc2
-
StannisMod Sep 4, 2023
853e4d5
Merge branch 'feature/nn' of https://github.com/Industrial-AI-Researc…
StannisMod Sep 4, 2023
e5fd20e
Merge remote-tracking branch 'origin/feature/nn' into feature/nn
vanoha01 Sep 4, 2023
a78a257
Cosmetic changes
vanoha01 Sep 4, 2023
676a4b5
Cosmetic changes
vanoha01 Sep 4, 2023
a37357a
Cosmetic changes
vanoha01 Sep 5, 2023
db31efd
Delete sklearn from requirements.txt due to 'pip install' deny to ins…
vanoha01 Sep 5, 2023
335dcbe
Normalized metrics
vanoha01 Sep 11, 2023
13fd021
Change hyperparameters of net
vanoha01 Sep 12, 2023
d443b0e
Found good hyperparameters.
vanoha01 Sep 13, 2023
649ba95
Hyper parameters search was constructed with the Ray Tune framework.
vanoha01 Sep 22, 2023
3ad132a
Pre-trained models are added
vanoha01 Sep 22, 2023
d083306
Added dataset with 10.000 objects
vanoha01 Sep 26, 2023
da04ad8
Comparison of neural network and multi agency algo presented
vanoha01 Oct 10, 2023
3605cd9
New neural network presented.
vanoha01 Oct 10, 2023
77c06bb
Refactored neural manager and added grid search for a regression task
vanoha01 Oct 16, 2023
5895738
Refactored grid search for classification in accordance to new arhite…
vanoha01 Oct 16, 2023
72c7b03
Refactored code
vanoha01 Oct 16, 2023
6b0dfe6
Refactored grid search.
vanoha01 Oct 18, 2023
a8fe281
Upgraded experiments neural manager and merely manager of MA
vanoha01 Oct 20, 2023
c851a3c
Upgraded experiments neural manager and merely manager of MA
vanoha01 Oct 20, 2023
5d82f96
Merge remote-tracking branch 'origin/feature/nn' into feature/nn
vanoha01 Oct 20, 2023
e731cdf
Upgraded experiments neural manager and merely manager of MA
vanoha01 Oct 20, 2023
a26d470
Cosmetic changes
vanoha01 Oct 23, 2023
be86e94
Cosmetic changes
vanoha01 Oct 23, 2023
05e51a9
Merge remote-tracking branch 'origin/main' into feature/nn
vanoha01 Oct 24, 2023
f96ec8f
Merge remote-tracking branch 'origin/main' into feature/nn
vanoha01 Oct 24, 2023
392865f
Cosmetic changes
vanoha01 Oct 25, 2023
fb69437
Merge remote-tracking branch 'origin/feature/nn' into feature/nn
vanoha01 Oct 25, 2023
38e7b3c
Added datasets
vanoha01 Oct 25, 2023
3b16d08
Cosmetic changes based on GitHub requests
vanoha01 Oct 26, 2023
b9c3cad
Merge branch 'main' into feature/nn
Timotshak Oct 31, 2023
Cosmetic changes based on GitHub requests
vanoha01 committed Oct 26, 2023
commit 3b16d0821cb4723d750bae96b0f836e5ee1d53b1
43 changes: 24 additions & 19 deletions experiments/neural_network/grid_search_cv_classification.py
@@ -12,8 +12,6 @@
from sampo.scheduler.selection.neural_net import NeuralNetTrainer, NeuralNet, NeuralNetType
from sampo.scheduler.selection.validation import cross_val_score

# from ray.train import Checkpoint

path = os.path.join(os.getcwd(), 'datasets/wg_algo_dataset_10k.csv')
dataset = pd.read_csv(path, index_col='index')
for col in dataset.columns[:-1]:
@@ -34,8 +32,13 @@
scaled_dataset = pd.DataFrame(scaled_dataset, columns=x_ts.columns)
x_ts = scaled_dataset

def train(config):
# checkpoint = session.get_checkpoint()

def train(config: dict) -> None:
"""
Training function for the Ray Tune process

:param config: search space of the model's hyperparameters
"""
model = NeuralNet(input_size=13,
layer_size=config['layer_size'],
layer_count=config['layer_count'],
@@ -44,29 +47,29 @@ def train(config):
optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
scorer = torchmetrics.classification.BinaryAccuracy()
net = NeuralNetTrainer(model, criterion, optimizer, scorer, 2)
device = 'cpu'

x_train, x_test, y_train, y_test = x_tr, x_ts, y_tr, y_ts
best_trainer: NeuralNetTrainer | None = None
score, best_loss, best_trainer = cross_val_score(X=x_train,
y=y_train,
model=net,
epochs=config['epochs'],
folds=config['cv'],
shuffle=True,
type_task=NeuralNetType.CLASSIFICATION)
# Checkpoint - structure of the saved model
checkpoint_data = {
'model_state_dict': best_trainer.model.state_dict(),
'optimizer_state_dict': best_trainer.optimizer.state_dict()
}
checkpoint = Checkpoint.from_dict(checkpoint_data)
# Report loss and score as intermediate metrics
session.report({'loss': best_loss, 'score': score}, checkpoint=checkpoint)
print('accuracy:', score)
print('Finished Training')
print('------------------------------------------------------------------------')


def best_model(best_trained_model):
def best_test_score(best_trained_model: NeuralNetTrainer) -> None:
x_train, x_test, y_train, y_test = x_tr, x_ts, y_tr, y_ts

predicted = best_trained_model.predict_proba([torch.Tensor(v) for v in x_test.values])
@@ -79,14 +82,11 @@ def best_model(best_trained_model):


def main():
# Dict represents the search space by model's hyperparameters
config = {
# 'iters': tune.grid_search([i for i in range(15)]),
'layer_size': tune.grid_search([i for i in range(10, 11)]),
# 'layer_size': tune.qrandint(5, 30),
# 'layer_count': tune.qrandint(5, 35),
'layer_count': tune.grid_search([i for i in range(5, 6)]),
'lr': tune.loguniform(1e-5, 1e-3),
# 'lr': tune.grid_search([0.0001, 0.000055, 0.000075, 0.000425]),
'epochs': tune.grid_search([2]),
'cv': tune.grid_search([5])
}
@@ -103,6 +103,7 @@ def main():
metric_columns=['loss', 'score']
)

# Here you can change the number of CPUs you want to use for tuning
result = tune.run(
train,
resources_per_trial={'cpu': 6},
@@ -112,6 +113,7 @@ def main():
progress_reporter=reporter,
)

# Receive the trial with the best results
best_trial = result.get_best_trial('loss', 'min', 'last')
best_checkpoint = best_trial.checkpoint.to_air_checkpoint()
best_checkpoint_data = None
@@ -120,22 +122,25 @@ def main():
except Exception as e:
Exception(f'{best_checkpoint} with {e}')

best_trained_model = NeuralNet(13, layer_size=best_trial.config['layer_size'],
# Construct the best trainer based on the best checkpoint data
best_trained_model = NeuralNet(input_size=13,
layer_size=best_trial.config['layer_size'],
layer_count=best_trial.config['layer_count'],
out_size=2,
task_type=NeuralNetType.CLASSIFICATION)
best_trained_model.load_state_dict(best_checkpoint_data['model_state_dict'])
best_trained_optimizer = torch.optim.Adam(best_trained_model.model.parameters(), lr=best_trial.config['lr'])
best_trained_optimizer.load_state_dict(best_checkpoint_data['optimizer_state_dict'])
best_trainer = NeuralNetTrainer(best_trained_model, torch.nn.CrossEntropyLoss(), best_trained_optimizer,
scorer=torchmetrics.classification.BinaryAccuracy(), batch_size=2)

best_model(best_trainer)
best_trainer = NeuralNetTrainer(best_trained_model,
torch.nn.CrossEntropyLoss(),
best_trained_optimizer,
scorer=torchmetrics.classification.BinaryAccuracy(),
batch_size=2)

f = open(os.path.join(os.getcwd(), 'checkpoints/best_model_10k_algo.pth'), 'w')
f.close()
# Print score of the best trainer on test sample
best_test_score(best_trainer)

best_trainer.save_checkpoint(os.path.join(os.getcwd(), 'checkpoints/'), 'best_model_10k_algo.pth')
best_trainer.save_checkpoint(os.path.join(os.getcwd(), 'checkpoints/'), '1.pth')

print(f'Best trial config: {best_trial.config}')
print(f'Best trial validation loss: {best_trial.last_result["loss"]}')
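Reviewer note: a condensed sketch of the tuning flow this file now follows, not the file itself. The synthetic x_tr/y_tr stand in for the train split loaded from wg_algo_dataset_10k.csv, and the session/Checkpoint imports assume the Ray 2.x AIR API that the diff appears to use; the SAMPO names (NeuralNet, NeuralNetTrainer, cross_val_score) follow the signatures visible above.

```python
import numpy as np
import pandas as pd
import torch
import torchmetrics
from ray import tune
from ray.air import session, Checkpoint

from sampo.scheduler.selection.neural_net import NeuralNet, NeuralNetTrainer, NeuralNetType
from sampo.scheduler.selection.validation import cross_val_score

# Stand-in for the module-level train split built from wg_algo_dataset_10k.csv
x_tr = pd.DataFrame(np.random.rand(64, 13))
y_tr = pd.Series(np.random.randint(0, 2, size=64))


def train(config: dict) -> None:
    """One Ray Tune trial: cross-validate a single hyperparameter combination."""
    model = NeuralNet(input_size=13,
                      layer_size=config['layer_size'],
                      layer_count=config['layer_count'],
                      out_size=2,
                      task_type=NeuralNetType.CLASSIFICATION)
    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
    trainer = NeuralNetTrainer(model, torch.nn.CrossEntropyLoss(), optimizer,
                               torchmetrics.classification.BinaryAccuracy(), 2)

    score, best_loss, best_trainer = cross_val_score(X=x_tr, y=y_tr, model=trainer,
                                                     epochs=config['epochs'],
                                                     folds=config['cv'],
                                                     shuffle=True,
                                                     type_task=NeuralNetType.CLASSIFICATION)

    # The best fold's weights go back to Tune as a checkpoint so that main()
    # can rebuild the winning trainer after the search finishes.
    checkpoint = Checkpoint.from_dict({
        'model_state_dict': best_trainer.model.state_dict(),
        'optimizer_state_dict': best_trainer.optimizer.state_dict(),
    })
    session.report({'loss': best_loss, 'score': score}, checkpoint=checkpoint)


if __name__ == '__main__':
    tune.run(train,
             config={'layer_size': tune.grid_search([10]),
                     'layer_count': tune.grid_search([5]),
                     'lr': tune.loguniform(1e-5, 1e-3),
                     'epochs': tune.grid_search([2]),
                     'cv': tune.grid_search([5])},
             resources_per_trial={'cpu': 6},
             num_samples=1)
```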
43 changes: 15 additions & 28 deletions experiments/neural_network/grid_search_cv_regression.py
@@ -22,20 +22,12 @@
x_tr, x_ts, y_tr, y_ts = train_test_split(dataset.drop(columns=['label']), dataset['label'])


# scaler = StandardScaler()
# scaler.fit(x_tr)
# scaled_dataset = scaler.transform(x_tr)
# scaled_dataset = pd.DataFrame(scaled_dataset, columns=x_tr.columns)
# x_tr = scaled_dataset
#
# scaler = StandardScaler()
# scaler.fit(x_ts)
# scaled_dataset = scaler.transform(x_ts)
# scaled_dataset = pd.DataFrame(scaled_dataset, columns=x_ts.columns)
# x_ts = scaled_dataset

def train(config):
checkpoint = session.get_checkpoint()
def train(config: dict) -> None:
"""
Training function for the Ray Tune process

:param config: search space of the model's hyperparameters
"""
model = NeuralNet(input_size=13,
layer_size=config['layer_size'],
layer_count=config['layer_count'],
@@ -45,29 +37,29 @@ def train(config):
optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
scorer = torchmetrics.regression.MeanSquaredError()
net = NeuralNetTrainer(model, criterion, optimizer, scorer, 32)
device = 'cpu'

x_train, x_test, y_train, y_test = x_tr, x_ts, y_tr, y_ts
best_trainer: NeuralNetTrainer | None = None
score, best_loss, best_trainer = cross_val_score(X=x_train,
y=y_train,
model=net,
epochs=config['epochs'],
folds=config['cv'],
shuffle=True,
type_task=NeuralNetType.REGRESSION)
# Checkpoint - structure of the saved model
checkpoint_data = {
'model_state_dict': best_trainer.model.state_dict(),
'optimizer_state_dict': best_trainer.optimizer.state_dict()
}
# Report loss and score as intermediate metrics
checkpoint = Checkpoint.from_dict(checkpoint_data)
session.report({'loss': best_loss, 'score': score}, checkpoint=checkpoint)
print('MSE:', score)
print('Finished Training')
print('------------------------------------------------------------------------')


def best_model(best_trained_model):
def best_test_score(best_trained_model: NeuralNetTrainer) -> None:
x_train, x_test, y_train, y_test = x_tr, x_ts, y_tr, y_ts

predicted = best_trained_model.predict([torch.Tensor(v) for v in x_test.values])
@@ -79,18 +71,14 @@ def best_model(best_trained_model):


def main():
# Dict represents the search space by model's hyperparameters
config = {
'iters': tune.grid_search([i for i in range(1)]),
# 'layer_size': 5,
'layer_size': tune.qrandint(5, 30),
'layer_count': tune.qrandint(5, 35),
# 'layer_count': 5,
'lr': tune.loguniform(1e-4, 1e-1),
# 'lr': 0.001,
'epochs': tune.grid_search([2]),
# 'epochs': 10,
'cv': tune.grid_search([2]),
# 'cv': 5
}

scheduler = ASHAScheduler(
@@ -105,6 +93,7 @@ def main():
metric_columns=['loss', 'score']
)

# Here you can change the number of CPUs you want to use for tuning
result = tune.run(
train,
resources_per_trial={'cpu': 6},
@@ -114,6 +103,7 @@ def main():
progress_reporter=reporter,
)

# Receive the trial with the best results
best_trial = result.get_best_trial('loss', 'min', 'last')
best_checkpoint = best_trial.checkpoint.to_air_checkpoint()
best_checkpoint_data = None
@@ -122,6 +112,7 @@ def main():
except Exception as e:
Exception(f'{best_checkpoint} with {e}')

# Construct the best trainer based on the best checkpoint data
best_trained_model = NeuralNet(13, layer_size=best_trial.config['layer_size'],
layer_count=best_trial.config['layer_count'],
out_size=6)
@@ -131,19 +122,15 @@ def main():
scorer = torchmetrics.regression.MeanSquaredError()
best_trainer = NeuralNetTrainer(best_trained_model, torch.nn.CrossEntropyLoss(), best_trained_optimizer, scorer, 32)

best_model(best_trainer)

f = open(os.path.join(os.getcwd(), 'checkpoints/best_model_wg_and_contractor.pth'), 'w')
f.close()
# Print score of the best trainer on test sample
best_test_score(best_trainer)

best_trainer.save_checkpoint(os.path.join(os.getcwd(), 'checkpoints/'), 'best_model_wg_and_contractor.pth')

print(f'Best trial config: {best_trial.config}')
print(f'Best trial validation loss: {best_trial.last_result["loss"]}')
print(f'Best trial final validation accuracy: {best_trial.last_result["score"]}')

# train(config)


if __name__ == '__main__':
main()
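Reviewer note: to make the checkpoint round-trip in main() easier to follow, a sketch of rebuilding the winning regression trainer from the reported checkpoint. The best_trial.checkpoint.to_air_checkpoint().to_dict() chain mirrors the diff and assumes Ray 2.x; out_size=6, the MSE scorer and batch size 32 follow the regression setup above. The resulting trainer can then be persisted with best_trainer.save_checkpoint(...) exactly as the script does.

```python
import torch
import torchmetrics

from sampo.scheduler.selection.neural_net import NeuralNet, NeuralNetTrainer


def rebuild_best_trainer(best_trial) -> NeuralNetTrainer:
    """Turn the best trial's reported checkpoint back into a ready-to-use trainer."""
    checkpoint_data = best_trial.checkpoint.to_air_checkpoint().to_dict()

    model = NeuralNet(input_size=13,
                      layer_size=best_trial.config['layer_size'],
                      layer_count=best_trial.config['layer_count'],
                      out_size=6)
    model.load_state_dict(checkpoint_data['model_state_dict'])

    optimizer = torch.optim.Adam(model.parameters(), lr=best_trial.config['lr'])
    optimizer.load_state_dict(checkpoint_data['optimizer_state_dict'])

    return NeuralNetTrainer(model, torch.nn.CrossEntropyLoss(), optimizer,
                            torchmetrics.regression.MeanSquaredError(), 32)
```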
16 changes: 11 additions & 5 deletions experiments/neural_network/wg_algo_dataset_generation.py
@@ -28,6 +28,9 @@ def argmin(array) -> int:


def display_top(snapshot, key_type='lineno', limit=3):
"""
For tracking the volume of RAM used
"""
snapshot = snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<unknown>"),
@@ -44,7 +47,7 @@ def display_top(snapshot, key_type='lineno', limit=3):
print("Total allocated size: %.1f KiB" % (total / 1024))


def generate():
def generate() -> tuple:
wg = ss.work_graph(top_border=GRAPHS_TOP_BORDER)
encoding = encode_graph(wg)
schedulers_results = [int(scheduler.schedule(wg, contractors).execution_time) for scheduler in schedulers]
@@ -55,13 +58,15 @@ def generate():
return generated_label, encoding


def generate_graph(label: int):
def generate_graph(label: int) -> tuple:
while True:
tracemalloc.start()
# Uncomment for tracking the volume of RAM used
# tracemalloc.start()
generated_label, encoding = generate()
if generated_label == label:
snapshot = tracemalloc.take_snapshot()
display_top(snapshot)
# Uncomment for tracking the volume of RAM used
# snapshot = tracemalloc.take_snapshot()
# display_top(snapshot)
print(f'{generated_label} processed')
return tuple([encoding, generated_label])

@@ -70,6 +75,7 @@ def generate_graph(label: int):
result = []
with Pool() as pool:
for i_scheduler in range(len(schedulers)):
# int(GRAPHS_COUNT / 4) - number of parallel processes
tasks = [[i_scheduler] * int(GRAPHS_COUNT / 4)] * 4
for task in tasks:
result.extend(pool.map(generate_graph, task))
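Reviewer note: the parallel generation loop at the bottom of this file reduces to the pattern below; generate_graph() is stubbed and GRAPHS_COUNT is an illustrative value, so only the Pool fan-out is visible on its own.

```python
# Sketch of the parallel dataset-generation pattern; the SAMPO synthetic-graph
# machinery is replaced by a stub so the multiprocessing shape stands alone.
from multiprocessing import Pool

GRAPHS_COUNT = 40          # total graphs per scheduler (illustrative value)
SCHEDULER_COUNT = 4        # one class label per scheduler


def generate_graph(label: int) -> tuple:
    # In the real script this keeps sampling synthetic work graphs until the
    # argmin over all schedulers' makespans equals `label`.
    encoding = [float(label)] * 13  # placeholder for encode_graph(wg)
    return encoding, label


if __name__ == '__main__':
    result = []
    with Pool() as pool:
        for label in range(SCHEDULER_COUNT):
            # GRAPHS_COUNT // 4 tasks per chunk, four chunks per label,
            # fanned out across the worker pool.
            tasks = [[label] * (GRAPHS_COUNT // 4)] * 4
            for task in tasks:
                result.extend(pool.map(generate_graph, task))
    print(len(result), 'samples generated')
```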
13 changes: 8 additions & 5 deletions experiments/neural_network/wg_contractor_dataset_generation.py
@@ -14,7 +14,10 @@
ss = SimpleSynthetic()


def display_top(snapshot, key_type='lineno', limit=3):
def display_top(snapshot, key_type='lineno', limit=3) -> None:
"""
For tracking the volume of RAM used
"""
snapshot = snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<unknown>"),
@@ -38,28 +41,28 @@ def get_resources_from_contractor(contractor: Contractor) -> list[int]:
return resources


def generate(index: int):
def generate(index: int) -> tuple:
wg = ss.work_graph(top_border=GRAPHS_TOP_BORDER)
encoding = encode_graph(wg)
contractor = get_contractor_by_wg(wg)
resources = get_resources_from_contractor(contractor)
del wg
print('Generated')

return tuple([encoding, resources])


if __name__ == '__main__':
result = []
# GRAPHS_COUNT // 4 - number of parallel processes
graph_index = [[0] * (GRAPHS_COUNT // 4)] * 4

with Pool() as pool:
for i_graph in graph_index:
result.extend(pool.map(generate, i_graph))

dataset_transpose = np.array(result, dtype=object).T
df = pd.DataFrame.from_records(dataset_transpose[0])
df['label'] = dataset_transpose[1]
df['label'] = df['label'].apply(lambda x: ' '.join(str(i) for i in x))
df.fillna(value=0, inplace=True)
# dataset_size = min(df.groupby('label', group_keys=False).apply(lambda x: len(x)))
# df = df.groupby('label', group_keys=False).apply(lambda x: x.sample(dataset_size))
df.to_csv('datasets/wg_contractor_dataset_100000_objs.csv', index_label='index')
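Reviewer note: the final assembly step — turning (encoding, resources) pairs into a CSV with a space-joined label column — sketched with two stand-in records instead of the generated graphs; the output path is illustrative.

```python
# Each record is (graph_encoding, contractor_resources); the multi-valued
# resource vector is stored as a single space-joined string in 'label'.
import numpy as np
import pandas as pd

result = [([0.1] * 13, [5, 3, 7]),
          ([0.2] * 13, [2, 8, 4])]

dataset_transpose = np.array(result, dtype=object).T
df = pd.DataFrame.from_records(dataset_transpose[0])   # 13 encoding columns
df['label'] = dataset_transpose[1]                      # list-valued column
df['label'] = df['label'].apply(lambda x: ' '.join(str(i) for i in x))
df.fillna(value=0, inplace=True)
df.to_csv('wg_contractor_dataset_sample.csv', index_label='index')
```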
2 changes: 0 additions & 2 deletions experiments/neural_network_2_multi_agency.py
@@ -105,8 +105,6 @@ def run_interation(iter: int, blocks_num: int = 10, graph_size: int = 200) -> No
print(f'Neural Multi-agency res: {max(sblock.end_time for sblock in scheduled_blocks.values())}')
print(f'Times of systems:')
print(f'Multi-agency time is {ma_time} and neural network is {net_time}')
del bg1
del bg


if __name__ == '__main__':
19 changes: 6 additions & 13 deletions sampo/scheduler/selection/metrics.py
@@ -1,3 +1,4 @@
from collections import defaultdict
from math import ceil

import numpy as np
@@ -27,22 +28,14 @@ def metric_resource_constrainedness(wg: WorkGraph) -> list[float]:
:param wg: Work graph
:return: List of RC coefficients for each resource type
"""
rc_coefs = []
resource_dict = {}
resource_dict = defaultdict(lambda: [0, 0])

for node in wg.nodes:
for req in node.work_unit.worker_reqs:
resource_dict[req.kind] = {'activity_amount': 1, 'volume': 0}
resource_dict[req.kind][0] += 1
resource_dict[req.kind][1] += req.volume

for node in wg.nodes:
for req in node.work_unit.worker_reqs:
resource_dict[req.kind]['activity_amount'] += 1
resource_dict[req.kind]['volume'] += req.volume

for name, value in resource_dict.items():
rc_coefs.append(value['activity_amount'] / value['volume'])

return rc_coefs
return [value[0] / value[1] for name, value in resource_dict.items()]


def metric_graph_parallelism_degree(wg: WorkGraph) -> list[float]:
@@ -84,7 +77,7 @@ def metric_graph_parallelism_degree(wg: WorkGraph) -> list[float]:

def metric_longest_path(wg: WorkGraph) -> float:
scheduler = TopologicalScheduler()
stack = scheduler._topological_sort(wg, None)
stack = scheduler.prioritization(wg, None)

dist = {}
for node in stack:
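Reviewer note: the refactored resource-constrainedness metric now aggregates in a single pass over a defaultdict. The same logic, with worker requirements stubbed as plain (kind, volume) tuples instead of a WorkGraph, so it runs standalone:

```python
from collections import defaultdict


def resource_constrainedness(worker_reqs: list[tuple[str, float]]) -> list[float]:
    # value[0] = number of activities that use the resource, value[1] = total volume
    resource_dict = defaultdict(lambda: [0, 0.0])
    for kind, volume in worker_reqs:
        resource_dict[kind][0] += 1
        resource_dict[kind][1] += volume
    return [count / volume for count, volume in resource_dict.values()]


print(resource_constrainedness([('driver', 4), ('driver', 6), ('welder', 5)]))
# -> [0.2, 0.2]
```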
16 changes: 6 additions & 10 deletions sampo/scheduler/selection/neural_net.py
@@ -25,20 +25,16 @@ def __init__(self, input_size: int = 13, layer_size: int = 15, layer_count: int
self._linear0 = torch.nn.Linear(input_size, layer_size)
self.model = nn.Sequential(self._linear0)
for i in range(layer_count):
self.__dict__[f'_linear{i + 1}'] = torch.nn.Linear(layer_size, layer_size)
self.model.add_module(name=f'_relu',
module=torch.nn.ReLU())
self.model.add_module(name=f'_linear{i + 1}',
module=self.__dict__[f'_linear{i + 1}'])
self.__dict__[f'_linear{layer_count + 1}'] = torch.nn.Linear(layer_size, out_size)
module=torch.nn.Linear(layer_size, layer_size))
self.model.add_module(name=f'_linear{layer_count + 1}',
module=self.__dict__[f'_linear{layer_count + 1}'])
module=torch.nn.Linear(layer_size, out_size))

def forward(self, X):
X = self._linear0(X)
for i in range(1, self._layers_count + 2):
linear = self.__dict__[f'_linear{i}']
X = F.relu(X)
X = linear(X)
if self.task_type is NeuralNetType.CLASSIFICATION:
X = self.model(X)
if self.task_type == NeuralNetType.CLASSIFICATION:
X = F.softmax(X, dim=0)
else:
X = X
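Reviewer note: after this change every layer lives inside one nn.Sequential and forward() simply delegates to it, with softmax applied only for classification. A toy equivalent (not the SAMPO class itself), built the version-safe way from a layer list:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyNet(nn.Module):
    """Stand-in for the refactored NeuralNet: layers live only in self.model."""

    def __init__(self, input_size: int = 13, layer_size: int = 15,
                 layer_count: int = 6, out_size: int = 2, classification: bool = True):
        super().__init__()
        self.classification = classification
        layers = [nn.Linear(input_size, layer_size)]
        for _ in range(layer_count):
            layers += [nn.ReLU(), nn.Linear(layer_size, layer_size)]
        layers.append(nn.Linear(layer_size, out_size))
        self.model = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.model(x)
        # Softmax only for the classification task, as in the diff (dim=0 for a 1-D sample)
        return F.softmax(x, dim=0) if self.classification else x


print(TinyNet()(torch.rand(13)).shape)  # torch.Size([2])
```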
15 changes: 5 additions & 10 deletions sampo/scheduler/selection/validation.py
@@ -31,18 +31,13 @@ def cross_val_score(X: pd.DataFrame,
scores = 0
best_loss = 0
best_trainer: NeuralNetTrainer | None = None
transform_data = lambda x: one_hot_encode(x, 2) if type_task == NeuralNetType.CLASSIFICATION else x

for fold, (train_idx, test_idx) in enumerate(kf.split(X)):
if type_task == NeuralNetType.CLASSIFICATION:
train_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[train_idx, :].values])
train_target_tensor = torch.stack([torch.Tensor(one_hot_encode(v, 2)) for v in y.iloc[train_idx].values])
test_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[test_idx, :].values])
test_target_tensor = torch.stack([torch.Tensor(one_hot_encode(v, 2)) for v in y.iloc[test_idx].values])
else:
train_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[train_idx, :].values])
train_target_tensor = torch.stack([torch.Tensor(v) for v in y.iloc[train_idx].values])
test_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[test_idx, :].values])
test_target_tensor = torch.stack([torch.Tensor(v) for v in y.iloc[test_idx].values])
train_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[train_idx, :].values])
train_target_tensor = torch.stack([torch.Tensor(transform_data(v)) for v in y.iloc[train_idx].values])
test_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[test_idx, :].values])
test_target_tensor = torch.stack([torch.Tensor(transform_data(v)) for v in y.iloc[test_idx].values])

model.fit(train_tensor, train_target_tensor, epochs)
tmp_score, loss = model.validate(test_tensor, test_target_tensor)
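Reviewer note: the unified fold loop this diff introduces, sketched with scikit-learn's KFold, random stand-in data and a stubbed one_hot_encode; only the classification branch of the target transform is exercised, and the fit/validate calls are left as comments since they belong to NeuralNetTrainer.

```python
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import KFold

CLASSIFICATION = True


def one_hot_encode(label, n_classes: int) -> list:
    vec = [0.0] * n_classes
    vec[int(label)] = 1.0
    return vec


X = pd.DataFrame(np.random.rand(20, 13))
y = pd.Series(np.random.randint(0, 2, size=20))

# Pick the target transform once instead of branching inside the loop
transform = (lambda v: one_hot_encode(v, 2)) if CLASSIFICATION else (lambda v: v)

for fold, (train_idx, test_idx) in enumerate(KFold(n_splits=5, shuffle=True).split(X)):
    train_tensor = torch.stack([torch.Tensor(v) for v in X.iloc[train_idx, :].values])
    train_target = torch.stack([torch.Tensor(transform(v)) for v in y.iloc[train_idx].values])
    # ... model.fit(train_tensor, train_target, epochs) / model.validate(...) as in the diff
    print(fold, train_tensor.shape, train_target.shape)
```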
41 changes: 15 additions & 26 deletions sampo/schemas/graph.py
@@ -1,6 +1,7 @@
from collections import deque
from dataclasses import dataclass, field
from enum import Enum
from functools import cached_property, cache
from typing import Optional

import numpy as np
@@ -54,11 +55,6 @@ def __init__(self, work_unit: WorkUnit,
self.add_parents(parent_works)
self._children_edges = []

def __del__(self):
# print(f'Deleted node: {self.id}')
for k in list(self.__dict__):
del self.__dict__[k]

def __hash__(self) -> int:
return hash(self.id)

@@ -135,9 +131,6 @@ def is_inseparable_parent(self) -> bool:
def is_inseparable_son(self) -> bool:
return self.inseparable_parent is not None

def is_service(self) -> bool:
return len(self.parents) == 0

def traverse_children(self, topologically: bool = False):
"""
DFS from current vertex to down
@@ -156,8 +149,7 @@ def traverse_children(self, topologically: bool = False):
vertexes_to_visit.extend([p.finish for p in v._children_edges])
yield v

# @cached_property
@property
@cached_property
def inseparable_son(self) -> Optional['GraphNode']:
"""
Return inseparable son (amount of inseparable sons at most 1)
@@ -167,8 +159,7 @@ def inseparable_son(self) -> Optional['GraphNode']:
if x.type == EdgeType.InseparableFinishStart]
return inseparable_children[0] if inseparable_children else None

# @cached_property
@property
@cached_property
def inseparable_parent(self) -> Optional['GraphNode']:
"""
Return predecessor of current vertex in inseparable chain
@@ -177,44 +168,39 @@ def inseparable_parent(self) -> Optional['GraphNode']:
inseparable_parents = [x.start for x in self._parent_edges if x.type == EdgeType.InseparableFinishStart]
return inseparable_parents[0] if inseparable_parents else None

# @cached_property
@property
@cached_property
def parents(self) -> list['GraphNode']:
"""
Return list of predecessors of current vertex
:return: list of parents
"""
return [edge.start for edge in self.edges_to if EdgeType.is_dependency(edge.type)]

# @cached_property
@property
@cached_property
def parents_set(self) -> set['GraphNode']:
"""
Return unique predecessors of current vertex
:return: set of parents
"""
return set(self.parents)

# @cached_property
@property
@cached_property
def children(self) -> list['GraphNode']:
"""
Return list of successors of current vertex
:return: list of children
"""
return [edge.finish for edge in self.edges_from if EdgeType.is_dependency(edge.type)]

# @cached_property
@property
@cached_property
def children_set(self) -> set['GraphNode']:
"""
Return unique successors of current vertex
:return: set of children
"""
return set(self.children)

# @cached_property
@property
@cached_property
def neighbors(self):
"""
Get all edges that have types SS with current vertex
@@ -242,7 +228,7 @@ def work_unit(self) -> WorkUnit:
def id(self) -> str:
return self.work_unit.id

# @cache
@cache
def get_inseparable_chain(self) -> Optional[list['GraphNode']]:
"""
Gets an ordered list of whole chain of nodes, connected with edges of type INSEPARABLE_FINISH_START =
@@ -342,9 +328,12 @@ def __setstate__(self, state):
self.__post_init__()

def __del__(self):
# print(f'Deleting graph with {self.vertex_count} nodes')
for k in list(self.__dict__):
del self.__dict__[k]
self.dict_nodes = None
self.start = None
self.finish = None
for node in self.nodes:
node._parent_edges = None
node._children_edges = None

def _serialize(self) -> T:
return {
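Reviewer note: a toy illustration of the two graph.py changes — derived node attributes memoized with functools.cached_property / functools.cache, and cycle-breaking moved into the graph's __del__ — using hypothetical ToyNode/ToyGraph classes rather than the real GraphNode/WorkGraph.

```python
from functools import cache, cached_property


class ToyNode:
    def __init__(self, node_id: str):
        self.id = node_id
        self._parent_edges = []
        self._children_edges = []

    @cached_property
    def parents(self) -> list['ToyNode']:
        # Computed once per node and stored in the instance __dict__;
        # invalidation is manual (delete the attribute) if edges change later.
        return [edge[0] for edge in self._parent_edges]

    @cache
    def get_chain(self) -> list['ToyNode']:
        # @cache memoizes per instance, so the instance must stay hashable
        return [self]

    def __hash__(self) -> int:
        return hash(self.id)


class ToyGraph:
    def __init__(self, nodes: list[ToyNode]):
        self.nodes = nodes

    def __del__(self):
        # Drop the back-references so the garbage collector can reclaim nodes
        # and edges without per-node __dict__ clearing, as in the new WorkGraph.__del__
        for node in self.nodes:
            node._parent_edges = None
            node._children_edges = None
```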