Skip to content

Commit

Permalink
bug fix on greedy and annealing with the use_cpu parameter, and in greedy with the
Browse files Browse the repository at this point in the history
largest parameter not passed down
  • Loading branch information
Laouen committed Jun 14, 2024
1 parent 3c48845 commit efddaa0
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 23 deletions.
18 changes: 11 additions & 7 deletions thoi/heuristics/greedy.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,9 +37,12 @@ def greedy(X:np.ndarray,
covmat = covmat.to(device).contiguous()
current_solution = current_solution.to(device).contiguous()

best_scores = [_evaluate_nplet(covmat, T, current_solution, metric)]
best_scores = [_evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu)]
for _ in trange(initial_order, order, leave=False, desc='Order'):
best_candidate, best_score = next_order_greedy(covmat, T, current_solution, metric, min)
best_candidate, best_score = next_order_greedy(covmat, T, current_solution,
metric=metric,
largest=largest,
use_cpu=use_cpu)
best_scores.append(best_score)

current_solution = torch.cat((current_solution, best_candidate.unsqueeze(1)) , dim=1)
Expand All @@ -52,7 +55,8 @@ def next_order_greedy(covmat: torch.tensor,
T: int,
initial_solution: torch.tensor,
metric:str='o',
largest:bool=False):
largest:bool=False,
use_cpu:bool=False):

assert metric in ['tc', 'dtc', 'o', 's'], f'metric must be one of tc, dtc, o or s. invalid value: {metric}'

Expand All @@ -76,7 +80,7 @@ def next_order_greedy(covmat: torch.tensor,
# |batch_size| x |order+1|
best_candidates = valid_candidates[:, 0]
# |batch_size|
best_score = _evaluate_nplet(covmat, T, current_solution, metric)
best_score = _evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu)

if not largest:
best_score = -best_score
Expand All @@ -90,9 +94,9 @@ def next_order_greedy(covmat: torch.tensor,

# Calculate score of new solution
# |batch_size|
new_score = _evaluate_nplet(covmat, T, current_solution, metric)
new_score = _evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu)

# if minimizing, then return score to optimizing
# if minimizing, then maximize the inverted score
if not largest:
new_score = -new_score

Expand All @@ -116,7 +120,7 @@ def next_order_greedy(covmat: torch.tensor,
best_score
)

# If minimizing, then return score to its real value
# If minimizing, then return score to its original sign
if not largest:
best_score = -best_score

Expand Down
4 changes: 2 additions & 2 deletions thoi/heuristics/scoring.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from thoi.measures.gaussian_copula import nplets_measures


def _evaluate_nplet(covmat: torch.tensor, T:int, batched_nplets: torch.tensor, metric:str):
def _evaluate_nplet(covmat: torch.tensor, T:int, batched_nplets: torch.tensor, metric:str, use_cpu:bool=False):

"""
X (torch.tensor): The covariance matrix with shape (n_variables, n_variables)
Expand All @@ -15,7 +15,7 @@ def _evaluate_nplet(covmat: torch.tensor, T:int, batched_nplets: torch.tensor, m
metric_idx = METRICS.index(metric)

# |batch_size| x |4 = (tc, dtc, o, s)|
batched_res = nplets_measures(covmat, batched_nplets, T=T, covmat_precomputed=True)
batched_res = nplets_measures(covmat, batched_nplets, T=T, covmat_precomputed=True, use_cpu=use_cpu)

# Return minus the o information score to make it an maximum optimization (energy)
# |batch_size|
Expand Down
35 changes: 21 additions & 14 deletions thoi/heuristics/simulated_annealing.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,14 @@
from thoi.collectors import batch_to_tensor, concat_tensors
from thoi.heuristics.scoring import _evaluate_nplet

def init_lower_order(X: np.ndarray, order:int, lower_order:int, repeat:int, metric:str, largest:bool, use_cpu:bool, device:torch.device):
def init_lower_order(X: np.ndarray,
order:int,
lower_order:int,
repeat:int,
metric:str,
largest:bool,
use_cpu:bool,
device:torch.device):
N = X.shape[1]

# |repeat| x |lower_order|
Expand Down Expand Up @@ -50,17 +57,17 @@ def random_sampler(N:int, order:int, repeat:int, device:torch.device=None):

def simulated_annealing(X: np.ndarray,
order: int,
initial_temp:float=100.0,
cooling_rate:float=0.99,
max_iterations:int=1000,
repeat:int=10,
use_cpu:bool=False,
init_method:str='random', # lower_order, 'random', 'precumputed', 'precomputed_lower_order';
lower_order:int=None,
early_stop:int=100,
current_solution: Optional[torch.tensor]=None,
metric:str='o', # tc, dtc, o, s
largest:bool=False):
initial_temp:float = 100.0,
cooling_rate:float = 0.99,
max_iterations:int = 1000,
repeat:int = 10,
use_cpu:bool = False,
init_method:str = 'random', # lower_order, 'random', 'precumputed', 'precomputed_lower_order';
lower_order:int = None,
early_stop:int = 100,
current_solution:Optional[torch.tensor] = None,
metric:str = 'o', # tc, dtc, o, s
largest:bool = False):

lower_order = order-1 if lower_order is None else lower_order
assert init_method != 'lower_order' or lower_order < order, 'Init from optima lower order cannot start from a lower_order higher than the order to compute.'
Expand All @@ -86,7 +93,7 @@ def simulated_annealing(X: np.ndarray,
assert current_solution is not None, 'current_solution must be a torch tensor'

# |batch_size|
current_energy = _evaluate_nplet(covmat, T, current_solution, metric)
current_energy = _evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu)

if not largest:
current_energy = -current_energy
Expand Down Expand Up @@ -126,7 +133,7 @@ def simulated_annealing(X: np.ndarray,

# Calculate energy of new solution
# |batch_size|
new_energy = _evaluate_nplet(covmat, T, new_solution, metric)
new_energy = _evaluate_nplet(covmat, T, new_solution, metric, use_cpu=use_cpu)

if not largest:
new_energy = -new_energy
Expand Down

0 comments on commit efddaa0

Please sign in to comment.