diff --git a/thoi/heuristics/greedy.py b/thoi/heuristics/greedy.py index 81122f5..a67a236 100644 --- a/thoi/heuristics/greedy.py +++ b/thoi/heuristics/greedy.py @@ -37,9 +37,12 @@ def greedy(X:np.ndarray, covmat = covmat.to(device).contiguous() current_solution = current_solution.to(device).contiguous() - best_scores = [_evaluate_nplet(covmat, T, current_solution, metric)] + best_scores = [_evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu)] for _ in trange(initial_order, order, leave=False, desc='Order'): - best_candidate, best_score = next_order_greedy(covmat, T, current_solution, metric, min) + best_candidate, best_score = next_order_greedy(covmat, T, current_solution, + metric=metric, + largest=largest, + use_cpu=use_cpu) best_scores.append(best_score) current_solution = torch.cat((current_solution, best_candidate.unsqueeze(1)) , dim=1) @@ -52,7 +55,8 @@ def next_order_greedy(covmat: torch.tensor, T: int, initial_solution: torch.tensor, metric:str='o', - largest:bool=False): + largest:bool=False, + use_cpu:bool=False): assert metric in ['tc', 'dtc', 'o', 's'], f'metric must be one of tc, dtc, o or s. 
invalid value: {metric}' @@ -76,7 +80,7 @@ def next_order_greedy(covmat: torch.tensor, # |batch_size| x |order+1| best_candidates = valid_candidates[:, 0] # |batch_size| - best_score = _evaluate_nplet(covmat, T, current_solution, metric) + best_score = _evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu) if not largest: best_score = -best_score @@ -90,9 +94,9 @@ def next_order_greedy(covmat: torch.tensor, # Calculate score of new solution # |batch_size| - new_score = _evaluate_nplet(covmat, T, current_solution, metric) + new_score = _evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu) - # if minimizing, then return score to optimizing + # if minimizing, then maximize the inverted score if not largest: new_score = -new_score @@ -116,7 +120,7 @@ def next_order_greedy(covmat: torch.tensor, best_score ) - # If minimizing, then return score to its real value + # If minimizing, then return score to its original sign if not largest: best_score = -best_score diff --git a/thoi/heuristics/scoring.py b/thoi/heuristics/scoring.py index 793f0df..07faba6 100644 --- a/thoi/heuristics/scoring.py +++ b/thoi/heuristics/scoring.py @@ -2,7 +2,7 @@ from thoi.measures.gaussian_copula import nplets_measures -def _evaluate_nplet(covmat: torch.tensor, T:int, batched_nplets: torch.tensor, metric:str): +def _evaluate_nplet(covmat: torch.tensor, T:int, batched_nplets: torch.tensor, metric:str, use_cpu:bool=False): """ X (torch.tensor): The covariance matrix with shape (n_variables, n_variables) @@ -15,7 +15,7 @@ def _evaluate_nplet(covmat: torch.tensor, T:int, batched_nplets: torch.tensor, m metric_idx = METRICS.index(metric) # |batch_size| x |4 = (tc, dtc, o, s)| - batched_res = nplets_measures(covmat, batched_nplets, T=T, covmat_precomputed=True) + batched_res = nplets_measures(covmat, batched_nplets, T=T, covmat_precomputed=True, use_cpu=use_cpu) # Return minus the o information score to make it an maximum optimization (energy) # |batch_size| diff --git 
a/thoi/heuristics/simulated_annealing.py b/thoi/heuristics/simulated_annealing.py index 57efc4e..6418c56 100644 --- a/thoi/heuristics/simulated_annealing.py +++ b/thoi/heuristics/simulated_annealing.py @@ -8,7 +8,14 @@ from thoi.collectors import batch_to_tensor, concat_tensors from thoi.heuristics.scoring import _evaluate_nplet -def init_lower_order(X: np.ndarray, order:int, lower_order:int, repeat:int, metric:str, largest:bool, use_cpu:bool, device:torch.device): +def init_lower_order(X: np.ndarray, +                     order:int, +                     lower_order:int, +                     repeat:int, +                     metric:str, +                     largest:bool, +                     use_cpu:bool, +                     device:torch.device): N = X.shape[1] # |repeat| x |lower_order| @@ -50,17 +57,17 @@ def random_sampler(N:int, order:int, repeat:int, device:torch.device=None): def simulated_annealing(X: np.ndarray, order: int, - initial_temp:float=100.0, - cooling_rate:float=0.99, - max_iterations:int=1000, - repeat:int=10, - use_cpu:bool=False, - init_method:str='random', # lower_order, 'random', 'precumputed', 'precomputed_lower_order'; - lower_order:int=None, - early_stop:int=100, - current_solution: Optional[torch.tensor]=None, - metric:str='o', # tc, dtc, o, s - largest:bool=False): + initial_temp:float = 100.0, + cooling_rate:float = 0.99, + max_iterations:int = 1000, + repeat:int = 10, + use_cpu:bool = False, + init_method:str = 'random', # lower_order, 'random', 'precomputed', 'precomputed_lower_order'; + lower_order:int = None, + early_stop:int = 100, + current_solution:Optional[torch.tensor] = None, + metric:str = 'o', # tc, dtc, o, s + largest:bool = False): lower_order = order-1 if lower_order is None else lower_order assert init_method != 'lower_order' or lower_order < order, 'Init from optima lower order cannot start from a lower_order higher than the order to compute.' 
@@ -86,7 +93,7 @@ def simulated_annealing(X: np.ndarray, assert current_solution is not None, 'current_solution must be a torch tensor' # |batch_size| - current_energy = _evaluate_nplet(covmat, T, current_solution, metric) + current_energy = _evaluate_nplet(covmat, T, current_solution, metric, use_cpu=use_cpu) if not largest: current_energy = -current_energy @@ -126,7 +133,7 @@ def simulated_annealing(X: np.ndarray, # Calculate energy of new solution # |batch_size| - new_energy = _evaluate_nplet(covmat, T, new_solution, metric) + new_energy = _evaluate_nplet(covmat, T, new_solution, metric, use_cpu=use_cpu) if not largest: new_energy = -new_energy