From dba6b846964e83b0fc3542084e16e653a837dae0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tommi=20M=C3=A4klin?=
Date: Sat, 17 Aug 2024 09:27:20 +0300
Subject: [PATCH] Revert "Faster computing of group_counts (no need to save
 memory here)."

This reverts commit 7f353cb046778b821ecc0d1fdee27495c97320e5.

The optimizations did not end up working on large inputs.
---
 include/Likelihood.hpp | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/include/Likelihood.hpp b/include/Likelihood.hpp
index 17c5099..d182a89 100644
--- a/include/Likelihood.hpp
+++ b/include/Likelihood.hpp
@@ -119,16 +119,21 @@ class LL_WOR21 : public Likelihood {
 #endif
 
     // This double loop is currently the slowest part in the input reading
-    std::vector<uint32_t> group_counts(num_ecs*n_groups, 0);
+    std::vector<bm::sparse_vector<uint32_t, bm::bvector<>>> local_counts(n_threads);
 #pragma omp parallel for schedule(static)
     for (size_t i = 0; i < num_ecs; ++i) {
       for (size_t j = 0; j < n_targets; ++j) {
 	if (alignment(i, j)) {
-	  ++group_counts[alignment.get_groups()[j]*num_ecs + i];
+	  local_counts[omp_get_thread_num()].inc(alignment.get_groups()[j]*num_ecs + i);
 	}
       }
     }
 
+    bm::sparse_vector<uint32_t, bm::bvector<>> group_counts = std::move(local_counts[0]);
+    for (size_t i = 1; i < n_threads; ++i) {
+      group_counts.merge(local_counts[i]);
+    }
+
     bool mask_groups = min_hits > 0;
     this->groups_mask = std::vector<bool>(n_groups, !mask_groups);
     std::vector<uint32_t> masked_group_sizes;