Merge pull request #2270 from pshriwise/vol-lrg-samples
Update volume calc types to mitigate overflow issues
paulromano authored Oct 21, 2022
2 parents dc0731c + 93065d1 commit 9bf069e
Showing 3 changed files with 37 additions and 19 deletions.
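
Background (not stated in the commit message itself): a 32-bit signed int can count at most 2,147,483,647 hits, so runs with very large sample counts can overflow the previous int-based tallies; the diff below switches them to uint64_t. A standalone illustration of the headroom difference, with no OpenMC dependencies:

    // Illustrative only, not part of this commit: the largest value a 32-bit
    // signed counter can hold versus the 64-bit unsigned counters used below.
    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main()
    {
      std::cout << "int32_t max:  " << std::numeric_limits<int32_t>::max() << '\n';
      std::cout << "uint64_t max: " << std::numeric_limits<uint64_t>::max() << '\n';
      // A hit count like this exceeds INT32_MAX but is trivial for uint64_t.
      uint64_t hits = 10'000'000'000ULL;
      std::cout << "example hit count: " << hits << '\n';
      return 0;
    }
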
6 changes: 6 additions & 0 deletions include/openmc/constants.h
@@ -5,6 +5,7 @@
 #define OPENMC_CONSTANTS_H
 
 #include <cmath>
+#include <cstdint>
 #include <limits>
 
 #include "openmc/array.h"
@@ -339,6 +340,11 @@ enum class RunMode {
 
 enum class GeometryType { CSG, DAG };
 
+//==============================================================================
+// Volume Calculation Constants
+
+constexpr uint64_t UINT64_T_MAX {std::numeric_limits<uint64_t>::max()};
+
 } // namespace openmc
 
 #endif // OPENMC_CONSTANTS_H
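
A standalone check of the new constant (plain C++, not OpenMC code; the openmc namespace is repeated only to mirror the header):

    // The added UINT64_T_MAX constant evaluates to 2^64 - 1.
    #include <cstdint>
    #include <iostream>
    #include <limits>

    namespace openmc {
    constexpr uint64_t UINT64_T_MAX {std::numeric_limits<uint64_t>::max()};
    } // namespace openmc

    int main()
    {
      static_assert(openmc::UINT64_T_MAX == 18446744073709551615ULL,
        "uint64_t must be exactly 64 bits wide");
      std::cout << openmc::UINT64_T_MAX << '\n'; // prints 18446744073709551615
      return 0;
    }
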
7 changes: 5 additions & 2 deletions include/openmc/volume_calc.h
@@ -1,6 +1,9 @@
 #ifndef OPENMC_VOLUME_CALC_H
 #define OPENMC_VOLUME_CALC_H
 
+#include <cstdint>
+#include <string>
+
 #include "openmc/array.h"
 #include "openmc/position.h"
 #include "openmc/tallies/trigger.h"
@@ -10,7 +13,6 @@
 #include "xtensor/xtensor.hpp"
 
 #include <gsl/gsl-lite.hpp>
-#include <string>
 
 namespace openmc {
 
@@ -69,7 +71,8 @@ class VolumeCalculation {
   //! \param[in] i_material Index in global materials vector
   //! \param[in,out] indices Vector of material indices
   //! \param[in,out] hits Number of hits corresponding to each material
-  void check_hit(int i_material, vector<int>& indices, vector<int>& hits) const;
+  void check_hit(
+    int i_material, vector<uint64_t>& indices, vector<uint64_t>& hits) const;
 };
 
 //==============================================================================
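
The body of check_hit is not part of this diff, so the following is only a sketch of the sparse-counter pattern its new signature suggests, with parallel vectors of material indices and 64-bit hit counts; it is not the OpenMC implementation:

    // Hedged sketch, not OpenMC code: increment the counter for a material if
    // it has been seen before, otherwise append a new (index, count) pair.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void check_hit_sketch(
      int i_material, std::vector<uint64_t>& indices, std::vector<uint64_t>& hits)
    {
      for (std::size_t j = 0; j < indices.size(); ++j) {
        if (indices[j] == static_cast<uint64_t>(i_material)) {
          ++hits[j];
          return;
        }
      }
      indices.push_back(static_cast<uint64_t>(i_material));
      hits.push_back(1);
    }
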
43 changes: 26 additions & 17 deletions src/volume_calc.cpp
@@ -99,16 +99,16 @@ vector<VolumeCalculation::Result> VolumeCalculation::execute() const
 {
   // Shared data that is collected from all threads
   int n = domain_ids_.size();
-  vector<vector<int>> master_indices(
+  vector<vector<uint64_t>> master_indices(
     n); // List of material indices for each domain
-  vector<vector<int>> master_hits(
+  vector<vector<uint64_t>> master_hits(
     n); // Number of hits for each material in each domain
   int iterations = 0;
 
   // Divide work over MPI processes
-  size_t min_samples = n_samples_ / mpi::n_procs;
-  size_t remainder = n_samples_ % mpi::n_procs;
-  size_t i_start, i_end;
+  uint64_t min_samples = n_samples_ / mpi::n_procs;
+  uint64_t remainder = n_samples_ % mpi::n_procs;
+  uint64_t i_start, i_end;
   if (mpi::rank < remainder) {
     i_start = (min_samples + 1) * mpi::rank;
     i_end = i_start + min_samples + 1;
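
The hunk above only widens the integer types; for background, those variables drive a partition of n_samples_ across MPI ranks in which the first remainder ranks take one extra sample. A standalone sketch follows; the else branch is collapsed in the diff above, so its exact form here is an assumption:

    // Illustrative work division, independent of OpenMC: rank and n_procs
    // stand in for mpi::rank and mpi::n_procs. With 10 samples over 4 ranks
    // the half-open ranges are [0,3), [3,6), [6,8), [8,10).
    #include <cstdint>
    #include <iostream>

    int main()
    {
      const uint64_t n_samples = 10;
      const int n_procs = 4;
      for (int rank = 0; rank < n_procs; ++rank) {
        uint64_t min_samples = n_samples / n_procs;
        uint64_t remainder = n_samples % n_procs;
        uint64_t i_start, i_end;
        if (static_cast<uint64_t>(rank) < remainder) {
          i_start = (min_samples + 1) * rank;
          i_end = i_start + min_samples + 1;
        } else { // assumed handling of the ranks without an extra sample
          i_start = (min_samples + 1) * remainder + (rank - remainder) * min_samples;
          i_end = i_start + min_samples;
        }
        std::cout << "rank " << rank << ": [" << i_start << ", " << i_end << ")\n";
      }
      return 0;
    }
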
@@ -123,14 +123,14 @@ vector<VolumeCalculation::Result> VolumeCalculation::execute() const
 #pragma omp parallel
     {
       // Variables that are private to each thread
-      vector<vector<int>> indices(n);
-      vector<vector<int>> hits(n);
+      vector<vector<uint64_t>> indices(n);
+      vector<vector<uint64_t>> hits(n);
       Particle p;
 
       // Sample locations and count hits
 #pragma omp for
       for (size_t i = i_start; i < i_end; i++) {
-        int64_t id = iterations * n_samples_ + i;
+        uint64_t id = iterations * n_samples_ + i;
         uint64_t seed = init_seed(id, STREAM_VOLUME);
 
         p.n_coord() = 1;
@@ -223,7 +223,15 @@ vector<VolumeCalculation::Result> VolumeCalculation::execute() const
     // bump iteration counter and get total number
     // of samples at this point
     iterations++;
-    size_t total_samples = iterations * n_samples_;
+    uint64_t total_samples = iterations * n_samples_;
+
+    // warn user if total sample size is greater than what the size_t type can
+    // represent
+    if (total_samples == UINT64_T_MAX) {
+      warning("The number of samples has exceeded the type used to track hits. "
+              "Volume "
+              "results may be inaccurate.");
+    }
 
     // reset
     double trigger_val = -INFTY;
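
The new warning fires when total_samples reaches the largest representable uint64_t value. For background (not part of the commit), unsigned 64-bit arithmetic otherwise wraps around silently, as this standalone demo shows:

    // Unsigned overflow demo, independent of OpenMC: with n_samples = 2^62 the
    // running total wraps back to 0 on the fourth iteration.
    #include <cstdint>
    #include <iostream>

    int main()
    {
      const uint64_t n_samples = uint64_t(1) << 62;
      for (int iterations = 1; iterations <= 5; ++iterations) {
        uint64_t total_samples = iterations * n_samples; // wraps silently past 2^64 - 1
        std::cout << "iteration " << iterations << ": total_samples = "
                  << total_samples << '\n';
      }
      return 0;
    }
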
@@ -246,10 +254,11 @@ vector<VolumeCalculation::Result> VolumeCalculation::execute() const
       if (mpi::master) {
         for (int j = 1; j < mpi::n_procs; j++) {
           int q;
+          // retrieve results
           MPI_Recv(
-            &q, 1, MPI_INTEGER, j, 2 * j, mpi::intracomm, MPI_STATUS_IGNORE);
-          vector<int> buffer(2 * q);
-          MPI_Recv(buffer.data(), 2 * q, MPI_INTEGER, j, 2 * j + 1,
+            &q, 1, MPI_UINT64_T, j, 2 * j, mpi::intracomm, MPI_STATUS_IGNORE);
+          vector<uint64_t> buffer(2 * q);
+          MPI_Recv(buffer.data(), 2 * q, MPI_UINT64_T, j, 2 * j + 1,
             mpi::intracomm, MPI_STATUS_IGNORE);
           for (int k = 0; k < q; ++k) {
             bool already_added = false;
@@ -268,20 +277,20 @@
         }
       } else {
         int q = master_indices[i_domain].size();
-        vector<int> buffer(2 * q);
+        vector<uint64_t> buffer(2 * q);
         for (int k = 0; k < q; ++k) {
           buffer[2 * k] = master_indices[i_domain][k];
           buffer[2 * k + 1] = master_hits[i_domain][k];
         }
 
-        MPI_Send(&q, 1, MPI_INTEGER, 0, 2 * mpi::rank, mpi::intracomm);
-        MPI_Send(buffer.data(), 2 * q, MPI_INTEGER, 0, 2 * mpi::rank + 1,
+        MPI_Send(&q, 1, MPI_UINT64_T, 0, 2 * mpi::rank, mpi::intracomm);
+        MPI_Send(buffer.data(), 2 * q, MPI_UINT64_T, 0, 2 * mpi::rank + 1,
           mpi::intracomm);
       }
 #endif
 
       if (mpi::master) {
-        int total_hits = 0;
+        size_t total_hits = 0;
         for (int j = 0; j < master_indices[i_domain].size(); ++j) {
           total_hits += master_hits[i_domain][j];
           double f =
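
The MPI calls above switch from MPI_INTEGER to MPI_UINT64_T to match the new uint64_t buffers. A minimal standalone sketch of the same count-then-buffer exchange, independent of OpenMC (run with at least two ranks; error handling omitted):

    // Rank 1 sends a count followed by a buffer of uint64_t hit totals;
    // rank 0 receives them with MPI_UINT64_T.
    #include <cstdint>
    #include <vector>
    #include <mpi.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      if (rank == 1) {
        std::vector<uint64_t> hits {3000000000ULL, 7000000000ULL}; // both exceed INT32_MAX
        int q = static_cast<int>(hits.size());
        MPI_Send(&q, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        MPI_Send(hits.data(), q, MPI_UINT64_T, 0, 1, MPI_COMM_WORLD);
      } else if (rank == 0) {
        int q;
        MPI_Recv(&q, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        std::vector<uint64_t> hits(q);
        MPI_Recv(hits.data(), q, MPI_UINT64_T, 1, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      }

      MPI_Finalize();
      return 0;
    }
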
@@ -464,7 +473,7 @@ void VolumeCalculation::to_hdf5(
 }
 
 void VolumeCalculation::check_hit(
-  int i_material, vector<int>& indices, vector<int>& hits) const
+  int i_material, vector<uint64_t>& indices, vector<uint64_t>& hits) const
 {
 
   // Check if this material was previously hit and if so, increment count
