Merge branch 'master' into coordinates-input-refactor
johnomotani committed Sep 17, 2024
2 parents e7804d7 + 6af9bb8 commit 4d798ea
Showing 7 changed files with 53 additions and 11 deletions.
24 changes: 24 additions & 0 deletions docs/src/developing.md
@@ -42,6 +42,30 @@ String and as a tree of HDF5 variables.
after the input is read, and not stored in the `input_dict`.


## Array types

Most arrays in `moment_kinetics` are declared using a custom array type
[`moment_kinetics.communication.MPISharedArray`](@ref). Most of the time this
type is just an alias for `Array`, and so it needs the same template parameters
(see [Julia's Array
documentation](https://docs.julialang.org/en/v1/manual/arrays/)) - the data
type and the number of dimensions, e.g. `MPISharedArray{mk_float,3}`. Although
these arrays use shared memory, Julia does not know about this. We use
`MPI.Win_allocate_shared()` to allocate the shared memory, then wrap it in an
`Array` in [`moment_kinetics.communication.allocate_shared`](@ref).
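
For example (a sketch using a hypothetical struct and field name, not taken
from the code base), a shared-memory field is declared exactly as an `Array`
field would be:

```julia
# Hypothetical container for a distribution function: a 3-dimensional
# shared-memory array of floats. `MPISharedArray` takes the same parameters as
# `Array` - the element type and the number of dimensions.
struct ExamplePdf
    norm::MPISharedArray{mk_float,3}
end
```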

When the shared-memory debugging mode is activated, we instead create arrays
using the type `DebugMPISharedArray`, which allows us to track some debugging
information along with the array (see [Shared memory debugging](@ref)), and
`MPISharedArray` becomes an alias for `DebugMPISharedArray`. The alias is
needed because if we declared our structs with just the `Array` type, we would
not be able to store `DebugMPISharedArray` instances in those structs when
debugging is activated, while if we declared the structs with `AbstractArray`,
they would not be concretely typed, which could hurt performance by producing
code that is not 'type stable' (i.e. code where all concrete types are known
at compile time).
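
To illustrate the type-stability point (again with hypothetical structs, for
illustration only):

```julia
# Concrete field type: the compiler knows exactly what `data` is - whether the
# alias resolves to `Array` or to `DebugMPISharedArray` - and can emit
# specialised, type-stable code.
struct ConcreteHolder
    data::MPISharedArray{mk_float,2}
end

# Abstract field type: any subtype of AbstractArray could be stored here, so
# every access to `data` must be resolved at run time, defeating type
# stability.
struct AbstractHolder
    data::AbstractArray{mk_float,2}
end
```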


## Parallelization

At the moment the code is parallelized using MPI and shared-memory arrays. Arrays representing the pdf, moments, etc. are shared between all processes. Using shared memory means, for example, that we can take derivatives along any dimension, while parallelising over the others, without having to communicate to re-distribute the arrays. Using shared memory instead of (in future, as well as) distributed-memory parallelism has the advantage that it is easier to split up the points within each element between processors, giving a finer-grained parallelism that should let the code use larger numbers of processors efficiently.
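
As a sketch of how this looks in practice (using the `begin_r_z_region()` and
`@loop_r_z` pattern that appears in the diffs below; `example_moment` stands
in for any shared-memory array):

```julia
# Mark the start of a block that is parallelised over the r and z dimensions;
# the macro then splits the (ir, iz) index ranges between the processes that
# share the arrays - no communication is needed because every process sees the
# same memory.
begin_r_z_region()
@loop_r_z ir iz begin
    # each process writes only the subset of points assigned to it
    example_moment[iz,ir] = 0.0
end
```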
@@ -1006,6 +1006,10 @@ function plots_for_variable(run_info, variable_name; plot_prefix, has_rdim=true,
all(ri.collisions.krook_collisions_option == "none" for ri ∈ run_info)
# No Krook collisions active, so do not make plots.
return nothing
elseif variable_name ∈ union(electron_moment_variables, electron_source_variables, electron_dfn_variables) &&
all(ri.composition.electron_physics ∈ (boltzmann_electron_response, boltzmann_electron_response_with_simple_sheath)
for ri ∈ run_info)
return nothing
end

println("Making plots for $variable_name")
3 changes: 3 additions & 0 deletions moment_kinetics/src/communication.jl
@@ -544,6 +544,9 @@ end
end

"""
Type used to declare a shared-memory array. When debugging is not active `MPISharedArray`
is just an alias for `Array`, but when `@debug_shared_array` is activated, it is instead
defined as an alias for `DebugMPISharedArray`.
"""
const MPISharedArray = @debug_shared_array_ifelse(DebugMPISharedArray, Array)

5 changes: 5 additions & 0 deletions moment_kinetics/src/electron_fluid_equations.jl
@@ -111,6 +111,11 @@ function calculate_electron_upar_from_charge_conservation!(upar_e, updated, dens
# convert from parallel particle flux to parallel flow speed
upar_e[iz,ir] /= dens_e[iz,ir]
end
else
begin_r_z_region()
@loop_r_z ir iz begin
upar_e[iz,ir] = upar_i[iz,ir,1]
end
end
updated[] = true
end
4 changes: 3 additions & 1 deletion moment_kinetics/src/external_sources.jl
@@ -506,8 +506,10 @@ function initialize_external_source_amplitude!(moments, external_source_settings
end
end
end
end

# now do same for electron sources, which (if present) are mostly mirrors of ion sources
for index ∈ eachindex(electron_source_settings)
if electron_source_settings[index].active
if electron_source_settings[index].source_type == "energy"
@loop_r_z ir iz begin
20 changes: 12 additions & 8 deletions moment_kinetics/src/initial_conditions.jl
@@ -281,7 +281,7 @@ function initialize_electrons!(pdf, moments, fields, geometry, composition, r, z
# Not restarting, so create initial profiles

# initialise the electron thermal speed profile
init_electron_vth!(moments.electron.vth, moments.ion.vth, composition.T_e, composition.me_over_mi, z.grid)
init_electron_vth!(moments.electron.vth, moments.ion.vth, composition, z.grid)
begin_r_z_region()
# calculate the electron temperature from the thermal speed
@loop_r_z ir iz begin
@@ -1065,14 +1065,18 @@ initialise the electron thermal speed profile.
for now the only initialisation option for the temperature is constant in z.
returns vth0 = sqrt(2*Ts/Te)
"""
function init_electron_vth!(vth_e, vth_i, T_e, me_over_mi, z)
function init_electron_vth!(vth_e, vth_i, composition, z)
begin_r_z_region()
# @loop_r_z ir iz begin
# vth_e[iz,ir] = sqrt(T_e)
# end
@loop_r_z ir iz begin
vth_e[iz,ir] = vth_i[iz,ir,1] / sqrt(me_over_mi)
#vth_e[iz,ir] = exp(-5*(z[iz]/z[end])^2)/sqrt(me_over_mi)
if composition.electron_physics ∈ (boltzmann_electron_response,
boltzmann_electron_response_with_simple_sheath)
@loop_r_z ir iz begin
vth_e[iz,ir] = sqrt(composition.T_e / composition.me_over_mi)
end
else
@loop_r_z ir iz begin
vth_e[iz,ir] = vth_i[iz,ir,1] / sqrt(composition.me_over_mi)
#vth_e[iz,ir] = exp(-5*(z[iz]/z[end])^2)/sqrt(composition.me_over_mi)
end
end
end

4 changes: 2 additions & 2 deletions moment_kinetics/src/moment_kinetics.jl
@@ -148,8 +148,8 @@ function run_moment_kinetics(to::Union{TimerOutput,Nothing}, input_dict=Dict();
# Stop code from hanging when running on multiple processes if only one of them
# throws an error
if global_size[] > 1
println("$(typeof(e)) on process $(global_rank[]):")
showerror(stdout, e, catch_backtrace())
println(stderr, "$(typeof(e)) on process $(global_rank[]):")
showerror(stderr, e, catch_backtrace())
flush(stdout)
flush(stderr)
MPI.Abort(comm_world, 1)
