diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6e99cd72..3fd21dcb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -87,7 +87,7 @@ jobs: - name: Install dependencies with conda shell: bash -l {0} - run: mamba install -c conda-forge "cosmosis>=3.2" cosmosis-build-standard-library pytest + run: mamba install -c conda-forge "cosmosis==3.9.2" cosmosis-build-standard-library pytest - name: Get Cached Planck uses: actions/cache/restore@v3 @@ -137,8 +137,14 @@ jobs: - name: Install likelihood python dependencies from pip shell: bash -l {0} run: | - pip install "act_dr6_lenslike>=1.0.2" + pip install "act_dr6_lenslike>=1.0.2" "git+https://github.com/carronj/planck_PR4_lensing" + # Not working - don't know why + # - name: Install recent python dependencies + # if: ${{ (matrix.pyversion != '3.7') && (matrix.pyversion != '3.8') }} + # run: | + # pip install "candl-like" + - name: Run Tests shell: bash -l {0} run: | @@ -230,13 +236,15 @@ jobs: - name: Install python dependencies run: | python -m pip install --upgrade pip wheel setuptools - pip install "cosmosis>=3.2" "nautilus-sampler==0.6.*" + pip install "cosmosis==3.9.2" "nautilus-sampler==1.0.1" "scipy<1.14" pip install -v --no-cache-dir --no-binary=mpi4py,camb mpi4py camb - pip install fitsio astropy fast-pt "Cython<3.0" jupyter sacc - - - name: Install likelihood python dependencies + pip install fitsio astropy fast-pt "Cython>=3.0" jupyter sacc "act_dr6_lenslike>=1.0.2" "git+https://github.com/carronj/planck_PR4_lensing" + + - name: Install recent python dependencies + if: ${{ (matrix.python-version != '3.7') && (matrix.python-version != '3.8') }} run: | - pip install "act_dr6_lenslike>=1.0.2" + pip install "candl-like" + - name: Build run: | diff --git a/boltzmann/camb/camb_interface.py b/boltzmann/camb/camb_interface.py index 14e39c78..305982fc 100644 --- a/boltzmann/camb/camb_interface.py +++ b/boltzmann/camb/camb_interface.py @@ -20,6 +20,7 @@ DEFAULT_A_S = 2.1e-9 +C_KMS = 299792.458 # See this table for description: #https://camb.readthedocs.io/en/latest/transfer_variables.html#transfer-variables @@ -430,6 +431,9 @@ def save_derived_parameters(r, block): for k, v in derived.items(): block[names.distances, k] = v block[names.distances, 'rs_zdrag'] = block[names.distances, 'rdrag'] + zstar = derived['zstar'] + shift = r.angular_diameter_distance(zstar) * (1 + zstar) * (p.omegam * p.H0**2)**0.5 / C_KMS + block[names.distances, "cmbshift"] = shift p.omegal = 1 - p.omegam - p.omk p.ommh2 = p.omegam * p.h**2 diff --git a/boltzmann/camb/module.yaml b/boltzmann/camb/module.yaml index fa15152f..f9c94d41 100644 --- a/boltzmann/camb/module.yaml +++ b/boltzmann/camb/module.yaml @@ -464,11 +464,14 @@ outputs: d_l: meaning: Luminosity distance in Mpc type: real 1d + d_v: + meaning: Angular average of comoving angular diameter and line of sight distance in Mpc + type: real 1d mu: meaning: Distance modulus type: real 1d h: - meaning: Hubble parameter with in units of Mpc + meaning: Hubble parameter as function of redshift in units of Mpc type: real 1d age: meaning: Age of universe in GYr @@ -513,6 +516,9 @@ outputs: thetarseq: meaning: Angle 100 r_s(eq)/DA(zstar) type: real + cmbshift: + meaning: CMB shift parameter equation 69 of Komatsu et al 2009 + type: real growth_parameters: z: meaning: Redshift samples of other values in this section, (all if mode=power @@ -602,3 +608,11 @@ outputs: p_k: meaning: Non-linear power spectrum at samples in (Mpc/h)^-3. 
type: real 2d + transfer_func: + k_h: + meaning: Wavenumbers k of samples in Mpc/h. + Different than the k of power spectra! + type: real 1d + t_k: + meaning: Linear transfer function at z=0. + type: real 1d diff --git a/boltzmann/class/class_interface.py b/boltzmann/class/class_interface.py index 4dd31a73..a59cb504 100644 --- a/boltzmann/class/class_interface.py +++ b/boltzmann/class/class_interface.py @@ -202,7 +202,8 @@ def get_class_outputs(block, c, config): block[distances, 'nz'] = nz # Save distance samples - block[distances, 'd_l'] = np.array([c.luminosity_distance(zi) for zi in z]) + d_l = np.array([c.luminosity_distance(zi) for zi in z]) + block[distances, 'd_l'] = d_l d_a = np.array([c.angular_distance(zi) for zi in z]) block[distances, 'd_a'] = d_a block[distances, 'd_m'] = d_a * (1 + z) @@ -215,6 +216,12 @@ def get_class_outputs(block, c, config): h_z = np.array([c.Hubble(zi) for zi in z]) block[distances, 'H'] = h_z + mu = np.zeros(z.size) + mu[0] = -np.inf + mu[1:] = 5.0 * np.log10(d_l[1:]) + 25.0 + block[distances, 'mu'] = mu + + ## # Now the CMB C_ell ## diff --git a/boltzmann/class/class_v3.2.0/python/classy.pyx b/boltzmann/class/class_v3.2.0/python/classy.pyx index 4d178b62..8b42c83a 100644 --- a/boltzmann/class/class_v3.2.0/python/classy.pyx +++ b/boltzmann/class/class_v3.2.0/python/classy.pyx @@ -30,7 +30,7 @@ def viewdictitems(d): return d.viewitems() ctypedef np.float_t DTYPE_t -ctypedef np.int_t DTYPE_i +# ctypedef np.int_t DTYPE_i @@ -100,10 +100,10 @@ cdef class Class: cdef distortions sd cdef file_content fc - cpdef int computed # Flag to see if classy has already computed with the given pars - cpdef int allocated # Flag to see if classy structs are allocated already - cpdef object _pars # Dictionary of the parameters - cpdef object ncp # Keeps track of the structures initialized, in view of cleaning. + cdef int computed # Flag to see if classy has already computed with the given pars + cdef int allocated # Flag to see if classy structs are allocated already + cdef object _pars # Dictionary of the parameters + cdef object ncp # Keeps track of the structures initialized, in view of cleaning. # Defining two new properties to recover, respectively, the parameters used # or the age (set after computation). 
Follow this syntax if you want to @@ -128,7 +128,7 @@ cdef class Class: self.set(**_pars) def __cinit__(self, default=False): - cpdef char* dumc + cdef char* dumc self.allocated = False self.computed = False self._pars = {} diff --git a/examples/candl_test.ini b/examples/candl_test.ini new file mode 100644 index 00000000..35280776 --- /dev/null +++ b/examples/candl_test.ini @@ -0,0 +1,64 @@ +[runtime] +sampler = test +root = ${PWD} +resume = T +verbosity = noisy + +[test] +save_dir = output/spt3g_2018_lensing +fatal_errors = T + +[nautilus] +n_live = 1500 +verbose = T + +[emcee] +walkers = 64 +samples = 1000 +nsteps = 5 + + +[pipeline] +; these names refer to sections later in the file: +modules = consistency camb spt3g_2018_lensing +values = examples/candl_test_values.ini +priors = examples/candl_test_priors.ini +debug = F +timing = F +extra_output = cosmological_parameters/sigma_8 cosmological_parameters/omega_m + +[spt3g_2018_lensing] +file = ./likelihood/candl/candl_cosmosis_interface.py ; Location of interface code - change depending on the location of your .ini file +data_set = 'candl.data.SPT3G_2018_Lens' ; Data set or path to .yaml file +variant = 'use_CMB' ; Select a variant of the data set if pointing to an index file +lensing = T ; Switch on for lensing likelihoods +feedback = T ; Switch on to request feedback from candl initialisation +data_selection = "..." ; Select a subset of the data set +clear_1d_internal_priors = F ; Switch off to use candl internal 1d priors +clear_nd_internal_priors = F ; Switch on to ignore candl internal higher dimensional priors. Careful: higher-dimensional priors are not implemented in CosmoSIS itself. +force_ignore_transformations = '' ; Backdoor if you want to ignore certain transformations in the data model. + +[output] +filename = output/spt3g-18-lens.txt + +; The consistency module translates between our chosen parameterization +; and any other that modules in the pipeline may want (e.g. camb) +[consistency] +file = ./utility/consistency/consistency_interface.py +cosmomc_theta = T + +[camb] +file = boltzmann/camb/camb_interface.py +feedback = 0 ; verbosity of output +mode = cmb ; mode to run camb. 
For CMB lensing only, cmb is sufficient +lmax = 4000 ; max ell to use for cmb calculation +lens_margin = 1250 ; Lmax +AccuracyBoost = 1.0 ; CAMB accuracy boost parameter +lSampleBoost = 1.0 ; CAMB lsample boost parameter +lAccuracyBoost = 1.0 ; CAMB lAccuracy boost parameter +lens_potential_accuracy = 4 ; CAMB lens_potential accuracy paramater +do_tensors = T ;include tensor modes +do_lensing = T ;lensing is required w/ Planck data +NonLinear = lens ; Non-linear calculation +theta_H0_range = "20 100" ; Set bounds in H0 +halofit_version = takahashi ; Halofit version diff --git a/examples/candl_test_priors.ini b/examples/candl_test_priors.ini new file mode 100644 index 00000000..f0921feb --- /dev/null +++ b/examples/candl_test_priors.ini @@ -0,0 +1,3 @@ +[cosmological_parameters] +ombh2 = normal 0.02233 0.00036 +n_s = normal 0.96 0.02 \ No newline at end of file diff --git a/examples/candl_test_values.ini b/examples/candl_test_values.ini new file mode 100644 index 00000000..9ec1c201 --- /dev/null +++ b/examples/candl_test_values.ini @@ -0,0 +1,16 @@ + +[cosmological_parameters] +omch2 = 0.08 0.12 0.16 +ombh2 = 0.01 0.02233 0.03 +log1e10As = 2.9 3.0448 3.2 +n_s = 0.86 0.96 1.06 +cosmomc_theta = 0.9 1.04 1.20 +omega_k = 0.0 +mnu = 0.06 +nnu = 3.046 +yhe = 0.245341 +tau = 0.0543 +num_massive_neutrinos = 3 + +[nuisance_parameters] +A_fg = 0.0 1.0 2.0 diff --git a/examples/desi-figure-2-campaign.yaml b/examples/desi-figure-2-campaign.yaml new file mode 100644 index 00000000..0779af08 --- /dev/null +++ b/examples/desi-figure-2-campaign.yaml @@ -0,0 +1,60 @@ +# This reproduces the chains that make up figure 2 of the DESI paper +# 2404.03002 + +output_dir: "./output/desi-campaign" + +runs: + - name: base + base: examples/desi.ini + params: + - sampler = metropolis + - metropolis.samples = 20000 + - metropolis.use_cobaya = T + - metropolis.tuning_frequency = 100 + - metropolis.tuning_grace = 500 + - metropolis.tuning_end = 2000 + - emcee.walkers = 32 + - emcee.samples = 1000 + - emcee.nsteps = 20 + + - name: BGS + parent: base + params: + - desi.desi_data_sets = BGS + - metropolis.samples = 30000 + + - name: LRG1 + parent: base + params: + - desi.desi_data_sets = LRG1 + + - name: LRG2 + parent: base + params: + - desi.desi_data_sets = LRG2 + + - name: LRG3+ELG1 + parent: base + params: + - desi.desi_data_sets = LRG3+ELG1 + + - name: ELG2 + parent: base + params: + - desi.desi_data_sets = ELG2 + + - name: QSO + parent: base + params: + - desi.desi_data_sets = QSO + + - name: Lya QSO + parent: base + params: + - desi.desi_data_sets = Lya QSO + + - name: All + parent: base + params: + # - pipeline.timing=T + - desi.desi_data_sets = BGS,LRG1,LRG2,LRG3+ELG1,ELG2,QSO,Lya QSO \ No newline at end of file diff --git a/examples/desi-figure-2.py b/examples/desi-figure-2.py new file mode 100644 index 00000000..b4c4e5c8 --- /dev/null +++ b/examples/desi-figure-2.py @@ -0,0 +1,89 @@ +import cosmosis.campaign +from cosmosis.postprocessing.v2 import Chain +import getdist.plots +import matplotlib.pyplot as plt +plt.rcParams['text.usetex'] = True + +# The campaign file defines all the chains that we want to generate +campaign, _ = cosmosis.campaign.parse_yaml_run_file("./examples/desi-figure-2-campaign.yaml") + + +# First make all the chains, by looping through the campaign. +# The next major version of CosmoSIS will do this for you more easily. 
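+# Each run named in the campaign file is launched in turn; the "base" run
+# is only a template that the others inherit from, so it is not launched.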
+for name, run in campaign.items(): + if name != "base": + cosmosis.campaign.launch_run(run) + +# These colors match those used in the DESI paper as +# best I could +colors = { + 'All': [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)], + 'BGS': [(0.729, 0.811, 0.552), (0.517, 0.745, 0.012)], + 'ELG2': [(0.584, 0.725, 0.772), (0.219, 0.447, 0.6)], + 'LRG1': [(0.996, 0.854, 0.596), (0.956, 0.627, 0.086)], + 'LRG2': [(0.960, 0.678, 0.580), (0.929, 0.262, 0.094)], + 'LRG3+ELG1': [(0.733, 0.603, 0.741), (0.341, 0.211, 0.701)], + 'Lya QSO': [(0.733, 0.556, 0.749), (0.439, 0.0, 0.474)], + 'QSO': [(0.662, 0.815, 0.725), (0.247, 0.552, 0.360)] + } + + +# This is the order we want the contours to be plotted in. It's not the same +# as the legend order. This is because we don't want the big contours to +# cover up the small ones +order = ["BGS", "LRG1", "QSO", "ELG2", "Lya QSO", "LRG3+ELG1", "LRG2", "All"] + + +# The chains are complete now so load them all in +chain_data = {} +for i, (name, run) in enumerate(campaign.items()): + # The base parent run it not used, it's just a template. + # We will soon be able to specify this in the yaml file + if name == "base" : + continue + + # Load the chain using a chain object. This is in the upcoming + # v2 api for cosmosis postprocessing, which will soon replace the + # main version. It uses GetDist to do everything. + chain_file = campaign[name]['params']['output', 'filename'] + chain = Chain.load(chain_file, name=name, burn=0.2) + + # Store the GetDist MCSamples object in a dictionary + chain_data[name] = chain.mcsamples + + +# split the chains into two groups, the single-data set chains +# and the combined one. This is because the contours are all filled +# for the single data sets, but not for the combined one. +combined_sample = chain_data["All"] +single_data_samples = {name: chain_data[name] for name in order if name != "All"} + +# This is the order we want the labels to appear in. +new_order = [order.index(name) for name in colors.keys()] + +# The rest of this is standard GetDist calls. +plotter = getdist.plots.get_single_plotter() + + +plotter.plot_2d(list(single_data_samples.values()), + "DISTANCES--H0RD", + "cosmological_parameters--omega_m", + filled=True, + colors=[colors[name] for name in single_data_samples.keys()], + add_legend_proxy=True, + lims=[70, 130, 0.1, 0.7],) + +plotter.plot_2d(combined_sample, + "DISTANCES--H0RD", + "cosmological_parameters--omega_m", + filled=False, + colors=["k", "k"], + lims=[70, 130, 0.1, 0.7], + add_legend_proxy=True) + + +ax=plotter.get_axes() +plotter.add_legend(list(single_data_samples.keys()) + ["All"], label_order=new_order, legend_loc="upper right") +ax.set_xlabel(r"$\mathrm{H}_0 r_d \, [100 \mathrm{km}\, \mathrm{s}^{-1}]$") +ax.set_ylabel(r"$\Omega_m$") +plotter.export("output/desi.pdf") diff --git a/examples/desi-values.ini b/examples/desi-values.ini new file mode 100644 index 00000000..43cea276 --- /dev/null +++ b/examples/desi-values.ini @@ -0,0 +1,25 @@ +[cosmological_parameters] +; Fix these to fiducial values +h0 = 0.7 +ombh2 = 0.02236 +omega_m = 0.01 0.3 0.99 +rdh = 10.0 100.0 1000.0 + + +; hubble = 20. 70. 120. 
+; ombh2 = 0.005 0.02 0.1 + +; These are needed to run camb but will have no +; effect on the observables or likelihood +n_s = 0.97 +A_s = 2.19e-9 +w = -1.0 +wa = 0.0 + +; New parametrization and names: +mnu = 0.0 +num_massive_neutrinos = 0 +nnu = 3.046 + +omega_k = 0.0 +tau = 0.0697186 diff --git a/examples/desi.ini b/examples/desi.ini new file mode 100644 index 00000000..7098b9d9 --- /dev/null +++ b/examples/desi.ini @@ -0,0 +1,42 @@ +[runtime] +sampler = test +verbosity = quiet +resume = T + +[pipeline] +modules = consistency camb sample_rdh desi +timing=F +extra_output = distances/rs_zdrag distances/h0rd +values = examples/desi-values.ini + +[maxlike] +max_posterior = T + +[test] +save_dir=output/desi +fatal_errors=T + +[output] +filename = output/desi.txt + +[sample_rdh] +file = utility/rescale_distances_rdh/rescale_distances_rdh.py + +[consistency] +file = utility/consistency/consistency_interface.py + +[camb] +file = boltzmann/camb/camb_interface.py +mode = background +feedback = 0 +AccuracyBoost = 1.0 +zmin_background = 0. +zmax_background = 3. +nz_background = 201 +use_ppf_w = T +want_chistar = F + + +[desi] +file = likelihood/bao/desi1-dr1-arxiv/desi1_dr1_arxiv.py +desi_data_sets = BGS,LRG1 \ No newline at end of file diff --git a/examples/npipe-priors.ini b/examples/npipe-priors.ini new file mode 100644 index 00000000..b9a2e993 --- /dev/null +++ b/examples/npipe-priors.ini @@ -0,0 +1,7 @@ +[cosmological_parameters] +n_s = gaussian 0.96 0.02 +ombh2 = gaussian 0.0222 0.0005 + + +[planck] +a_planck = gaussian 1.0 0.0025 \ No newline at end of file diff --git a/examples/npipe-values.ini b/examples/npipe-values.ini new file mode 100644 index 00000000..6778479f --- /dev/null +++ b/examples/npipe-values.ini @@ -0,0 +1,42 @@ +[planck] +a_planck = 0.9 1.0 1.1 ; Total Planck calibration (relative to 1) at map level, scales all channels + + +[cosmological_parameters] + cosmomc_theta = 0.9 1.040909 1.2 ; this is actually 100 * theta_mc +;h0 = 0.4 0.7 1.0 +omch2 = 0.05 0.12 0.2 +log1e10As = 2.9 3.0448 3.1 ; structure amplitude parameter + +ombh2 = 0.020 0.0222 0.025 +n_s = 0.9 0.96 1.02 + + +omega_k = 0.0 ;spatial curvature + +;reionization +tau = 0.055 ;reionization optical depth + + +;neutrinos +mnu = 0.06 +nnu = 3.046 +num_massive_neutrinos = 1 + +;helium +yhe = 0.245341 ;helium mass fraction + + +k_s = 0.05 ;Power spectrum pivot scale +n_run = 0.0 ;running of scalar spectrum +r_t = 0.0 ;tensor to scalar ratio +n_t = 0.0 ;tensor spectral index + +;dark energy equation of state +w = -1.0 ;equation of state of dark energy +wa = 0.0 ;equation of state of dark energy (redshift dependency) + + +[halo_model_parameters] +A = 3.13 +eta = 0.603 \ No newline at end of file diff --git a/examples/npipe.ini b/examples/npipe.ini new file mode 100644 index 00000000..36fb31ec --- /dev/null +++ b/examples/npipe.ini @@ -0,0 +1,59 @@ +[runtime] +sampler = test +root = ${PWD} +verbosity = standard + +[nautilus] +n_live = 1000 +verbose = T + + + +[test] +save_dir=output/npipe +fatal_errors=T + + +[output] +filename = output/npipe.txt + +[DEFAULT] +; This value is used below as %(planck_path)s +planck_path = likelihood/planck2018/baseline/plc_3.0 + + +[pipeline] +; these names refer to sections later in the file: +modules = consistency camb planck_npipe +values = examples/npipe-values.ini +priors = examples/npipe-priors.ini +debug=T +timing=F +extra_output = cosmological_parameters/sigma_8 cosmological_parameters/omega_m cosmological_parameters/h0 + +[planck_npipe] +;Planck 2018 high ell TT,TE and EE + low ell 
TT + low ell EE (in Planck notations = TT+lowE) +;without CMB lensing +file = likelihood/planck-npipe/npipe_interface.py +use_marginalized = T + +; The consistency module translates between our chosen parameterization +; and any other that modules in the pipeline may want (e.g. camb) +[consistency] +file = ./utility/consistency/consistency_interface.py +cosmomc_theta = T + + +[camb] +file = boltzmann/camb/camb_interface.py +mode = cmb +lmax = 2800 ;max ell to use for cmb calculation +feedback=0 ;amount of output to print +AccuracyBoost=1.1 ;CAMB accuracy boost parameter +do_tensors = True ;include tensor modes +do_lensing = true ;lensing is required w/ Planck data +NonLinear = lens +accurate_massive_neutrino_transfers = T +theta_H0_range = "40 100" +halofit_version = mead + diff --git a/examples/pantheon-importance.ini b/examples/pantheon-importance.ini new file mode 100644 index 00000000..77f0b974 --- /dev/null +++ b/examples/pantheon-importance.ini @@ -0,0 +1,52 @@ +[runtime] +; The emcee sampler, which uses the Goodman & Weare algorithm +sampler = importance +root = ${PWD} +resume=T +verbosity = quiet + +[importance] +input = output/pantheon.txt +nstep = 50 +; If you just want to add a new likelihood to an existing chain, set add_to_likelihood = T +; If you want an alternative likelihood entirely, set it to F. +add_to_likelihood = T + +[output] +filename = output/pantheon-desi-importance.txt +format = text + +[pipeline] +; In this pipeline we combine with an additional likelihood +modules = consistency camb desi +values = examples/pantheon_values.ini +extra_output = cosmological_parameters/ommh2 +debug=F +timing=F + + +[camb] +; For background-only data we do not need a full +; Boltzmann evaluation, just D(z), etc. +; Setting mode=background means we get this. +file = boltzmann/camb/camb_interface.py +mode = background +feedback = 0 + +; We need quite fine redshift spacing, because the supernovae +; go down to low z where things are pretty sensitive +nz_background = 200 +zmin_background = 0.0 +zmax_background = 2.0 + + +; Combine with a single DESI BAO measurement (not the full set) +[desi] +file = likelihood/bao/desi1-dr1-arxiv/desi1_dr1_arxiv.py +desi_data_sets = BGS + + +; The consistency module translates between our chosen parameterization +; and any other that modules in the pipeline may want (e.g. 
camb) +[consistency] +file = utility/consistency/consistency_interface.py diff --git a/likelihood/bao/desi1-dr1-arxiv/desi1_dr1_arxiv.py b/likelihood/bao/desi1-dr1-arxiv/desi1_dr1_arxiv.py new file mode 100644 index 00000000..1514e986 --- /dev/null +++ b/likelihood/bao/desi1-dr1-arxiv/desi1_dr1_arxiv.py @@ -0,0 +1,181 @@ +from cosmosis.gaussian_likelihood import GaussianLikelihood +import numpy as np +import scipy.interpolate + +# The three different types of measurement +# of BAO used in this data release +KIND_DV = 1 +KIND_DM = 2 +KIND_DH = 3 + + +# Data from table 1 of https://arxiv.org/pdf/2404.03002 +DESI_DATA_SETS = { + "BGS": { + "kind": "d_v", + "z_eff": 0.295, + "mean": 7.93, + "sigma": 0.15, + }, + "LRG1": { + "kind": "d_m_d_h", + "z_eff": 0.51, + "mean": [13.62, 20.98], + "sigma": [0.25, 0.61], + "corr": -0.445, + }, + "LRG2": { + "kind": "d_m_d_h", + "z_eff": 0.706, + "mean": [16.85, 20.08], + "sigma": [0.32, 0.60], + "corr": -0.420, + }, + "LRG3+ELG1": { + "kind": "d_m_d_h", + "z_eff": 0.930, + "mean": [21.71, 17.88], + "sigma": [0.28, 0.35], + "corr": -0.389, + }, + "ELG2": { + "kind": "d_m_d_h", + "z_eff": 1.317, + "mean": [27.79, 13.82], + "sigma": [0.69, 0.42], + "corr": -0.444, + }, + "QSO": { + "kind": "d_v", + "z_eff": 1.491, + "mean": 26.07, + "sigma": 0.67, + }, + "Lya QSO": { + "kind": "d_m_d_h", + "z_eff": 2.330, + "mean": [39.71, 8.52], + "sigma": [0.94, 0.17], + "corr": -0.477, + }, +} + +class DESILikelihood(GaussianLikelihood): + """ + The 2024 DESI likelihoods from https://arxiv.org/pdf/2404.03002 + + We allow the user to specify which data sets to use, and combine + them all into one. The data sets are: + - BGS + - LRG1 + - LRG2 + - LRG3+ELG1 + - ELG2 + - QSO + - Lya QSO + + """ + # users can override this if they want to use a different name + # which can be useful if you want to keep the different likelihoods + # separately. + like_name = "desi_bao" + x_section = 'distances' + x_name = 'z' + y_section = 'distances' + + def __init__(self, options): + data_sets = options.get_string("desi_data_sets") + data_sets = data_sets.split(',') + + allowed = list(DESI_DATA_SETS.keys()) + for data_set in data_sets: + data_set = data_set.strip() + if data_set not in allowed: + raise ValueError(f"Unknown DESI data set {data_set}. Valid options are: {allowed} (comma-separated to use more than one)") + self.data_sets = data_sets + super().__init__(options) + + + def build_data(self): + z = [] + mu = [] + kinds = [] + for name in self.data_sets: + ds = DESI_DATA_SETS[name] + + # collect the effective redshfits for the measurements + z.append(ds["z_eff"]) + + # The d_v type measurements are just a single number + # but the d_m_d_h measurements are two values + if ds["kind"] == "d_v": + mu.append(ds["mean"]) + kinds.append(KIND_DV) + else: + mu.extend(ds["mean"]) + kinds.append(KIND_DM) + kinds.append(KIND_DH) + # This makes the z array the same length + # as the mu array. But because the D_M and D_H + # measurements are at the same redshift we only + # need to store the redshift once, and this should + # hopefully trigger an error if we mess up later. 
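+                # (The -1.0 below is a deliberately invalid placeholder:
+                # extract_theory_points only interpolates at the D_V and D_M
+                # redshifts, so this slot of data_x is never actually read.)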
+ z.append(-1.0) + + kinds = np.array(kinds) + z = np.array(z) + mu = np.array(mu) + + # record the indices of the d_v and d_m_d_h measurements + # for later + self.dv_index = np.where(kinds==KIND_DV)[0] + self.dm_index = np.where(kinds==KIND_DM)[0] + self.dh_index = np.where(kinds==KIND_DH)[0] + + self.any_dv = len(self.dv_index) > 0 + self.any_dmdh = len(self.dm_index) > 0 + + return z, mu + + def build_covariance(self): + n = len(self.data_x) + C = np.zeros((n, n)) + i = 0 + for name in self.data_sets: + ds = DESI_DATA_SETS[name] + if ds["kind"] == "d_v": + C[i, i] = ds["sigma"]**2 + i += 1 + else: + C[i, i] = ds["sigma"][0]**2 + C[i+1, i+1] = ds["sigma"][1]**2 + C[i, i+1] = C[i+1, i] = ds["corr"]*ds["sigma"][0]*ds["sigma"][1] + i += 2 + return C + + def extract_theory_points(self, block): + z_theory = block[self.x_section, self.x_name] + y = np.zeros(self.data_x.size) + r_s = block[self.y_section, "rs_zdrag"] + + block["distances", "h0rd"] = block["cosmological_parameters", "h0"] * r_s + + if self.any_dv: + d_v = block[self.y_section, "d_v"] + z_data = self.data_x[self.dv_index] + f = scipy.interpolate.interp1d(z_theory, d_v/r_s, kind=self.kind) + y[self.dv_index] = f(z_data) + + if self.any_dmdh: + z_data = self.data_x[self.dm_index] + + d_m = block[self.y_section, "d_m"] + f = scipy.interpolate.interp1d(z_theory, d_m/r_s, kind=self.kind) + y[self.dm_index] = f(z_data) + + d_h = 1.0 / block[self.y_section, "h"] + f = scipy.interpolate.interp1d(z_theory, d_h/r_s, kind=self.kind) + y[self.dh_index] = f(z_data) + return y + +setup, execute, cleanup = DESILikelihood.build_module() diff --git a/likelihood/bao/desi1-dr1-arxiv/module.yaml b/likelihood/bao/desi1-dr1-arxiv/module.yaml new file mode 100644 index 00000000..16bb5ebb --- /dev/null +++ b/likelihood/bao/desi1-dr1-arxiv/module.yaml @@ -0,0 +1,81 @@ +#This is a template for module description files +name: "desi_dr1_arxiv" +version: "arxiv" +purpose: "DESI BAO likelihood from DR1 data" +url: "https://www.desi.lbl.gov/" +interface: "desi1_dr1_arxiv.py" +attribution: [DESI Team (data), CosmoSIS Team (module)] +rules: + "None" +cite: + - "https://arxiv.org/abs/2404.03002" + +assumptions: + - "DESI reconstruction and measurements of BAO data" + - "Gaussian likelihood" + +explanation: | + The DESI DR1 release provided measurements of the sound horizon imprint on + galaxy and quasar clustering, and the Lyman alpha forest. This module + provides a likelihood for these measurements, assuming a Gaussian likelihood + for the data, as presented in the DESI DR1 arXiv paper. + +# List of parameters that can go in the params.ini file in the section for this module +params: + desi_data_sets: + meaning: "Choice of which DESI data set(s) to include. 
Comma-separated string of choices from: BGS LRG1 LRG2 LRG3+ELG1 ELG2 QSO Lya QSO"
+        type: str
+        default:
+
+#Inputs for a given choice of a parameter, from the values.ini or from other modules
+#If no such choices, just do one of these omitting mode=something part:
+inputs:
+    distances:
+        rs_zdrag:
+            meaning: Sound horizon at the drag epoch, in Mpc
+            type: real
+            default:
+        z:
+            meaning: Sample points in redshift of distance theory prediction
+            type: real 1d
+            default:
+        d_v:
+            meaning: Angular average of comoving angular diameter and line of sight distance in Mpc
+            type: real 1d
+            default:
+        d_m:
+            meaning: Co-moving distance in Mpc
+            type: real 1d
+            default:
+        h:
+            meaning: Hubble parameter as function of redshift, in units of 1/Mpc
+            type: real 1d
+            default:
+
+outputs:
+    likelihoods:
+        desi_bao_like:
+            meaning: Gaussian likelihood value.
+            type: real
+    data_vector:
+        desi_bao_data:
+            meaning: The full vector of data points used in the likelihood
+            type: real 1d
+        desi_bao_theory:
+            meaning: The full vector of theory points used in the likelihood
+            type: real 1d
+        desi_bao_covariance:
+            meaning: The covariance matrix used
+            type: real 2d
+        desi_bao_inverse_covariance:
+            meaning: The inverse covariance matrix (precision matrix) used.
+            type: real 2d
+        desi_bao_simulation:
+            meaning: A simulated data set from the given theory and covariance matrix.
+            type: real 1d
+        desi_bao_chi2:
+            meaning: Chi-squared value for the data and theory
+            type: real
+        desi_bao_n:
+            meaning: Number of data points used
+            type: int
diff --git a/likelihood/candl/README.txt b/likelihood/candl/README.txt
new file mode 100644
index 00000000..28609abb
--- /dev/null
+++ b/likelihood/candl/README.txt
@@ -0,0 +1,33 @@
+candl - CosmoSIS interface
+---------------------------
+
+The files in this folder help you load any candl (https://github.com/Lbalkenhol/candl) likelihood into CosmoSIS.
+
+Usage
+---------------------------
+
+In order to run chains with e.g. the SPT-3G 2018 lensing likelihood, include a block like the following in your ``.ini`` file (the section name should match the entry in your pipeline's module list, e.g. ``[spt3g_2018_lensing]``):
+
+file = ./likelihood/candl/candl_cosmosis_interface.py ; Location of interface code - change depending on the location of your .ini file
+data_set = 'candl.data.SPT3G_2018_Lens' ; Data set or path to .yaml file
+variant = 'use_CMB' ; Select a variant of the data set if pointing to an index file
+lensing = T ; Switch on for lensing likelihoods
+feedback = T ; Switch on to request feedback from candl initialisation
+data_selection = "..." ; Select a subset of the data set
+clear_1d_internal_priors = T ; Switch off to use candl internal 1d priors
+clear_nd_internal_priors = F ; Switch on to ignore candl internal higher dimensional priors. Careful: higher-dimensional priors are not implemented in CosmoSIS itself.
+force_ignore_transformations = '' ; Backdoor if you want to ignore certain transformations in the data model.
+
+Modify the information above as needed for other data sets. You can also consult the candl documentation for more information (https://candl.readthedocs.io/en/latest/).
+
+Note that by default the 1-dimensional internal priors declared in candl's data set ``.yaml`` file are ignored, while the multi-dimensional priors are applied. If you want to modify this behaviour, set ``clear_1d_internal_priors`` and ``clear_nd_internal_priors``.
+
+Credit
+---------------------------
+
+If you use this wrapper in your work please cite candl (https://arxiv.org/abs/2401.13433) as well as the relevant papers for all likelihoods you have accessed. 
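+
+Testing
+---------------------------
+
+The file ``examples/candl_test.ini`` in this repository runs the SPT-3G 2018
+lensing likelihood end-to-end with the test sampler; it is exercised by
+``test_candl`` in ``tests/test_cosmosis_standard_library.py``.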
+ +Authors +--------------------------- + +This wrapper was written by L. Balkenhol and Y. Omori, with help from J. Zuntz. If you have any questions or experience issues, please feel free to contact L. Balkenhol (lennart.balkenhol_at_iap.fr). diff --git a/likelihood/candl/candl_cosmosis_interface.py b/likelihood/candl/candl_cosmosis_interface.py new file mode 100644 index 00000000..0771a72a --- /dev/null +++ b/likelihood/candl/candl_cosmosis_interface.py @@ -0,0 +1,175 @@ +try: + from candl.lib import * + import candl + import candl.data +except ImportError: + raise RuntimeError('Can not find candl. Try running: pip install candl-like') + +from cosmosis.datablock import names, SectionOptions +import numpy as np +import os + +class CandlCosmoSISLikelihood: + """ + A thin wrapper to use candl likelihoods in CosmoSIS. + """ + + def __init__(self, options): + """ + Read in options from CosmoSIS ini file and initialise requested candl likelihood. + """ + + # Read requested data set from ini and grab a name under which the logl value will be recorded + data_set_str = options.get_string("data_set", default="") + if data_set_str[:11] == "candl.data.": + # This looks like a short-cut to a pre-installed data set, e.g. "candl.data.SPT3G_2018_Lens" + self.data_set_file = eval(data_set_str) + self.name = "candl." + data_set_str.split(".")[-1] + else: + # Assume this to be a path pointing directly to a data set .yaml file + self.data_set_file = data_set_str + self.name = "candl." + self.data_set_file.split("/")[-1][:-5] + + # Read in other options from ini + self.lensing = options.get_bool('lensing', default=True) + self.clear_1d_internal_priors = options.get_bool('clear_1d_internal_priors', default=True) + self.clear_nd_internal_priors = options.get_bool('clear_nd_internal_priors', default=False)# CosmoSIS only has 1d priors implemented + self.feedback = options.get_bool('feedback', default=True) + + # Optional entries + init_args = {"feedback": self.feedback} + + # Data selection + if options.has_value("data_selection"): + self.data_selection = options.get_string("data_selection", default="...") + data_selection_requested = True + if isinstance(self.data_selection, str): + if self.data_selection == "...": + data_selection_requested = False + if data_selection_requested: + init_args["data_selection"] = self.data_selection + + # Likelihood variant + if options.has_value("variant"): + self.variant_str = options.get_string("variant", default=None) + init_args["variant"] = self.variant_str + + # Force ignore transformations + if options.has_value("force_ignore_transformations"): + self.force_ignore_transformations = options.get_string("force_ignore_transformations", default=None) + init_args["force_ignore_transformations"] = self.force_ignore_transformations + + # Initialise the likelihood + try: + if self.lensing: + self.candl_like = candl.LensLike( + self.data_set_file, + **init_args, + ) + else: + self.candl_like = candl.Like( + self.data_set_file, + **init_args, + ) + except: + raise Exception("candl: likelihood could not be initialised!") + + # By default clear internal priors and assume these are taken care off by CosmoSIS + keep_prior_ix = [] + for i, prior in enumerate(self.candl_like.priors): + if prior.prior_covariance.shape[0] == 1 and not self.clear_1d_internal_priors: + keep_prior_ix.append(i) + elif prior.prior_covariance.shape[0] > 1 and not self.clear_nd_internal_priors: + keep_prior_ix.append(i) + self.candl_like.priors = [self.candl_like.priors[i] for i in keep_prior_ix] + + + def 
reformat(self,block): + """ + Converting from CosmoSIS to candl format + """ + + model_dict = {} + + # Load all cosmological parameters and add extra params so that candl understands + # These should not be needed by candl, but it doesn't hurt in case anyone builds a likelihood that directly depends on cosmological parameters + cos_par_names = [param_name for param_sec, param_name in block.keys() if param_sec == 'cosmological_parameters'] + model_dict = {par: block[('cosmological_parameters',par)] for par in cos_par_names} + model_dict['H0'] = model_dict['h0']*100 + model_dict['ns'] = model_dict['n_s'] + model_dict['logA'] = model_dict['log1e10as'] + + # Load all nuisance parameters + nuisance_par_names = [param_name for param_sec, param_name in block.keys() if param_sec == 'nuisance_parameters'] + + # Match any nuisance parameters in candl and restore right cases + like_nuisance_pars_lowered = [p.lower() for p in self.candl_like.required_nuisance_parameters] + for i, par in enumerate(nuisance_par_names): + if par in like_nuisance_pars_lowered: + nuisance_par_names[i] = self.candl_like.required_nuisance_parameters[like_nuisance_pars_lowered.index(par)] + + for par in nuisance_par_names: + model_dict[par] = block[('nuisance_parameters',par)]# CosmoSIS doesn't care about cases, so putting them in is easy + + # Read in Cls from CosmoSIS and save them in dict. + # CosmoSIS outputs CAMB Cls in unit of l(l+1)/(2pi) + # For pp it's ell * (ell + 1) / (2 * np.pi) - i.e. missing the customary extra ell * (ell + 1) wrt eg the CAMB standard + # This matches candl expectations for primary CMB - but we need to convert this for lensing + ell = block[names.cmb_cl, 'ell'] + cl_tt = block[names.cmb_cl, 'tt'] + cl_ee = block[names.cmb_cl, 'ee'] + cl_te = block[names.cmb_cl, 'te'] + cl_bb = block[names.cmb_cl, 'bb'] + cl_pp = block[names.cmb_cl, 'pp'] * ell * (ell + 1) + cl_kk = cl_pp * np.pi / 2.0 + + # Figure out ell range of supplied spectra w.r.t. 
the expectation of the likelihood + N_ell = self.candl_like.ell_max - self.candl_like.ell_min + 1 + + theory_start_ix = ( + np.amax((ell[0], self.candl_like.ell_min)) + - ell[0] + ) + theory_stop_ix = ( + np.amin((ell[-1], self.candl_like.ell_max)) + + 1 + - ell[0] + ) + + like_start_ix = ( + np.amax((ell[0], self.candl_like.ell_min)) - self.candl_like.ell_min + ) + like_stop_ix = ( + np.amin((ell[-1], self.candl_like.ell_max)) + + 1 + - self.candl_like.ell_min + ) + + # Slot supplied CMB spectra into an array of zeros of the correct length + # candl will optionally import JAX which should ensure the two methods below run + model_dict['Dl'] = {'ell': np.arange(self.candl_like.ell_min, self.candl_like.ell_max + 1)} + for spec_type, spec in zip(["pp", "kk", "TT", "EE", "BB", "TE"], [cl_pp, cl_kk, cl_tt, cl_ee, cl_bb, cl_te]): + model_dict['Dl'][spec_type] = jnp.zeros(N_ell) + model_dict['Dl'][spec_type] = jax_optional_set_element( + model_dict['Dl'][spec_type], + np.arange(like_start_ix, like_stop_ix), + spec[theory_start_ix:theory_stop_ix], + ) + + return model_dict + + + def likelihood(self, block): + '''Computes loglike''' + logl = self.candl_like.log_like(self.reformat(block)) + return float(logl) + + +def setup(options): + options = SectionOptions(options) + return CandlCosmoSISLikelihood(options) + +def execute(block, config): + like = config.likelihood(block) + block[names.likelihoods, "%s_like"%config.name] = like + return 0 \ No newline at end of file diff --git a/likelihood/candl/module.yaml b/likelihood/candl/module.yaml new file mode 100644 index 00000000..48b61dd9 --- /dev/null +++ b/likelihood/candl/module.yaml @@ -0,0 +1,83 @@ +name: "candl" +version: "1.0.0" +purpose: "Interface with candl." +url: "https://github.com/Lbalkenhol/candl" +interface: "candl_cosmosis_interface.py" +attribution: ["Y. Omori", "L. Balkenhol"] +rules: + "None" +cite: + - "https://doi.org/10.48550/arXiv.2401.13433" + +assumptions: + - "candl python module" + +explanation: | + "Interface to candl." + +# List of parameters that can go in the params.ini file in the section for this module +params: + data_set: + meaning: "The data set to use" + type: str + variant: + meaning: "Variant of the data set requested, if applicable." + type: str + default: None + lensing: + meaning: "Lensing likelihood (or primary CMB)." + type: bool + default: True + feedback: + meaning: "Whether to generate feedback from the likelihood initialisation." + type: bool + default: True + data_selection: + meaning: "Further data selection" + type: any + default: "..." + clear_1d_internal_priors: + meaning: "Delete all 1d Gaussian priors implemented in candl." + type: bool + default: True + clear_nd_internal_priors: + meaning: "Delete all >1d Gaussian priors implemented in candl. Beware: CosmoSIS does not have support for higher-dimensional Gaussian priors." + type: bool + default: False + force_ignore_transformations: + meaning: "Backdoor to explicitly ignore certain transformations in the data model. See candl documentation for more information." 
+ type: str + default: None + +inputs: + cmb_cl: + ell: + meaning: "CMB power spectrum ell values of other inputs" + type: real 1d + default: + tt: + meaning: "Lensed CMB temperature power spectrum in ell(ell+1)/2pi units" + type: real 1d + default: + te: + meaning: "Lensed CMB temperature-E-mode polarization cross power spectrum in ell(ell+1)/2pi units" + type: real 1d + default: + ee: + meaning: "Lensed CMB E-mode polaration power spectrum in ell(ell+1)/2pi units" + type: real 1d + default: + bb: + meaning: "Lensed CMB B-mode polaration power spectrum in ell(ell+1)/2pi units" + type: real 1d + default: + pp: + meaning: "CMB lensing phi-phi power spectrum in ell(ell+1)/2pi units" + type: real 1d + default: + +outputs: + likelihoods: + candl_like: + meaning: "candl likelihood" + type: real \ No newline at end of file diff --git a/likelihood/planck-npipe/npipe_interface.py b/likelihood/planck-npipe/npipe_interface.py new file mode 100644 index 00000000..03719046 --- /dev/null +++ b/likelihood/planck-npipe/npipe_interface.py @@ -0,0 +1,49 @@ +from cosmosis.datablock import option_section +try: + from planckpr4lensing import PlanckPR4LensingMarged, PlanckPR4Lensing +except ImportError: + raise ImportError("Please install the planckpr4lensing with: pip install git+https://github.com/carronj/planck_PR4_lensing") +import numpy as np + + + +def setup(options): + marged = options.get_bool(option_section, "use_marginalized", default=True) + if marged: + calculator = PlanckPR4LensingMarged() + print("Using primary CMB-marginalized PR4 likelihood. TT, EE, TE, BB, will not be used") + else: + calculator = PlanckPR4Lensing() + print("NOT using primary CMB-marginalized PR4 likelihood. TT, EE, TE, BB, will be used") + return calculator, marged + + + +def execute(block, config): + calculator, marged = config + ell = block['cmb_cl', 'ell'] + A = block['planck', 'a_planck'] + + # Convert from D_ell to the PP pre-factor, ell**2 (ell+1)**2 / 2pi + pp = block['cmb_cl', 'pp'] * ell * (ell + 1.) + cl = {"pp":pp} + + # If we are not using the marginalized version, we need to provide the full set of Cls + # If we are using the marginalized version these are pre-marginalized over + if not marged: + cl["tt"] = block['cmb_cl', 'tt'] + cl["te"] = block['cmb_cl', 'te'] + cl["ee"] = block['cmb_cl', 'ee'] + cl["bb"] = block['cmb_cl', 'bb'] + + + # npipe wants to start at zero + if ell[0] == 2: + ell = np.concatenate([[0, 1], ell]) + for key in cl: + cl[key] = np.concatenate([[0.0, 0.0], cl[key]]) + + block["likelihoods", "npipe_like"] = calculator.log_likelihood(cl, A_planck=A) + + + return 0 diff --git a/likelihood/sacc/sacc_like.py b/likelihood/sacc/sacc_like.py index affaf639..a45cb396 100644 --- a/likelihood/sacc/sacc_like.py +++ b/likelihood/sacc/sacc_like.py @@ -123,7 +123,7 @@ def build_data(self): self.sections_for_names = {} for name in final_names: if self.options.has_value(f"{name}_section"): - section = options[f"{name}_section"] + section = self.options[f"{name}_section"] elif name in default_sections: section = default_sections[name] else: diff --git a/likelihood/tdcosmo/module.yaml b/likelihood/tdcosmo/module.yaml index e291aef8..d130553c 100644 --- a/likelihood/tdcosmo/module.yaml +++ b/likelihood/tdcosmo/module.yaml @@ -22,8 +22,7 @@ assumptions: - Strong lensing modelling details. 
- Time delay distance structure - Hierarchical inference of the mass model and stellar anisotropy parameters -explanation: - - " +explanation: | This module contain the likelihood of a 7 time-delay lenses, presented in TDCOSMO IV (Birrer et al., 2020). This module allows us to reproduce the hierarchical inference of the cosmological parameters and of the lens population parameters, which are grouped under the block `nuisance_strong_lensing` (see details below). @@ -31,7 +30,6 @@ explanation: Additional data sets such as 'SLACS_SDSS' and 'SLACS_IFU' can be added to the 'tdcosmo7' data set to help constrain these parameters. Adding these 2 data sets requires to make the additional assumption that the lensing galaxy of the 7 TDCOSMO lenses and of the SLACS lenses come from the same population of galaxies. - " params: data_sets: diff --git a/module.yaml b/module.yaml index 86980dcf..c0707cab 100644 --- a/module.yaml +++ b/module.yaml @@ -39,3 +39,4 @@ outputs: meaning: type: default: + diff --git a/number_density/gaussian_window/gaussian_window.py b/number_density/gaussian_window/gaussian_window.py index a03fd9a0..ca2f5a01 100644 --- a/number_density/gaussian_window/gaussian_window.py +++ b/number_density/gaussian_window/gaussian_window.py @@ -1,7 +1,10 @@ from cosmosis.datablock import option_section from cosmosis.datablock import names import numpy as np -import scipy.integrate +try: + from scipy.integrate import trapezoid +except ImportError: + from scipy.integrate import trapz as trapezoid def setup(options): z = options[option_section, "z"] @@ -43,7 +46,7 @@ def execute(block, config): nz_bin = gaussian(z, mu[i - 1], sigma[i - 1]) # the bin may not quite go to zero before we get to the # edges so normalize it - norm = scipy.integrate.trapz(nz_bin, z) + norm = trapezoid(nz_bin, z) nz_bin /= norm # Save n(z) block[section, "BIN_%d" % i] = nz_bin diff --git a/number_density/nz_multirank/nz_gz.py b/number_density/nz_multirank/nz_gz.py index 1fde4181..2f376518 100644 --- a/number_density/nz_multirank/nz_gz.py +++ b/number_density/nz_multirank/nz_gz.py @@ -12,13 +12,16 @@ def nz_to_gchi(z, nz, cosmo=cosmology.Planck15): from numpy import apply_along_axis, gradient, multiply, newaxis - from scipy.integrate import cumtrapz + try: + from scipy.integrate import cumulative_trapezoid + except ImportError: + from scipy.integrate import cumtrapz as cumulative_trapezoid chi = cosmo.comoving_distance(z).value dchi = apply_along_axis(gradient, -1, chi) dz = apply_along_axis(gradient, -1, z) nchi = multiply(nz, dz/dchi) - int1 = cumtrapz(nchi, chi, initial=0) - int2 = cumtrapz(nchi/chi, chi, initial=0) + int1 = cumulative_trapezoid(nchi, chi, initial=0) + int2 = cumulative_trapezoid(nchi/chi, chi, initial=0) gchi = (int1[...,-1,newaxis] - int1) - chi*(int2[...,-1,newaxis] - int2) return chi, gchi diff --git a/shear/point_mass/add_gammat_point_mass.py b/shear/point_mass/add_gammat_point_mass.py index 0c622ad6..89f6ca4c 100644 --- a/shear/point_mass/add_gammat_point_mass.py +++ b/shear/point_mass/add_gammat_point_mass.py @@ -13,6 +13,10 @@ import scipy.integrate import astropy.units as u import astropy.constants as const +try: + from scipy.integrate import simpson +except ImportError: + from scipy.integrate import simps as simpson sigcrit_inv_fac = (4 * np.pi * const.G)/(const.c**2) sigcrit_inv_fac_Mpc_Msun = (sigcrit_inv_fac.to(u.Mpc/u.M_sun)).value @@ -110,7 +114,7 @@ def execute(block, config): nz_source = block[config['source_nz_section'], "bin_%d" % (j2 + 1)][1:] ng_array_source_rep = 
np.tile(nz_source.reshape(1, len(z_source)), (len(z_lens), 1)) - int_sourcez = sp.integrate.simps(ng_array_source_rep * (num / chi_smat), z_source) + int_sourcez = simpson(ng_array_source_rep * (num / chi_smat), z_source) coeff_ints = sigcrit_inv_fac_Mpc_Msun @@ -120,7 +124,7 @@ def execute(block, config): # Since gamma_t is a scalar it should be same in both physical and comoving coordinates # It is just easier to match the expressions in comoving coordinates to the ones on methods paper. - betaj1j2_pm = sp.integrate.simps(nz_lens * Is * (1./chi_lens**2), z_lens) + betaj1j2_pm = simpson(nz_lens * Is * (1./chi_lens**2), z_lens) if (config['use_fiducial']): config['betaj1j2'][str(j1) + '_' + str(j2)] = betaj1j2_pm else: diff --git a/structure/baccoemu/baccoemu_vendored/matter_powerspectrum.py b/structure/baccoemu/baccoemu_vendored/matter_powerspectrum.py index ab1d84b6..96eaaf9a 100644 --- a/structure/baccoemu/baccoemu_vendored/matter_powerspectrum.py +++ b/structure/baccoemu/baccoemu_vendored/matter_powerspectrum.py @@ -1578,16 +1578,19 @@ def _smeared_bao_pk(k_lin=None, pk_lin=None, k_emu=None, pk_lin_emu=None, pk_nw= :return: smeared BAO pk computed at k_emu :rtype: array_like """ - from scipy.integrate import trapz + try: + from scipy.integrate import trapezoid + except ImportError: + from scipy.integrate import trapz as trapezoid if grid is None: - sigma_star_2 = trapz(k_lin * pk_lin, x=np.log(k_lin)) / (3 * np.pi**2) + sigma_star_2 = trapezoid(k_lin * pk_lin, x=np.log(k_lin)) / (3 * np.pi**2) k_star_2 = 1 / sigma_star_2 G = np.exp(-0.5 * (k_emu**2 / k_star_2)) if pk_nw is None: pk_nw = _nowiggles_pk(k_lin=k_lin, pk_lin=pk_lin, k_emu=k_emu) else: - sigma_star_2 = trapz(k_lin[None,:] * pk_lin, x=np.log(k_lin[None:,]), axis=1) / (3 * np.pi**2) + sigma_star_2 = trapezoid(k_lin[None,:] * pk_lin, x=np.log(k_lin[None:,]), axis=1) / (3 * np.pi**2) k_star_2 = 1 / sigma_star_2 G = np.exp(-0.5 * (k_emu**2 / k_star_2[:,None])) if pk_nw is None: diff --git a/tests/test_cosmosis_standard_library.py b/tests/test_cosmosis_standard_library.py index 58a260c2..6dc185f8 100644 --- a/tests/test_cosmosis_standard_library.py +++ b/tests/test_cosmosis_standard_library.py @@ -1,8 +1,12 @@ #!/usr/bin/env python from cosmosis import run_cosmosis from cosmosis.postprocessing import run_cosmosis_postprocess +from cosmosis.runtime.handler import activate_segfault_handling + import pytest import os +import sys +activate_segfault_handling() def check_likelihood(capsys, expected, *other_possible): captured = capsys.readouterr() @@ -143,6 +147,12 @@ def test_kids(capsys): check_no_camb_warnings(capsys) def test_bacco(): + if sys.version_info > (3, 11): + pytest.skip("No tensorflow support on python 3.12 yet.") + # skip if running on CI with python 3.9 or 3.10 on macOS + if sys.platform == "darwin" and sys.version_info[:2] in [(3, 9), (3, 10)] and os.environ.get("CI"): + pytest.skip("Skipping Bacco on MacOS with Python 3.9 or 3.10 when running CI") + # The baseline version just does non-linear matter power run_cosmosis("examples/bacco.ini") @@ -175,3 +185,23 @@ def test_hsc_real(capsys): pytest.skip("Sacc not installed") run_cosmosis("examples/hsc-y3-shear-real.ini") check_likelihood(capsys, "-122.5") + +def test_npipe(capsys): + try: + import planckpr4lensing + except ImportError: + pytest.skip("Planck PR4 lensing likelihood not found") + run_cosmosis("examples/npipe.ini") + check_likelihood(capsys, "-4.22", "-4.23") + +def test_desi(capsys): + run_cosmosis("examples/desi.ini") + check_likelihood(capsys, "-11.25") + 
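+# candl is an optional dependency: CI installs it with "pip install candl-like"
+# on supported Python versions (it is not installed for 3.7 and 3.8).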
+def test_candl(capsys):
+    try:
+        import candl
+    except ImportError:
+        pytest.skip("Candl not installed")
+    run_cosmosis("examples/candl_test.ini")
+    check_likelihood(capsys, "-5.83")
diff --git a/utility/rescale_distances_rdh/module.yaml b/utility/rescale_distances_rdh/module.yaml
new file mode 100644
index 00000000..3c106f4d
--- /dev/null
+++ b/utility/rescale_distances_rdh/module.yaml
@@ -0,0 +1,84 @@
+#This is a template for module description files
+name: "rescale_distances_rdh"
+version: "1.0"
+purpose: "Rescale computed distances to be consistent with a given value of r_d * h"
+url: ""
+interface: "rescale_distances_rdh.py"
+attribution: [CosmoSIS Team]
+rules: []
+cite: []
+assumptions:
+    - "FLRW"
+explanation: |
+    BAO people sometimes sample using the best-constrained BAO parameter r_d * h,
+    where r_d is the sound horizon at the drag epoch, shortly after last scattering.
+    This module takes a set of distances and rescales them to be consistent with a
+    given value of r_d * h. This is useful for replicating, for example, the DESI
+    BAO-only analysis.
+
+    For that reason this module is limited to rescaling distances, and does not
+    rescale, for example, k values in a power spectrum.
+
+    In general I'd discourage people from sampling in whatever is the best-constrained
+    parameter for their particular analysis - it doesn't correspond to particularly
+    sensible priors in any other parametrization. But it can make sampling more efficient,
+    depending on the sampler.
+
+    Specifically, this module reads a value of rd_prime computed from a (fixed) value
+    of H0, e.g. by camb, and a sampled rdh_sample value, from which it computes
+    rd_sample = rdh_sample / h_fid. It then sets f = rd_prime / rd_sample, multiplies
+    all the distance measures by f, rescales H(z) by 1/f, and adjusts mu(z) accordingly.
+
+    This module is not intended to be used in a general way, but only in the context of
+    the specific BAO analysis described above.
+
+params: {}
+
+inputs:
+    cosmological_parameters:
+        h0:
+            meaning: Fiducial value of h = H0 / (100 km/s/Mpc). Should be kept fixed
+            type: real
+            default:
+        rdh:
+            meaning: Sampled value of r_d * h, in Mpc
+            type: real
+            default:
+    distances:
+        rs_zdrag:
+            meaning: "Sound horizon at drag epoch computed using fiducial h0, in Mpc"
+            type: real
+            default:
+outputs:
+    cosmological_parameters:
+        h0:
+            meaning: Updated h0
+            type: real
+    distances:
+        D_L:
+            meaning: Updated luminosity distance in Mpc
+            type: real 1d
+        D_A:
+            meaning: Updated angular diameter distance in Mpc
+            type: real 1d
+        D_V:
+            meaning: Updated BAO average distance in Mpc
+            type: real 1d
+        D_M:
+            meaning: Updated transverse comoving distance in Mpc
+            type: real 1d
+        D_C:
+            meaning: Updated line of sight comoving distance in Mpc
+            type: real 1d
+        H:
+            meaning: Updated Hubble parameter in units of 1/Mpc
+            type: real 1d
+        mu:
+            meaning: Updated distance modulus
+            type: real 1d
+        rs_zdrag:
+            meaning: "Sound horizon at drag epoch computed using updated h0, in Mpc"
+            type: real
+        h0rd:
+            meaning: "Final value of h0 * r_d, using updated h0"
+            type: real
+
diff --git a/utility/rescale_distances_rdh/rescale_distances_rdh.py b/utility/rescale_distances_rdh/rescale_distances_rdh.py
new file mode 100644
index 00000000..5ceb6714
--- /dev/null
+++ b/utility/rescale_distances_rdh/rescale_distances_rdh.py
@@ -0,0 +1,46 @@
+from cosmosis import option_section
+from cosmosis.datablock import names
+import numpy as np
+import camb
+
+def get_rd_from_camb(h, omega_m, ombh2, N_eff):
+    """
+    Use CAMB to compute the sound horizon at the drag epoch.
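+
+    This helper is not called by the setup/execute functions below; it is a
+    convenience for computing the fiducial r_drag consistently. Inputs are
+    h = H0/100, the total matter density omega_m, the physical baryon
+    density ombh2, and the effective neutrino number N_eff; the return
+    value is r_drag in Mpc.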
+ """ + omega_b = ombh2 / h**2 + omega_c = omega_m - omega_b + omch2 = omega_c * h**2 + cp = camb.set_params(ombh2=ombh2, omch2=omch2, H0=h*100, nnu=N_eff) + res = camb.get_background(cp) + return res.get_derived_params()['rdrag'] + + +def setup(options): + return { + } + +def execute(block, config): + h_fid = block[names.cosmological_parameters, "h0"] + rdh_sample = block[names.cosmological_parameters, "rdh"] + rd_prime = block[names.distances, "rs_zdrag"] + + rd_sample = rdh_sample / h_fid + f = rd_prime / rd_sample + + + block[names.cosmological_parameters, "h0"] /= f + block[names.distances, "D_L"] *= f + block[names.distances, "D_A"] *= f + block[names.distances, "D_V"] *= f + block[names.distances, "D_M"] *= f + block[names.distances, "D_C"] *= f + block[names.distances, "H"] /= f + block[names.distances, "mu"] += 5.0 * np.log10(f) + + block[names.distances, "rs_zdrag"] = rd_sample + + # This is the "final" value of H0*rd using the h that is consistent + # with the r_d and Omega_m values. + block["distances", "h0rd"] = block["cosmological_parameters", "h0"] * block[names.distances, "rs_zdrag"] + + return 0 \ No newline at end of file