Plots#
Rebuild the publication figures that accompany the ASPIRE data release. The notebook configures plotting defaults, loads sampler posteriors listed in config.mk, and generates the PDFs and LaTeX macros referenced in the paper so downstream readers can reproduce the visuals exactly.
Plotting Environment#
Import the plotting and analysis utilities used throughout the notebook, apply the ASPIRE plots.style defaults, and quieten logging from Bilby and pesummary so reruns focus on the figure outputs.
import bilby
import corner
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from pathlib import Path
from pesummary.io import read as pesummary_read
from pesummary.core.plots.seaborn.kde import kdeplot
from pesummary.utils.bounded_1d_kde import bounded_1d_kde
from pesummary.utils.utils import jensen_shannon_divergence_from_samples, mute_logger
from scipy.stats import gaussian_kde
import yaml
from pesummary.gw.plots.latex_labels import GWlatex_labels
GWlatex_labels["azimuth"] = r"$\epsilon\;[\mathrm{rad}]$"
GWlatex_labels["zenith"] = r"$\kappa\;[\mathrm{rad}]$"
import h5py
import warnings
from aspire_analysis_tools.gw.pesummary import extract_prior_bounds
from aspire_analysis_tools.utils import read_make_config
plt.style.use("plots.style")
plt.rcParams["text.usetex"] = False
plt.rcParams["axes.unicode_minus"] = False
bilby.core.utils.setup_logger(log_level="WARNING")
# Mute pesummary logging
mute_logger()
# Add a label for eccentricity
GWlatex_labels["eccentricity"] = r"$e_{20{\rm{Hz}}}$"
Importing the LAL-based packages may print a harmless SWIGLAL standard output/error redirection warning; it can be suppressed with warnings.filterwarnings("ignore", "Wswiglal-redir-stdio").
GWlatex_labels
{'luminosity_distance': '$d_{L} [\\mathrm{Mpc}]$',
'geocent_time': '$t_{c} [\\mathrm{s}]$',
'dec': '$\\delta [\\mathrm{rad}]$',
'ra': '$\\alpha [\\mathrm{rad}]$',
'a_1': '$a_{1}$',
'a_2': '$a_{2}$',
'phi_jl': '$\\phi_{JL} [\\mathrm{rad}]$',
'phase': '$\\phi [\\mathrm{rad}]$',
'psi': '$\\Psi [\\mathrm{rad}]$',
'psi_J': '$\\Psi_{J} [\\mathrm{rad}]$',
'iota': '$\\iota [\\mathrm{rad}]$',
'tilt_1': '$\\theta_{1} [\\mathrm{rad}]$',
'tilt_1_infinity': '$\\theta_{1,\\infty} [\\mathrm{rad}]$',
'tilt_1_infinity_only_prec_avg': '$\\theta_{1,\\infty}^{\\mathrm{only\\, prec\\, avg}} [\\mathrm{rad}]$',
'tilt_2': '$\\theta_{2} [\\mathrm{rad}]$',
'tilt_2_infinity': '$\\theta_{2,\\infty} [\\mathrm{rad}]$',
'tilt_2_infinity_only_prec_avg': '$\\theta_{2,\\infty}^{\\mathrm{only\\, prec\\, avg}} [\\mathrm{rad}]$',
'phi_12': '$\\phi_{12} [\\mathrm{rad}]$',
'mass_2': '$m_{2} [M_{\\odot}]$',
'mass_1': '$m_{1} [M_{\\odot}]$',
'total_mass': '$M [M_{\\odot}]$',
'chirp_mass': '$\\mathcal{M} [M_{\\odot}]$',
'H1_matched_filter_snr': '$\\rho^{H}_{\\mathrm{mf}}$',
'L1_matched_filter_snr': '$\\rho^{L}_{\\mathrm{mf}}$',
'network_matched_filter_snr': '$\\rho^{N}_{\\mathrm{mf}}$',
'H1_optimal_snr': '$\\rho^{H}_{\\mathrm{opt}}$',
'L1_optimal_snr': '$\\rho^{L}_{\\mathrm{opt}}$',
'V1_optimal_snr': '$\\rho^{V}_{\\mathrm{opt}}$',
'E1_optimal_snr': '$\\rho^{E}_{\\mathrm{opt}}$',
'network_optimal_snr': '$\\rho^{N}_{\\mathrm{opt}}$',
'H1_matched_filter_snr_abs': '$\\mathrm{abs}(\\rho^{H}_{\\mathrm{mf}})$',
'L1_matched_filter_snr_abs': '$\\mathrm{abs}(\\rho^{L}_{\\mathrm{mf}})$',
'V1_matched_filter_snr_abs': '$\\mathrm{abs}(\\rho^{V}_{\\mathrm{mf}})$',
'E1_matched_filter_snr_abs': '$\\mathrm{abs}(\\rho^{E}_{\\mathrm{mf}})$',
'H1_matched_filter_snr_angle': '$\\mathrm{arg}(\\rho^{H}_{\\mathrm{mf}})$',
'L1_matched_filter_snr_angle': '$\\mathrm{arg}(\\rho^{L}_{\\mathrm{mf}})$',
'V1_matched_filter_snr_angle': '$\\mathrm{arg}(\\rho^{V}_{\\mathrm{mf}})$',
'E1_matched_filter_snr_angle': '$\\mathrm{arg}(\\rho^{E}_{\\mathrm{mf}})$',
'network_21_multipole_snr': '$\\rho_{21}$',
'network_33_multipole_snr': '$\\rho_{33}$',
'network_44_multipole_snr': '$\\rho_{44}$',
'network_precessing_snr': '$\\rho_{\\mathrm{p}}$',
'_b_bar': '$\\bar{b}$',
'_precessing_harmonics_overlap': '$|\\mathrm{O}^{\\mathrm{prec}}_{0,1}|$',
'H1_time': '$t_{H} [\\mathrm{s}]$',
'L1_time': '$t_{L} [\\mathrm{s}]$',
'V1_time': '$t_{V} [\\mathrm{s}]$',
'E1_time': '$t_{E} [\\mathrm{s}]$',
'H1_L1_time_delay': '$\\Delta t_{HL} [\\mathrm{s}]$',
'H1_V1_time_delay': '$\\Delta t_{HV} [\\mathrm{s}]$',
'L1_V1_time_delay': '$\\Delta t_{LV} [\\mathrm{s}]$',
'spin_1x': '$S_{1x}$',
'spin_1y': '$S_{1y}$',
'spin_1z': '$S_{1z}$',
'spin_1z_evolved': '$S_{1z}^{\\mathrm{evol}}$',
'spin_1z_infinity': '$S_{1z,\\infty}$',
'spin_1z_infinity_only_prec_avg': '$S_{1z,\\infty}^{\\mathrm{only\\, prec\\, avg}}$',
'spin_2x': '$S_{2x}$',
'spin_2y': '$S_{2y}$',
'spin_2z': '$S_{2z}$',
'spin_2z_evolved': '$S_{2z}^{\\mathrm{evol}}$',
'spin_2z_infinity': '$S_{2z,\\infty}$',
'spin_2z_infinity_only_prec_avg': '$S_{2z,\\infty}^{\\mathrm{only\\, prec\\, avg}}$',
'chi_p': '$\\chi_{\\mathrm{p}}$',
'chi_p_infinity': '$\\chi_{\\mathrm{p},\\infty}$',
'chi_p_infinity_only_prec_avg': '$\\chi_{\\mathrm{p},\\infty}^{\\mathrm{only\\, prec\\, avg}}$',
'chi_p_2spin': '$\\chi_{\\mathrm{p}}^{\\mathrm{2spin}}$',
'chi_eff': '$\\chi_{\\mathrm{eff}}$',
'chi_eff_infinity': '$\\chi_{\\mathrm{eff},\\infty}$',
'chi_eff_infinity_only_prec_avg': '$\\chi_{\\mathrm{eff},\\infty}^{\\mathrm{only\\, prec\\, avg}}$',
'mass_ratio': '$q$',
'symmetric_mass_ratio': '$\\eta$',
'beta': '$\\beta$',
'inverted_mass_ratio': '$1/q$',
'phi_1': '$\\phi_{1} [\\mathrm{rad}]$',
'phi_2': '$\\phi_{2} [\\mathrm{rad}]$',
'cos_tilt_1': '$\\cos{\\theta_{1}}$',
'cos_tilt_2': '$\\cos{\\theta_{2}}$',
'cos_tilt_1_infinity': '$\\cos{\\theta_{1,\\infty}}$',
'cos_tilt_2_infinity': '$\\cos{\\theta_{2,\\infty}}$',
'cos_tilt_1_infinity_only_prec_avg': '$\\cos{\\theta_{1,\\infty}^{\\mathrm{only\\, prec\\, avg}}}$',
'cos_tilt_2_infinity_only_prec_avg': '$\\cos{\\theta_{2,\\infty}^{\\mathrm{only\\, prec\\, avg}}}$',
'redshift': '$z$',
'comoving_distance': '$d_{com} [\\mathrm{Mpc}]$',
'comoving_volume': '$V_{com} [\\mathrm{Mpc}**3]$',
'mass_1_source': '$m_{1}^{\\mathrm{source}} [M_{\\odot}]$',
'mass_2_source': '$m_{2}^{\\mathrm{source}} [M_{\\odot}]$',
'chirp_mass_source': '$\\mathcal{M}^{\\mathrm{source}} [M_{\\odot}]$',
'total_mass_source': '$M^{\\mathrm{source}} [M_{\\odot}]$',
'cos_iota': '$\\cos{\\iota}$',
'theta_jn': '$\\theta_{JN} [\\mathrm{rad}]$',
'viewing_angle': '$\\Theta [\\mathrm{rad}]$',
'cos_theta_jn': '$\\cos{\\theta_{JN}}$',
'lambda_1': '$\\lambda_{1}$',
'lambda_2': '$\\lambda_{2}$',
'lambda_tilde': '$\\tilde{\\lambda}$',
'delta_lambda': '$\\delta\\lambda$',
'peak_luminosity': '$L_{\\mathrm{peak}} [10^{56} \\mathrm{ergs\\;s^{-1}}]$',
'peak_luminosity_non_evolved': '$L_{\\mathrm{peak}}^{\\mathrm{nonevol}} [10^{56} \\mathrm{ergs\\;s^{-1}}]$',
'final_mass': '$M_{\\mathrm{final}} [M_{\\odot}]$',
'final_mass_non_evolved': '$M_{\\mathrm{final}}^{\\mathrm{nonevol}} [M_{\\odot}]$',
'final_mass_source': '$M_{\\mathrm{final}}^{\\mathrm{source}} [M_{\\odot}]$',
'final_mass_source_non_evolved': '$M_{\\mathrm{final}}^{\\mathrm{source, nonevol}} [M_{\\odot}]$',
'final_spin': '$a_{\\mathrm{final}}$',
'final_spin_non_evolved': '$a_{\\mathrm{final}}^{\\mathrm{nonevol}}$',
'radiated_energy': '$E_{\\mathrm{rad}} [M_{\\odot}]$',
'radiated_energy_non_evolved': '$E_{\\mathrm{rad}}^{\\mathrm{nonevol}} [M_{\\odot}]$',
'final_kick': '$v_{\\mathrm{final}} [\\mathrm{km\\;s^{-1}}]$',
'log_pressure': '$\\log{\\mathcal{P}}$',
'gamma_1': '$\\Gamma_{1}$',
'gamma_2': '$\\Gamma_{2}$',
'gamma_3': '$\\Gamma_{3}$',
'spectral_decomposition_gamma_0': '$\\gamma_{0}$',
'spectral_decomposition_gamma_1': '$\\gamma_{1}$',
'spectral_decomposition_gamma_2': '$\\gamma_{2}$',
'spectral_decomposition_gamma_3': '$\\gamma_{3}$',
'tidal_disruption_frequency': '$f_{\\mathrm{td}} [\\mathrm{Hz}]$',
'tidal_disruption_frequency_ratio': '$f_{\\mathrm{td}} / f_{220}$',
'220_quasinormal_mode_frequency': '$f_{220} [\\mathrm{Hz}]$',
'baryonic_torus_mass': '$M_{\\mathrm{torus}} [M_{\\odot}]$',
'baryonic_torus_mass_source': '$M^{\\mathrm{source}}_{\\mathrm{torus}} [M_{\\odot}]$',
'compactness_1': '$C_{1}$',
'compactness_2': '$C_{2}$',
'baryonic_mass_1': '$m_{1, \\mathrm{baryonic}} [M_{\\odot}]$',
'baryonic_mass_1_source': '$m^{\\mathrm{source}}_{1, \\mathrm{baryonic}} [M_{\\odot}]$',
'baryonic_mass_2': '$m_{2, \\mathrm{baryonic}} [M_{\\odot}]$',
'baryonic_mass_2_source': '$m^{\\mathrm{source}}_{2, \\mathrm{baryonic}} [M_{\\odot}]$',
'azimuth': '$\\epsilon\\;[\\mathrm{rad}]$',
'zenith': '$\\kappa\\;[\\mathrm{rad}]$',
'eccentricity': '$e_{20{\\rm{Hz}}}$'}
Prepare Output Locations#
Ensure the figures/ directory exists so the generated PDFs match the data-release folder layout.
output = Path("figures")
output.mkdir(exist_ok=True)
Data Loading#
Read config.mk to locate the RESULTS_MAPPING YAML enumerating each analysis, waveform, and sampler bundle shipped with the release.
make_config = read_make_config("config.mk")
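As a quick sanity check (a minimal sketch, assuming read_make_config returns a plain dictionary), print the two configuration values this notebook relies on:
# Sketch: echo the config.mk values used below (key names taken from this
# notebook; the resolved paths depend on the local checkout).
for key in ("RESULTS_MAPPING", "DATA_RELEASE_PATH"):
    print(f"{key} = {make_config.get(key)}")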
Resolve Result Paths#
Open the mapping file, promote each relative posterior path to the data-release root, and keep the nested structure so later loops can look up datasets without hard-coded filenames.
results_mapping_file = Path(make_config["RESULTS_MAPPING"])
data_release_path = results_mapping_file.parent
with open(results_mapping_file, "r") as f:
result_paths = yaml.safe_load(f)
# Add data release path to each result path
for key, value in result_paths.items():
for subkey, subvalue in value.items():
for subsubkey, subsubvalue in subvalue.items():
result_paths[key][subkey][subsubkey] = \
data_release_path / Path(subsubvalue)
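The mapping is nested by analysis label, waveform, and sampler; a short sketch like the following lists every entry and its resolved file:
# Sketch: walk the nested analysis -> waveform -> sampler mapping loaded above.
for label, waveforms in result_paths.items():
    for waveform, samplers in waveforms.items():
        for sampler, path in samplers.items():
            print(f"{label} / {waveform} / {sampler}: {path}")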
Load Posterior Summaries#
Use pesummary to read every sampler output, caching the posterior samples, prior bounds, and sampler diagnostics needed for plotting and table annotations.
results = {}
stats = {}
prior_bounds = {}
for label, wf_result_files in result_paths.items():
results[label] = {}
stats[label] = {}
prior_bounds[label] = {}
for waveform, result_files in wf_result_files.items():
results[label][waveform] = {}
stats[label][waveform] = {}
prior_bounds[label][waveform] = {}
for sampler, result_file in result_files.items():
results[label][waveform][sampler] = pesummary_read(str(result_file), disable_prior=True)
prior_bounds[label][waveform][sampler] = {}
stats[label][waveform][sampler] = {
"evals": None,
"time": None,
"log_evidence": None,
"log_evidence_err": None,
"n_posterior_samples": None
}
try:
with h5py.File(result_file, "r") as f:
prior_bounds[label][waveform][sampler] = extract_prior_bounds(f["priors"][()])
stats[label][waveform][sampler]["evals"] = f["num_likelihood_evaluations"][()]
stats[label][waveform][sampler]["time"] = f["sampling_time"][()]
stats[label][waveform][sampler]["log_evidence"] = f["log_evidence"][()]
stats[label][waveform][sampler]["log_evidence_err"] = f["log_evidence_err"][()]
stats[label][waveform][sampler]["n_posterior_samples"] = len(f["posterior/chirp_mass"][()])
except (KeyError, OSError):
pass
If the data release has not been downloaded to the location recorded in config.mk, this cell raises a FileNotFoundError for the first missing HDF5 result file (here GW200129_IMRPhenomXO4a_aspire_result.hdf5).
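Once the files are in place, each entry caches a pesummary object whose samples_dict exposes the posterior arrays; for example (keys taken from the analyses plotted below):
# Sketch: access one cached posterior after a successful load. The
# label/waveform/sampler keys match those used in the figures below.
gw150914_samples = results["GW150914"]["IMRPhenomXO4a"]["dynesty"].samples_dict
print(len(gw150914_samples["chirp_mass"]))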
Jensen-Shannon Divergence#
Compute pairwise Jensen-Shannon divergences for the highlighted parameters, restricting comparisons to runs that share the same data and waveform so the divergences isolate sampler-related differences.
parameters_of_interest = [
"chirp_mass",
"mass_ratio",
"chi_eff",
"chi_p",
"eccentricity",
]
# Use base 2 log for JSD calculation
base = 2
jsd = {}
for label, wf_results in results.items():
jsd[label] = {}
for waveform, sampler_results in wf_results.items():
jsd[label][waveform] = {}
samplers = list(sampler_results.keys())
for i in range(len(samplers)):
for j in range(i + 1, len(samplers)):
sampler1 = samplers[i]
sampler2 = samplers[j]
key = f"{sampler1} vs {sampler2}"
jsd[label][waveform][key] = {}
samples1 = sampler_results[sampler1].samples_dict
samples2 = sampler_results[sampler2].samples_dict
samples1["cos_theta_jn"] = np.cos(samples1["theta_jn"])
samples2["cos_theta_jn"] = np.cos(samples2["theta_jn"])
for param in parameters_of_interest:
bounds = prior_bounds[label][waveform][sampler1].get(param, None)
if param == "cos_theta_jn":
bounds = (-1, 1)
if bounds is None or param in ["theta_jn"]:
kde = gaussian_kde
kde_kwargs = {}
else:
kde = bounded_1d_kde
kde_kwargs = {
"xlow": bounds[0],
"xhigh": bounds[1],
"method": "Reflection"
}
if param in samples1 and param in samples2:
jsd_value = jensen_shannon_divergence_from_samples(
[samples1[param].astype(float), samples2[param].astype(float)],
base=base,
kde=kde,
**kde_kwargs
)
jsd[label][waveform][key][param] = jsd_value
else:
jsd[label][waveform][key][param] = None
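As a self-contained check of the divergence measure (a toy sketch with synthetic data, not release results), two overlapping Gaussians should give a small value in the same millibit units quoted on the figures:
# Toy sketch: the JSD between two similar Gaussians should be small, and it
# is exactly zero for identical samples. Uses the same helper and base-2 log.
rng = np.random.default_rng(0)
toy_jsd = jensen_shannon_divergence_from_samples(
    [rng.normal(0.0, 1.0, 5000), rng.normal(0.1, 1.0, 5000)],
    base=2,
    kde=gaussian_kde,
)
print(f"{1e3 * toy_jsd:.2f} mbits")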
Utility Functions#
Collect helpers that standardise 1-D KDE styling, axis configuration, and the Jensen-Shannon divergence annotations. plot_results centralises the figure layout so each section can request a different mix of analyses without duplicating plotting logic.
def add_kde_plot_to_ax(ax, samples, bounds, linestyle="-", colour="C0", xlabel=None, jsd_value=None):
# If bounds are defined, use bounded_1d_kde
if bounds[0] is not None and bounds[1] is not None:
kde_kernel = bounded_1d_kde
kde_kwargs = {
"xlow": bounds[0],
"xhigh": bounds[1],
"method": "Reflection"
}
else:
kde_kernel = None
kde_kwargs = {}
# NOTE: kde_kernel and kde_kwargs are reset to the defaults here, so the
# bounded kernel selected above may not take effect; the `clip` argument
# passed to kdeplot below still restricts the range when bounds are known.
kde_kernel = None
kde_kwargs = {}
kdeplot(
x=samples,
ax=ax,
color=colour,
alpha=1.0,
kde_kernel=kde_kernel,
kde_kwargs=kde_kwargs,
clip=bounds if bounds[0] is not None and bounds[1] is not None else None,
linewidth=1.5,
linestyle=linestyle,
)
# Set x-label
ax.set_xlabel(xlabel)
# Include JSD as title
if jsd_value is not None:
ax.set_title(
f"{1e3 * jsd_value:.2f} mbits",
fontsize=8,
)
def plot_results(
results,
analysis_keys,
analyses_to_plot,
parameters,
colors,
linestyles,
):
"""Helper function to 1-d KDE plots for a set of results.
Produces a grid of 1-d KDE plots for each parameter in `parameters` (columns)
and each analysis in `analysis_keys` (rows). Each subplot contains the KDEs
for each waveform and sampler combination specified in `analyses_to_plot`.
"""
if isinstance(parameters, list):
parameters = {k: parameters for k in analysis_keys}
if len(analysis_keys) > 1:
assert len(parameters[analysis_keys[0]]) == len(parameters[analysis_keys[1]])
figsize = plt.rcParams["figure.figsize"].copy()
figsize[1] = 0.6 * figsize[1] * len(analysis_keys)
with plt.rc_context({
'figure.constrained_layout.use': True,
'xtick.labelsize': 8,
'axes.labelpad': 2,
}), warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning) # Ignore warnings from seaborn about KDE
warnings.simplefilter("ignore", FutureWarning) # Ignore warnings from bounded_1d
fig, axs = plt.subplots(len(analysis_keys), len(parameters[analysis_keys[0]]), figsize=figsize)
axs = np.atleast_2d(axs)
for i, label in enumerate(analysis_keys):
if label not in results:
print(f"Skipping {label}")
continue
for waveform, samplers in analyses_to_plot[label].items():
if waveform not in results[label]:
print(f"Skipping {waveform} for {label}")
continue
for j, sampler in enumerate(samplers):
if sampler not in results[label][waveform]:
continue
res = results[label][waveform][sampler]
for k, parameter in enumerate(parameters[label]):
if parameter not in res.samples_dict:
continue
samples = res.samples_dict[parameter]
if np.unique(samples).size == 1:
# If all samples are the same, skip plotting
continue
if len(samples) > 0:
# Get the prior bounds
bounds = prior_bounds[label][waveform][sampler].get(parameter, (None, None))
# Get the JSD value if applicable
if len(samplers) > 1 and j == 1:
jsd_val = jsd[label][waveform].get(f"{samplers[0]} vs {samplers[1]}", {}).get(parameter, None)
if jsd_val is None:
jsd_val = jsd[label][waveform].get(f"{samplers[1]} vs {samplers[0]}", {}).get(parameter, None)
else:
jsd_val = None
add_kde_plot_to_ax(
ax=axs[i, k],
samples=samples.to_numpy().real,
bounds=bounds,
linestyle=linestyles[waveform][sampler],
colour=colors[waveform][sampler],
xlabel=GWlatex_labels.get(parameter, parameter),
jsd_value=jsd_val,
)
for ax in axs.flat:
# Remove y-axis labels and ticks
ax.set_yticklabels([])
# Remove y-axis labels
ax.yaxis.label.set_visible(False)
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=2))
return fig
Changing Waveform - Figure 2#
Configure the comparisons for the GW150914-like and q4 injections across waveform models, then call plot_results to render dynesty versus aspire posteriors and save changing_waveform.pdf.
# Parameters to plot
parameters = ["chirp_mass", "mass_ratio", "chi_eff", "chi_p"]
# Which injections to plot
analysis_keys = ["GW150914-like", "q4"]
# Which results to plot
analyses_to_plot = {
"IMRPhenomXPHM": ["dynesty"],
"IMRPhenomXO4a": [
"dynesty",
"aspire from IMRPhenomXPHM",
],
}
analyses_to_plot = {k: analyses_to_plot for k in analysis_keys}
# Colors and linestyles for plotting
colors = {
"IMRPhenomXPHM": {
"dynesty": "k",
},
"IMRPhenomXO4a": {
"dynesty": "C0",
"aspire from IMRPhenomXPHM": "C1",
},
"SEOBNRv5PHM": {
"aspire from IMRPhenomXPHM": "C2",
}
}
ls = {
"IMRPhenomXPHM": {
"dynesty": ":",
},
"IMRPhenomXO4a": {
"dynesty": "-",
"aspire from IMRPhenomXPHM": "--",
},
"SEOBNRv5PHM": {
"aspire from IMRPhenomXPHM": "-.",
}
}
fig = plot_results(
results,
analysis_keys,
analyses_to_plot,
parameters,
colors,
ls,
)
plt.show()
fig.savefig(output / "changing_waveform.pdf", bbox_inches="tight")
Adding Physics - Figure 3#
Plot the impact of introducing spin precession and orbital eccentricity, keeping colors and linestyles consistent across analyses so differences between samplers remain easy to spot.
analysis_keys = ["GW150914-like", "eccentric"]
analyses_to_plot = {
"GW150914-like": {
"IMRPhenomD": ["dynesty"],
"IMRPhenomPv2": ["dynesty", "aspire from IMRPhenomD"],
},
"eccentric": {
"TaylorF2": ["dynesty"],
"TaylorF2Ecc": ["dynesty", "aspire"],
}
}
parameters = {
"GW150914-like": ["chirp_mass", "mass_ratio", "chi_eff", "chi_p"],
"eccentric": ["chirp_mass", "mass_ratio", "chi_eff", "eccentricity"],
}
colors = {
"IMRPhenomD": {
"dynesty": "k",
},
"IMRPhenomPv2": {
"dynesty": "C0",
"aspire from IMRPhenomD": "C1",
},
"TaylorF2": {
"dynesty": "k",
},
"TaylorF2Ecc": {
"dynesty": "C0",
"aspire": "C1",
},
}
ls = {
"IMRPhenomD": {
"dynesty": ":",
},
"IMRPhenomPv2": {
"dynesty": "-",
"aspire from IMRPhenomD": "--",
},
"TaylorF2": {
"dynesty": ":",
},
"TaylorF2Ecc": {
"dynesty": "-",
"aspire": "--",
}
}
fig = plot_results(
results,
analysis_keys,
analyses_to_plot,
parameters,
colors,
ls,
)
plt.show()
fig.savefig(output / "adding_physics.pdf", bbox_inches="tight")
Real Events - Figure 4#
Recreate the comparisons for the real events GW150914 and GW200129 by selecting the relevant waveform families, generating the multi-parameter KDE figure, and exporting real_events.pdf for inclusion in the release.
to_plot = {
"IMRPhenomXPHM": [
"dynesty",
],
"IMRPhenomXO4a": [
"dynesty",
"aspire",
],
# "SEOBNRv5PHM": [
# "dynesty",
# "aspire",
# ]
}
colors = {
"IMRPhenomXPHM": {
"dynesty": "k",
},
"IMRPhenomXO4a": {
"dynesty": "C0",
"aspire": "C1",
},
"SEOBNRv5PHM": {
"dynesty": "C0",
"aspire": "C1",
}
}
ls = {
"IMRPhenomXPHM": {
"dynesty": ":",
},
"IMRPhenomXO4a": {
"dynesty": "-",
"aspire": "--",
},
"SEOBNRv5PHM": {
"dynesty": "-",
"aspire": "--",
},
}
analysis_keys = ["GW150914", "GW200129"]
analyses_to_plot = {
"GW150914": to_plot,
"GW200129": to_plot,
}
parameters = {
"GW150914": ["chirp_mass", "mass_ratio", "chi_eff", "chi_p"],
"GW200129": ["chirp_mass", "mass_ratio", "chi_eff", "chi_p"],
}
fig = plot_results(
results,
analysis_keys,
analyses_to_plot,
parameters,
colors,
ls,
)
plt.show()
fig.savefig(output / "real_events.pdf", bbox_inches="tight")
LaTeX Macros for Divergence Summaries#
Export the Jensen-Shannon divergence values into macros/jsd_macros.tex so the paper and documentation can reference the same numbers without manual transcription.
# Write macros for JSD between chirp_mass, mass_ratio, chi_eff and chi_p
parameters = ["chirp_mass", "mass_ratio", "chi_eff", "chi_p"]
analysis_key = "GW150914"
macros_dir = Path("macros")
macros_dir.mkdir(exist_ok=True)
filename = macros_dir / "jsd_macros.tex"
# Overwrite the file and write one macro per parameter
with open(filename, "w") as f:
    f.write("% Macros for JSD between different samplers\n")
    f.write("% Generated by change_waveform.ipynb\n")
    for p in parameters:
        jsd_val = jsd[analysis_key]["IMRPhenomXO4a"].get("dynesty vs aspire", {}).get(p, None)
        if jsd_val is None:
            jsd_val = jsd[analysis_key]["IMRPhenomXO4a"].get("aspire vs dynesty", {}).get(p, None)
        if jsd_val is not None:
            name = f"JSD{p.capitalize().replace('_', '')}"
            f.write(f"\\newcommand{{\\{name}}}{{{1e3 * jsd_val:.2f}}}\n")
P-P plot#
The P-P plot is created using a Python script found in the repository.
The Makefile in gw/plots/first_paper can be used to produce the plot after
downloading the data release and activating the environment:
make pp-plot
Flow plot#
Generate plots showing the distribution learned by the flow for supplemental material.
data_release_path = Path(make_config["DATA_RELEASE_PATH"])
file_path = data_release_path / "core_results" / "single_event_analyses" / "GW150914_IMRPhenomXO4a_aspire_checkpoint.h5"
flow_samples = {}
initial_samples = {}
with h5py.File(file_path, "r") as f:
for key in f["flow_samples"].keys():
if "samples" in key:
param = key.split(".")[-1]
else:
continue
flow_samples[param] = f["flow_samples"][key][()]
for key in f["initial_samples"].keys():
if "samples" in key:
param = key.split(".")[-1]
else:
continue
initial_samples[param] = f["initial_samples"][key][()]
# Subtract mean from L1_time so they're easier to plot
offset = np.mean(initial_samples["L1_time"])
initial_samples["L1_time"] -= offset
flow_samples["L1_time"] -= offset
Generate the plot for astrophysical parameters.
parameters_to_plot = ["chirp_mass", "mass_ratio", "a_1", "a_2", "tilt_1", "tilt_2", "phi_12", "phi_jl", "theta_jn", "delta_phase", "psi", "azimuth", "zenith", "L1_time"]
GWlatex_labels["delta_phase"] = r"$\Delta \phi\;[\mathrm{rad}]$"
flow_array = np.array([flow_samples[param] for param in parameters_to_plot]).T
initial_array = np.array([initial_samples[param] for param in parameters_to_plot]).T
parameter_labels = [GWlatex_labels.get(param, param) for param in parameters_to_plot]
rc_params = {
"legend.fontsize": 32,
"axes.labelsize": 32,
"xtick.labelsize": 24,
"ytick.labelsize": 24,
"axes.linewidth": 3,
"xtick.major.size": 6,
"xtick.major.width": 2,
"ytick.minor.size": 4,
"ytick.major.width": 2,
"grid.linewidth": 1.6,
}
with plt.rc_context(rc_params):
corner_kwargs = dict(
smooth=0.9,
bins=32,
plot_datapoints=False,
# fill_contours=True,
no_fill_contours=True,
hist_kwargs={"density": True, "histtype": "step", "linewidth": 3},
levels=[0.68, 0.95],
density=True,
plot_density=False,
contour_kwargs={"linewidths": 3},
labelpad=0.2,
)
fig = corner.corner(
flow_array,
labels=parameter_labels,
color="C0",
**corner_kwargs
)
corner_kwargs["hist_kwargs"]["color"] = "C1"
corner_kwargs["hist_kwargs"]["linestyle"] = "--"
corner_kwargs["contour_kwargs"]["colors"] = "C1"
corner_kwargs["contour_kwargs"]["linestyles"] = "--"
fig = corner.corner(
initial_array,
fig=fig,
color="C1",
labels=parameter_labels,
**corner_kwargs
)
fig.legend(
handles=[
mpl.lines.Line2D([], [], color="C0", linestyle="-", label="Flow samples"),
mpl.lines.Line2D([], [], color="C1", linestyle="--", label="Initial samples"),
],
bbox_to_anchor=(0.25, 0.95),
loc="center",
)
fig.savefig(output / "flow_comparison.pdf", bbox_inches="tight")
plt.show()
Generate the plot for the calibration parameters.
calibration_parameters = [
"recalib_H1_amplitude_0",
"recalib_H1_amplitude_1",
"recalib_L1_amplitude_0",
"recalib_L1_amplitude_1",
"recalib_H1_phase_0",
"recalib_H1_phase_1",
"recalib_L1_phase_0",
"recalib_L1_phase_1",
]
cal_samples_flow = np.array([flow_samples[param] for param in calibration_parameters]).T
cal_samples_initial = np.array([initial_samples[param] for param in calibration_parameters]).T
labels = [
    r"$\delta A^{(H1)}_0$",
    r"$\delta A^{(H1)}_1$",
    r"$\delta A^{(L1)}_0$",
    r"$\delta A^{(L1)}_1$",
    r"$\delta \phi^{(H1)}_0$",
    r"$\delta \phi^{(H1)}_1$",
    r"$\delta \phi^{(L1)}_0$",
    r"$\delta \phi^{(L1)}_1$",
]
rc_params_cal = {
"axes.titlepad": 12,
"axes.titlesize": rc_params["axes.labelsize"],
}
rc_params_cal.update(rc_params)
with plt.rc_context(rc_params_cal):
corner_kwargs = dict(
smooth=0.9,
bins=32,
plot_datapoints=False,
# fill_contours=True,
no_fill_contours=True,
hist_kwargs={"density": True, "histtype": "step", "linewidth": 2},
levels=[0.68, 0.95],
density=True,
plot_density=False,
contour_kwargs={"linewidths": 2},
labelpad=0.3,
)
corner_kwargs["hist_kwargs"]["color"] = "C0"
corner_kwargs["hist_kwargs"]["linestyle"] = "-"
corner_kwargs["contour_kwargs"]["colors"] = "C0"
corner_kwargs["contour_kwargs"]["linestyles"] = "-"
fig_cal = corner.corner(
cal_samples_flow,
color="C0",
labels=labels,
reverse=True,
**corner_kwargs
)
corner_kwargs["hist_kwargs"]["color"] = "C1"
corner_kwargs["hist_kwargs"]["linestyle"] = "--"
corner_kwargs["contour_kwargs"]["colors"] = "C1"
corner_kwargs["contour_kwargs"]["linestyles"] = "--"
fig_cal = corner.corner(
cal_samples_initial,
fig=fig_cal,
color="C1",
labels=labels,
reverse=True,
**corner_kwargs
)
plt.show()
Overlay the two plots by rendering the calibration corner plot to an image and embedding it as an inset in the main figure.
import io
# render fig_cal to a PNG in memory
buf = io.BytesIO()
fig_cal.savefig(buf, format="png", dpi=300, bbox_inches="tight", transparent=True)
plt.close(fig_cal) # optional: we don't need this figure window anymore
buf.seek(0)
img = plt.imread(buf)
# create an inset axes on the main corner figure
# [left, bottom, width, height] in figure coordinates
inset_ax = fig.add_axes([0.4, 0.4, 0.6, 0.6])
inset_ax.imshow(img)
inset_ax.axis("off")
inset_ax.set_facecolor("white")
fig.savefig(output / "flow_comparison_with_inset.pdf", bbox_inches="tight")
plt.show()