diff --git a/docs/conf.py b/docs/conf.py
index 0d511442e..3defccc2d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -139,14 +139,15 @@ def run_apidoc(app):
 # directories to ignore when looking for source files.
 exclude_patterns = ["_build", "**.ipynb_checkpoints"]
 
-html_theme = "sphinx_rtd_dark_mode"
+html_theme = "pydata_sphinx_theme"
+#html_theme = "sphinx_rtd_dark_mode"
 
 
 html_theme_options = {
     "logo_only": False,
     "display_version": False,
     "collapse_navigation": True,
-    "navigation_depth": 4,
+    "navigation_depth": 2,
     "prev_next_buttons_location": "bottom",  # top and bottom
 }
 
diff --git a/docs/requirements.txt b/docs/requirements.txt
index e7050f111..d9e06602e 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -10,3 +10,4 @@ numpy
 pandas
 numba
 sphinx_rtd_dark_mode
+pydata-sphinx-theme
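A note for reviewers of the theme switch above: the keys kept in html_theme_options (logo_only, display_version, prev_next_buttons_location) come from sphinx_rtd_theme and are not part of pydata-sphinx-theme's documented option set, so Sphinx will typically log "unsupported theme option" warnings for them. A minimal sketch of a conf.py that passes only options pydata-sphinx-theme documents (this is an assumption about intent, not part of the patch):

# conf.py -- minimal sketch, assuming pydata_sphinx_theme is the active theme.
# The rtd-era keys in the hunk above (logo_only, display_version,
# prev_next_buttons_location) are not in this theme's option set and usually
# trigger "unsupported theme option" warnings at build time.
html_theme = "pydata_sphinx_theme"

html_theme_options = {
    "collapse_navigation": True,  # collapse inactive sidebar sections
    "navigation_depth": 2,        # matches the depth chosen in the diff
}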
mle_analyses[key]["source"], energy_range, @@ -204,7 +213,9 @@ def _setup_analysis_dictionaries( # otherwise pick off only the ones of interest if component in components_to_use: - component_dict[component] = FittedPointSourceSpectralHandler( + component_dict[ + component + ] = FittedPointSourceSpectralHandler( mle_analyses[key]["analysis"], mle_analyses[key]["source"], energy_range, @@ -238,8 +249,10 @@ def _setup_analysis_dictionaries( # repeat for the bayes analyses if bayesian_analyses: - - for key in tqdm(list(bayesian_analyses.keys()), desc="processing Bayesian analyses"): + + for key in tqdm( + list(bayesian_analyses.keys()), desc="processing Bayesian analyses" + ): # if we have a source to use @@ -276,7 +289,9 @@ def _setup_analysis_dictionaries( # extracting all components if not components_to_use: - component_dict[component] = FittedPointSourceSpectralHandler( + component_dict[ + component + ] = FittedPointSourceSpectralHandler( bayesian_analyses[key]["analysis"], bayesian_analyses[key]["source"], energy_range, @@ -293,7 +308,9 @@ def _setup_analysis_dictionaries( # or just some of them if component in components_to_use: - component_dict[component] = FittedPointSourceSpectralHandler( + component_dict[ + component + ] = FittedPointSourceSpectralHandler( bayesian_analyses[key]["analysis"], bayesian_analyses[key]["source"], energy_range, @@ -336,7 +353,9 @@ def _setup_analysis_dictionaries( return mle_analyses, bayesian_analyses, num_sources_to_use, duplicate_keys -def _collect_sums_into_dictionaries(analyses, use_components, components_to_use): +def _collect_sums_into_dictionaries( + analyses, use_components, components_to_use +): """ :param analyses: @@ -440,10 +459,16 @@ def _compute_output(analyses, _defaults, out): samples = analyses[key]["components"][component] - label = "%s: %s" % (key, component) + label = f"{key}: {component}" _append_best_fit_and_errors( - samples, _defaults, label, fluxes, p_errors, n_errors, labels + samples, + _defaults, + label, + fluxes, + p_errors, + n_errors, + labels, ) else: @@ -456,7 +481,7 @@ def _compute_output(analyses, _defaults, out): samples = analyses[key]["fitted point source"] - label = "%s: total" % key + label = f"{key}: total" _append_best_fit_and_errors( samples, _defaults, label, fluxes, p_errors, n_errors, labels @@ -466,7 +491,8 @@ def _compute_output(analyses, _defaults, out): # now make a data frame mle_df = pd.DataFrame( - {"flux": fluxes, "low bound": n_errors, "hi bound": p_errors}, index=labels + {"flux": fluxes, "low bound": n_errors, "hi bound": p_errors}, + index=labels, ) mle_df = mle_df[["flux", "low bound", "hi bound"]] mle_df = mle_df[["flux", "low bound", "hi bound"]] @@ -479,7 +505,9 @@ def _compute_output(analyses, _defaults, out): out.append(None) -def _compute_output_with_components(_defaults, component_sum_dict, total_analysis, out): +def _compute_output_with_components( + _defaults, component_sum_dict, total_analysis, out +): fluxes = [] n_errors = [] @@ -540,7 +568,8 @@ def _compute_output_with_components(_defaults, component_sum_dict, total_analysi # now make a data frame df = pd.DataFrame( - {"flux": fluxes, "low bound": n_errors, "hi bound": p_errors}, index=labels + {"flux": fluxes, "low bound": n_errors, "hi bound": p_errors}, + index=labels, ) df = df[["flux", "low bound", "hi bound"]] out.append(df) @@ -637,8 +666,14 @@ def _calculate_point_source_flux(ene_min, ene_max, *analyses, **kwargs): # instead we now sum the fluxes # we keep bayes and mle apart - total_analysis_mle, component_sum_dict_mle, _ = 
diff --git a/threeML/utils/OGIP/response.py b/threeML/utils/OGIP/response.py
index 674a7b195..fee8dc385 100644
--- a/threeML/utils/OGIP/response.py
+++ b/threeML/utils/OGIP/response.py
@@ -15,9 +15,11 @@ from past.utils import old_div
 
 from threeML.config import threeML_config
 from threeML.exceptions.custom_exceptions import custom_warnings
-from threeML.io.file_utils import (file_existing_and_readable,
-                                   fits_file_existing_and_readable,
-                                   sanitize_filename)
+from threeML.io.file_utils import (
+    file_existing_and_readable,
+    fits_file_existing_and_readable,
+    sanitize_filename,
+)
 from threeML.io.fits_file import FITSExtension, FITSFile
 from threeML.io.logging import setup_logger
 from threeML.io.package_data import get_path_of_data_file
@@ -245,6 +247,7 @@ def convolve(self, precalc_fluxes: Optional[np.array] = None) -> np.ndarray:
 
         if precalc_fluxes is None:
             try:
+
                 fluxes = self._integral_function(
                     # self._monte_carlo_energies[:-1], self._monte_carlo_energies[1:]
                 )
@@ -256,6 +259,7 @@ def convolve(self, precalc_fluxes: Optional[np.array] = None) -> np.ndarray:
                 )
 
         else:
+
            fluxes = precalc_fluxes
 
         # Sometimes some channels have 0 lenths, or maybe they start at 0, where
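For context on the convolve() hunks, here is a minimal sketch of the pattern they touch, not the threeML implementation: integrate the model over the monte carlo energy bins unless the caller supplies precomputed fluxes, then fold the result through the response matrix. The function name, arguments, and the assumed matrix orientation are all illustrative:

import numpy as np


def convolve(matrix, integral_function, precalc_fluxes=None):
    # matrix: assumed shape (n_mc_energies, n_channels).
    # integral_function: assumed to return one integrated model flux per
    # monte carlo energy bin, mirroring self._integral_function above.
    if precalc_fluxes is None:
        fluxes = integral_function()
    else:
        fluxes = precalc_fluxes
    # fold the fluxes through the response to get expected counts per channel
    return np.dot(fluxes, matrix)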