98 changes: 79 additions & 19 deletions spectrafit/models.py
@@ -256,7 +256,7 @@ def estimated_rel_height(self) -> float:
 
         The relative height of a peak is approximated by the difference of the
         harmonic mean value of the `data` and the minimum value of the `data`
-        divided by the factor of `2`. In case of negative ratios, the value will be
+        divided by the factor of `4`. In case of negative ratios, the value will be
         set to `Zero`.
 
         Returns:
@@ -265,8 +265,8 @@ def estimated_rel_height(self) -> float:
         try:
             rel_height = (hmean(self.data) - self.data.min()) / 4
         except ValueError as exc:
-            rel_height = (self.data.mean() - self.data.min()) / 4
             print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
+            rel_height = (self.data.mean() - self.data.min()) / 4
         return rel_height if rel_height > 0 else 0.0
 
     @property
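Read together, the two hunks make the docstring match the implementation (the divisor is `4`, not `2`) and report why `hmean` failed before computing the arithmetic-mean fallback. A minimal standalone sketch of the patched estimator, assuming a plain NumPy array in place of the class attribute `self.data`:

```python
import numpy as np
from scipy.stats import hmean


def estimated_rel_height(data: np.ndarray) -> float:
    """Estimate a peak's relative height as (hmean(data) - min(data)) / 4."""
    try:
        rel_height = (hmean(data) - data.min()) / 4
    except ValueError as exc:
        # scipy.stats.hmean rejects negative entries with a ValueError;
        # fall back to the ordinary arithmetic mean in that case.
        print(f"{exc}: Using standard arithmetic mean of NumPy.\n")
        rel_height = (data.mean() - data.min()) / 4
    return rel_height if rel_height > 0 else 0.0


print(estimated_rel_height(np.array([1.0, 2.0, 8.0, 2.0, 1.0])))
```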
@@ -635,30 +635,90 @@ def define_parameters_auto(self) -> None:
     def define_parameters(self) -> None:
         """Define the input parameters for a `params`-dictionary for classic fitting."""
         for key_1, value_1 in self.args["peaks"].items():
-            for key_2, value_2 in value_1.items():
-                for key_3, value_3 in value_2.items():
-                    self.params.add(f"{key_2}_{key_3}_{key_1}", **value_3)
+            self.define_parameters_loop(key_1=key_1, value_1=value_1)
+
+    def define_parameters_loop(self, key_1: str, value_1: Dict[str, Any]) -> None:
+        """Loop through the input parameters for a `params`-dictionary.
+
+        Args:
+            key_1 (str): The key of the first level of the input dictionary.
+            value_1 (Dict[str, Any]): The value of the first level of the input
+                dictionary.
+        """
+        for key_2, value_2 in value_1.items():
+            self.define_parameters_loop_2(key_1=key_1, key_2=key_2, value_2=value_2)
+
+    def define_parameters_loop_2(
+        self, key_1: str, key_2: str, value_2: Dict[str, Any]
+    ) -> None:
+        """Loop through the input parameters for a `params`-dictionary.
+
+        Args:
+            key_1 (str): The key of the first level of the input dictionary.
+            key_2 (str): The key of the second level of the input dictionary.
+            value_2 (Dict[str, Any]): The value of the second level of the input
+                dictionary.
+        """
+        for key_3, value_3 in value_2.items():
+            self.define_parameters_loop_3(
+                key_1=key_1, key_2=key_2, key_3=key_3, value_3=value_3
+            )
+
+    def define_parameters_loop_3(
+        self, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
+    ) -> None:
+        """Loop through the input parameters for a `params`-dictionary.
+
+        Args:
+            key_1 (str): The key of the first level of the input dictionary.
+            key_2 (str): The key of the second level of the input dictionary.
+            key_3 (str): The key of the third level of the input dictionary.
+            value_3 (Dict[str, Any]): The value of the third level of the input
+                dictionary.
+        """
+        self.params.add(f"{key_2}_{key_3}_{key_1}", **value_3)
 
     def define_parameters_global(self) -> None:
         """Define the input parameters for a `params`-dictionary for global fitting."""
         for col_i in range(self.col_len):
             for key_1, value_1 in self.args["peaks"].items():
                 for key_2, value_2 in value_1.items():
                     for key_3, value_3 in value_2.items():
-                        if col_i:
-                            if key_3 != "amplitude":
-                                self.params.add(
-                                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
-                                    expr=f"{key_2}_{key_3}_{key_1}_1",
-                                )
-                            else:
-                                self.params.add(
-                                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
-                                    **value_3,
-                                )
-
-                        else:
-                            self.params.add(f"{key_2}_{key_3}_{key_1}_1", **value_3)
+                        self._define_parameter(
+                            col_i=col_i,
+                            key_1=key_1,
+                            key_2=key_2,
+                            key_3=key_3,
+                            value_3=value_3,
+                        )
+
+    def _define_parameter(
+        self, col_i: int, key_1: str, key_2: str, key_3: str, value_3: Dict[str, Any]
+    ) -> None:
+        """Define the input parameters for a `params`-dictionary for global fitting.
+
+        Args:
+            col_i (int): The column index.
+            key_1 (str): The key of the first level of the input dictionary.
+            key_2 (str): The key of the second level of the input dictionary.
+            key_3 (str): The key of the third level of the input dictionary.
+            value_3 (Dict[str, Any]): The value of the third level of the input
+                dictionary.
+        """
+        if col_i:
+            if key_3 != "amplitude":
+                self.params.add(
+                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
+                    expr=f"{key_2}_{key_3}_{key_1}_1",
+                )
+            else:
+                self.params.add(
+                    f"{key_2}_{key_3}_{key_1}_{col_i+1}",
+                    **value_3,
+                )
+
+        else:
+            self.params.add(f"{key_2}_{key_3}_{key_1}_1", **value_3)
 
     def define_parameters_global_pre(self) -> None:
         """Define the input parameters for a `params`-dictionary for global fitting.
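For global fitting, `_define_parameter` keeps the original branching: the first column (`col_i == 0`) defines the free parameters, and every later column ties each non-amplitude parameter to the first column through an lmfit expression constraint, so only amplitudes stay free per extra spectrum. A minimal sketch of that mechanism, assuming `lmfit` is installed (parameter names are illustrative):

```python
from lmfit import Parameters

params = Parameters()
# Column 1 owns the free parameter ...
params.add("gaussian_center_1_1", value=0.5)
# ... while column 2 shares it via an `expr` constraint:
params.add("gaussian_center_1_2", expr="gaussian_center_1_1")

params["gaussian_center_1_1"].value = 0.7
params.update_constraints()  # re-evaluate expression-based parameters
print(params["gaussian_center_1_2"].value)  # 0.7, follows column 1
```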
10 changes: 7 additions & 3 deletions spectrafit/plugins/converter.py
@@ -63,14 +63,18 @@ def convert(args: Dict[str, Any]) -> None:
 
     if args["format"] == "json":
         # Convert the input file to a JSON file
-        with open(args["infile"].with_suffix(".json"), "w", encoding="utf8") as f:
+        with open(
+            args["infile"].with_suffix(f".{args['format']}"), "w", encoding="utf8"
+        ) as f:
             json.dump(data, f, indent=4)
     elif args["format"] == "yaml":
-        with open(args["infile"].with_suffix(".yaml"), "w", encoding="utf8") as f:
+        with open(
+            args["infile"].with_suffix(f".{args['format']}"), "w", encoding="utf8"
+        ) as f:
             yaml.dump(data, f, default_flow_style=False)
     elif args["format"] in ["toml", "lock"]:
         with open(
-            args["infile"].with_suffix(".toml"),
+            args["infile"].with_suffix(f".{args['format']}"),
             "wb+",
         ) as f:
             tomli_w.dump(dict(**data), f)
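The rewrite derives the output suffix from the requested format instead of hard-coding it, so a `lock` conversion now ends in `.lock` rather than `.toml`. A quick sketch of the path handling (`pathlib` only; the file name is illustrative):

```python
from pathlib import Path

infile = Path("fitting_input.txt")
for fmt in ("json", "yaml", "toml", "lock"):
    # with_suffix() swaps the extension to match the requested format
    print(infile.with_suffix(f".{fmt}"))
# fitting_input.json, fitting_input.yaml, fitting_input.toml, fitting_input.lock
```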
2 changes: 1 addition & 1 deletion spectrafit/plugins/notebook.py
@@ -1047,7 +1047,7 @@ def solver_model(
                     "global_": self.global_,
                     "column": list(self.df.columns),
                     "autopeak": self.autopeak,
-                    **list2dict(list_=self.initial_model),
+                    **list2dict(peak_list=self.initial_model),
                 },
             )(),
         )()
3 changes: 0 additions & 3 deletions spectrafit/plugins/test/test_converter.py
@@ -54,7 +54,6 @@ def test_json_conversion() -> None:
     with TemporaryDirectory() as tmpdir:
         infile = Path(tmpdir) / "input_1.json"
 
-        # write input json
         with open(infile, "w", encoding="utf8") as f:
             json.dump({"a": 1, "b": 2}, f)
         args = {
@@ -73,7 +72,6 @@ def test_yaml_conversion() -> None:
     with TemporaryDirectory() as tmpdir:
         infile = Path(tmpdir) / "input_1.yaml"
 
-        # write input yaml
         with open(infile, "w", encoding="utf8") as f:
             yaml.dump({"a": 1, "b": 2}, f)
         args = {
@@ -92,7 +90,6 @@ def test_toml_conversion() -> None:
     with TemporaryDirectory() as tmpdir:
         infile = Path(tmpdir) / "input_1.toml"
 
-        # write input toml
         with open(infile, "wb+") as f:
             tomli_w.dump({"a": 1, "b": 2}, f)
         args = {
59 changes: 51 additions & 8 deletions spectrafit/report.py
@@ -4,6 +4,7 @@
 from typing import Any
 from typing import Dict
 from typing import Hashable
+from typing import List
 from typing import Optional
 from typing import Tuple
 
@@ -110,21 +111,19 @@ def initialize(
             Tuple[NDArray[np.float64], NDArray[np.float64]]: Tuple of true and predicted
                 (fit) intensities.
         """
-        _true = df[
+        true = df[
             [col_name for col_name in df.columns if name_true in col_name]
         ].to_numpy()
 
-        _pred = df[
+        pred = df[
             [col_name for col_name in df.columns if name_pred in col_name]
         ].to_numpy()
 
-        if _pred.shape != _true.shape:
+        if pred.shape != true.shape:
             raise ValueError("The shape of the real and fit data-values are not equal!")
 
         return (
-            (_true, _pred)
-            if _true.shape[1] > 1
-            else (np.array([_true]), np.array([_pred]))
+            (true, pred) if true.shape[1] > 1 else (np.array([true]), np.array([pred]))
         )
 
     def __call__(self) -> Dict[Hashable, Any]:
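Beyond the rename from the underscore-prefixed `_true`/`_pred` to `true`/`pred`, the return still normalizes the single-column case by wrapping both arrays in an extra leading axis, so downstream metrics always iterate over a batch of spectra. A small sketch of that behaviour with synthetic arrays:

```python
import numpy as np

true = np.linspace(0.0, 1.0, 5).reshape(-1, 1)  # one spectrum -> shape (5, 1)
pred = true + 0.01

true_b, pred_b = (
    (true, pred) if true.shape[1] > 1 else (np.array([true]), np.array([pred]))
)
print(true_b.shape)  # (1, 5, 1): a batch containing a single spectrum
```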
@@ -182,7 +181,7 @@ def fit_report_as_dict(
     result = inpars
     params = inpars.params
 
-    parnames = list(params.keys())
+    parnames: List[str] = list(params.keys())
 
     buffer: Dict[str, Dict[Any, Any]] = {
         "configurations": {},
@@ -221,6 +220,7 @@ def fit_report_as_dict(
             buffer["errorbars"]["at_initial_value"] = name
         if np.allclose(par.value, par.min) or np.allclose(par.value, par.max):
             buffer["errorbars"]["at_boundary"] = name
+
     for name in parnames:
         par = params[name]
         buffer["variables"][name] = {}
@@ -319,15 +319,28 @@ def print_tabulate(args: Dict[str, Any]) -> None:
     @property
     def printing_regular_mode(self) -> None:
         """Print the fitting results in the regular mode."""
+        self.print_statistic()
+        self.print_fit_results()
+        self.print_confidence_interval()
+        self.print_linear_correlation()
+        self.print_regression_metrics()
+
+    def print_statistic(self) -> None:
+        """Print the statistic."""
         print("\nStatistic:\n")
         self.print_tabulate(args=self.args["data_statistic"])
 
+    def print_fit_results(self) -> None:
+        """Print the fit results."""
         print("\nFit Results and Insights:\n")
         print(
             report_fit(self.result, modelpars=self.result.params, **self.args["report"])
         )
 
-        print("\nConfidence Interval:\n")
+    def print_confidence_interval(self) -> None:
+        """Print the confidence interval."""
         if self.args["conf_interval"]:
+            print("\nConfidence Interval:\n")
             try:
                 report_ci(
                     conf_interval(
@@ -337,25 +350,55 @@ def printing_regular_mode(self) -> None:
             except MinimizerException as exc:
                 print(f"Error: {exc} -> No confidence interval could be calculated!")
                 self.args["confidence_interval"] = {}
 
+    def print_linear_correlation(self) -> None:
+        """Print the linear correlation."""
         print("\nOverall Linear-Correlation:\n")
         self.print_tabulate(args=self.args["linear_correlation"])
 
+    def print_regression_metrics(self) -> None:
+        """Print the regression metrics."""
         print("\nRegression Metrics:\n")
         self.print_tabulate(args=self.args["regression_metrics"])
 
     @property
     def printing_verbose_mode(self) -> None:
         """Print all results in verbose mode."""
+        self.print_statistic_verbose()
+        self.print_input_parameters_verbose()
+        self.print_fit_results_verbose()
+        self.print_confidence_interval_verbose()
+        self.print_linear_correlation_verbose()
+        self.print_regression_metrics_verbose()
+
+    def print_statistic_verbose(self) -> None:
+        """Print the data statistic in verbose mode."""
         print("\nStatistic:\n")
         pp.pprint(self.args["data_statistic"])
 
+    def print_input_parameters_verbose(self) -> None:
+        """Print input parameters in verbose mode."""
         print("Input Parameter:\n")
         pp.pprint(self.args)
 
+    def print_fit_results_verbose(self) -> None:
+        """Print fit results in verbose mode."""
         print("\nFit Results and Insights:\n")
         pp.pprint(self.args["fit_insights"])
 
+    def print_confidence_interval_verbose(self) -> None:
+        """Print confidence interval in verbose mode."""
         if self.args["conf_interval"]:
             print("\nConfidence Interval:\n")
             pp.pprint(self.args["confidence_interval"])
 
+    def print_linear_correlation_verbose(self) -> None:
+        """Print overall linear-correlation in verbose mode."""
         print("\nOverall Linear-Correlation:\n")
         pp.pprint(self.args["linear_correlation"])
 
+    def print_regression_metrics_verbose(self) -> None:
+        """Print regression metrics in verbose mode."""
         print("\nRegression Metrics:\n")
         pp.pprint(self.args["regression_metrics"])
 
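The monolithic `printing_regular_mode` and `printing_verbose_mode` properties are thus split into one small helper per report section, with each property reduced to a dispatcher; the confidence-interval heading is now also printed only when an interval was actually requested. A minimal sketch of the pattern (a side-effecting `@property` is an unusual design, but it preserves the existing call sites):

```python
class ReportPrinter:
    """Sketch: a property that fans out to focused print helpers."""

    @property
    def printing_regular_mode(self) -> None:
        self.print_statistic()
        self.print_regression_metrics()

    def print_statistic(self) -> None:
        print("\nStatistic:\n")

    def print_regression_metrics(self) -> None:
        print("\nRegression Metrics:\n")


# Merely accessing the property triggers the whole report sequence:
ReportPrinter().printing_regular_mode
```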
7 changes: 4 additions & 3 deletions spectrafit/spectrafit.py
@@ -3,6 +3,7 @@
 
 from typing import Any
 from typing import Dict
+from typing import MutableMapping
 from typing import Tuple
 
 import pandas as pd
@@ -228,8 +229,8 @@ def extracted_from_command_line_runner() -> Dict[str, Any]:
         Dict[str, Any]: The input file arguments as a dictionary with additional
             information beyond the command line arguments.
     """
-    result = get_args()
-    _args = read_input_file(result["input"])
+    result: Dict[str, Any] = get_args()
+    _args: MutableMapping[str, Any] = read_input_file(result["input"])
 
     if "settings" in _args.keys():
         for key in _args["settings"].keys():
@@ -279,7 +280,7 @@ def fitting_routine(args: Dict[str, Any]) -> Tuple[pd.DataFrame, Dict[str, Any]]
         contributions and the corresponding residuum. Furthermore, the dictionary
         is extended by advanced statistical information of the fit.
     """
-    df = load_data(args)
+    df: pd.DataFrame = load_data(args)
     df, args = PreProcessing(df=df, args=args)()
     minimizer, result = SolverModels(df=df, args=args)()
     df, args = PostProcessing(df=df, args=args, minimizer=minimizer, result=result)()
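The new annotations make the hand-off explicit: `read_input_file` may return any dict-like mapping, since TOML, YAML, and JSON parsers do not all guarantee a plain `dict`, so the wider `MutableMapping` is the honest return type. A small sketch of the idea (a hypothetical stand-in, not SpectraFit's actual reader):

```python
import json
from typing import Any, MutableMapping


def read_input_file(path: str) -> MutableMapping[str, Any]:
    """Hypothetical reader: json.load happens to return a plain dict, but the
    wider annotation also covers mappings from other parser back ends."""
    with open(path, encoding="utf8") as f:
        return json.load(f)


# MutableMapping (rather than Mapping) matters because the caller writes
# settings back into the result before merging it with the CLI arguments.
```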
4 changes: 1 addition & 3 deletions spectrafit/test/test_tools.py
@@ -105,9 +105,7 @@ def test_smoothing(self) -> None:
             "smooth": 5,
             "column": ["energy", "intensity"],
         }
-        assert (
-            PreProcessing(self.df, args).intensity_smooth(self.df, args).shape[0] == 100
-        )
+        assert PreProcessing(self.df, args).smooth_signal(self.df, args).shape[0] == 100
 
     def test_energy_shift(self) -> None:
         """Testing energy shift for no shift."""