
Commit b252199

Marchma0 and Copilot authored

Enh/bootstrapping for ci estimation (#897)

* Add the CI estimation method and test
* Update the changelog
* Add the documentation
* Add more tests
* Update rocketpy/simulation/monte_carlo.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

1 parent 2b6fa42 commit b252199

File tree

- CHANGELOG.md
- docs/user/mrs.rst
- rocketpy/simulation/monte_carlo.py
- tests/unit/simulation/test_monte_carlo.py

4 files changed, +219 -0 lines changed


CHANGELOG.md

Lines changed: 1 addition & 0 deletions

@@ -37,6 +37,7 @@ Attention: The newest changes should be on top -->
 - ENH: Enable only radial burning [#815](https://github.com/RocketPy-Team/RocketPy/pull/815)
 - ENH: Add thrustcurve api integration to retrieve motor eng data [#870](https://github.com/RocketPy-Team/RocketPy/pull/870)
 - ENH: custom warning no motor or aerosurface [#871](https://github.com/RocketPy-Team/RocketPy/pull/871)
+- ENH: Implement Bootstrapping for Confidence Interval Estimation [#897](https://github.com/RocketPy-Team/RocketPy/pull/897)
 
 ### Changed
 

docs/user/mrs.rst

Lines changed: 31 additions & 0 deletions

@@ -204,6 +204,37 @@ Finally, we can compare the ellipses for the apogees and landing points using th
 Note we can pass along parameters used in the usual `ellipses` method of the
 `MonteCarlo` class, in this case the `ylim` argument to expand the y-axis limits.
 
+
+Calculating Confidence Intervals
+--------------------------------
+
+Beyond visual comparisons, you may want to calculate statistical confidence intervals
+for specific attributes of the flight (e.g., apogee, max velocity) based on the
+resampled data. Since the resulting data is loaded into a ``MonteCarlo`` object,
+you can use its ``estimate_confidence_interval`` method to compute these intervals
+with the bootstrap method.
+
+The following example shows how to calculate the 95% confidence interval for the
+mean of the apogee using the ``mrs_results`` object created earlier:
+
+.. jupyter-execute::
+
+    # Calculate the 95% confidence interval for the mean apogee
+    # We pass np.mean as the statistic to be evaluated
+    apogee_ci = mrs_results.estimate_confidence_interval(
+        attribute="apogee",
+        statistic=np.mean,
+        confidence_level=0.95,
+        n_resamples=10000
+    )
+
+    print(f"95% Confidence Interval for Apogee Mean: {apogee_ci}")
+
+You can use any statistic function that accepts an array of data, such as ``np.median``
+or ``np.std``.
+
+
 Time Comparison
 ---------------
 
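The same call accepts any reducing statistic. As a hypothetical extension of the snippet
above (assuming the ``mrs_results`` object and the ``np`` alias are still in scope), a
99% interval for the median apogee could be requested like this:

    # Sketch only: 99% confidence interval for the median apogee
    apogee_median_ci = mrs_results.estimate_confidence_interval(
        attribute="apogee",
        statistic=np.median,
        confidence_level=0.99,
    )
    print(f"99% Confidence Interval for Apogee Median: {apogee_median_ci}")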

rocketpy/simulation/monte_carlo.py

Lines changed: 62 additions & 0 deletions

@@ -22,6 +22,7 @@
 
 import numpy as np
 import simplekml
+from scipy.stats import bootstrap
 
 from rocketpy._encoders import RocketPyEncoder
 from rocketpy.plots.monte_carlo_plots import _MonteCarloPlots
@@ -463,6 +464,67 @@ def __run_single_simulation(self):
             time_overshoot=self.flight.time_overshoot,
         )
 
+    def estimate_confidence_interval(
+        self,
+        attribute,
+        statistic=np.mean,
+        confidence_level=0.95,
+        n_resamples=1000,
+        random_state=None,
+    ):
+        """
+        Estimates the confidence interval for a specific attribute of the results
+        using the bootstrap method.
+
+        Parameters
+        ----------
+        attribute : str
+            The name of the attribute stored in self.results (e.g., "apogee", "max_velocity").
+        statistic : callable, optional
+            A function that computes the statistic of interest (e.g., np.mean, np.std).
+            Default is np.mean.
+        confidence_level : float, optional
+            The confidence level for the interval (between 0 and 1). Default is 0.95.
+        n_resamples : int, optional
+            The number of resamples to perform. Default is 1000.
+        random_state : int or None, optional
+            Seed for the random number generator to ensure reproducibility. If None (default), the random number generator is not seeded.
+
+        Returns
+        -------
+        confidence_interval : ConfidenceInterval
+            An object containing the low and high bounds of the confidence interval.
+            Access via .low and .high.
+        """
+        if attribute not in self.results:
+            available = list(self.results.keys())
+            raise ValueError(
+                f"Attribute '{attribute}' not found in results. Available attributes: {available}"
+            )
+
+        if not 0 < confidence_level < 1:
+            raise ValueError(
+                f"confidence_level must be between 0 and 1, got {confidence_level}"
+            )
+
+        if not isinstance(n_resamples, int) or n_resamples <= 0:
+            raise ValueError(
+                f"n_resamples must be a positive integer, got {n_resamples}"
+            )
+
+        data = (np.array(self.results[attribute]),)
+
+        res = bootstrap(
+            data,
+            statistic,
+            confidence_level=confidence_level,
+            n_resamples=n_resamples,
+            random_state=random_state,
+            method="percentile",
+        )
+
+        return res.confidence_interval
+
     def __evaluate_flight_inputs(self, sim_idx):
         """Evaluates the inputs of a single flight simulation.
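The new method is essentially a thin wrapper around ``scipy.stats.bootstrap`` with the
percentile method. As a rough, self-contained preview of what it computes, the sketch
below exercises the same call on synthetic apogee samples (the data, seed, and variable
names are illustrative only, not taken from RocketPy):

    # Standalone sketch of the bootstrap call wrapped by estimate_confidence_interval.
    # The apogee samples below are synthetic and purely illustrative.
    import numpy as np
    from scipy.stats import bootstrap

    rng = np.random.default_rng(42)
    apogees = rng.normal(loc=3000.0, scale=50.0, size=200)  # fake apogee samples [m]

    res = bootstrap(
        (apogees,),              # data must be wrapped in a sequence of samples
        np.mean,                 # statistic of interest
        confidence_level=0.95,
        n_resamples=1000,
        random_state=0,
        method="percentile",
    )

    ci = res.confidence_interval  # ConfidenceInterval namedtuple
    print(f"95% CI for the mean apogee: [{ci.low:.2f}, {ci.high:.2f}] m")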

tests/unit/simulation/test_monte_carlo.py

Lines changed: 125 additions & 0 deletions

@@ -1,5 +1,8 @@
 import matplotlib as plt
 import numpy as np
+import pytest
+
+from rocketpy.simulation import MonteCarlo
 
 plt.rcParams.update({"figure.max_open_warning": 0})
 
@@ -60,3 +63,125 @@ def test_stochastic_calisto_create_object_with_static_margin(stochastic_calisto)
 
     assert np.isclose(np.mean(all_margins), 2.2625350013000434, rtol=0.15)
     assert np.isclose(np.std(all_margins), 0.1, atol=0.2)
+
+
+class MockMonteCarlo(MonteCarlo):
+    """Mock class to test the method without running a real simulation."""
+
+    def __init__(self):
+        # pylint: disable=super-init-not-called
+
+        # Simulate pre-calculated results
+        # Example: a normal distribution centered on 100 for the apogee
+        self.results = {
+            "apogee": [98, 102, 100, 99, 101, 100, 97, 103],
+            "max_velocity": [250, 255, 245, 252, 248],
+            "single_point": [100],
+            "empty_attribute": [],
+        }
+
+
+def test_estimate_confidence_interval_contains_known_mean():
+    """Checks that the confidence interval contains the known mean."""
+    mc = MockMonteCarlo()
+
+    ci = mc.estimate_confidence_interval("apogee", confidence_level=0.95)
+
+    assert ci.low < 100 < ci.high
+    assert ci.low < ci.high
+
+
+def test_estimate_confidence_interval_supports_custom_statistic():
+    """Checks that the statistic can be changed (e.g., standard deviation instead of mean)."""
+    mc = MockMonteCarlo()
+
+    ci_std = mc.estimate_confidence_interval("apogee", statistic=np.std)
+
+    assert ci_std.low > 0
+    assert ci_std.low < ci_std.high
+
+
+def test_estimate_confidence_interval_raises_value_error_when_attribute_missing():
+    """Checks that the code raises an error if the key does not exist."""
+    mc = MockMonteCarlo()
+
+    # Request a variable that does not exist ("altitude" is not in our mock)
+    with pytest.raises(ValueError) as excinfo:
+        mc.estimate_confidence_interval("altitude")
+
+    assert "not found in results" in str(excinfo.value)
+
+
+def test_estimate_confidence_interval_increases_width_with_higher_confidence_level():
+    """Checks that a higher confidence level yields a wider interval."""
+    mc = MockMonteCarlo()
+
+    ci_90 = mc.estimate_confidence_interval("apogee", confidence_level=0.90)
+    width_90 = ci_90.high - ci_90.low
+
+    ci_99 = mc.estimate_confidence_interval("apogee", confidence_level=0.99)
+    width_99 = ci_99.high - ci_99.low
+
+    # The more confident we want to be (99%), the wider the interval must be
+    assert width_99 >= width_90
+
+
+def test_estimate_confidence_interval_raises_value_error_when_confidence_level_out_of_bounds():
+    """Checks that validation fails if confidence_level is not strictly between 0 and 1."""
+    mc = MockMonteCarlo()
+
+    # Case 1: value <= 0
+    with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
+        mc.estimate_confidence_interval("apogee", confidence_level=0)
+
+    with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
+        mc.estimate_confidence_interval("apogee", confidence_level=-0.5)
+
+    # Case 2: value >= 1
+    with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
+        mc.estimate_confidence_interval("apogee", confidence_level=1)
+
+    with pytest.raises(ValueError, match="confidence_level must be between 0 and 1"):
+        mc.estimate_confidence_interval("apogee", confidence_level=1.5)
+
+
+def test_estimate_confidence_interval_raises_value_error_when_n_resamples_invalid():
+    """Checks that validation fails if n_resamples is not a positive integer."""
+    mc = MockMonteCarlo()
+
+    # Case 1: not an integer (e.g. a float)
+    with pytest.raises(ValueError, match="n_resamples must be a positive integer"):
+        mc.estimate_confidence_interval("apogee", n_resamples=1000.5)
+
+    # Case 2: zero or negative
+    with pytest.raises(ValueError, match="n_resamples must be a positive integer"):
+        mc.estimate_confidence_interval("apogee", n_resamples=0)
+
+    with pytest.raises(ValueError, match="n_resamples must be a positive integer"):
+        mc.estimate_confidence_interval("apogee", n_resamples=-100)
+
+
+def test_estimate_confidence_interval_raises_value_error_on_empty_data_list():
+    """Checks behavior when the attribute exists but contains no data (empty list)."""
+    mc = MockMonteCarlo()
+
+    with pytest.raises(ValueError):
+        mc.estimate_confidence_interval("empty_attribute")
+
+
+def test_estimate_confidence_interval_handles_single_data_point():
+    """Checks that a single data point raises an error, since the bootstrap
+    requires at least two observations."""
+    mc = MockMonteCarlo()
+
+    with pytest.raises(ValueError):  # bootstrap needs two or more values
+        mc.estimate_confidence_interval("single_point", n_resamples=50)
+
+
+def test_estimate_confidence_interval_raises_type_error_for_invalid_statistic():
+    """Checks that passing a non-callable object (like a string/int) as statistic raises TypeError."""
+    mc = MockMonteCarlo()
+
+    with pytest.raises(TypeError):
+        mc.estimate_confidence_interval("apogee", statistic=1)
+
+    with pytest.raises(TypeError):
+        mc.estimate_confidence_interval("apogee", statistic="not_a_function")
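One behaviour the suite above does not cover is reproducibility. Since the method accepts
a ``random_state`` seed, a hypothetical additional test (a sketch only, reusing the
``MockMonteCarlo`` helper defined above) could check that the same seed yields the same
bounds:

    def test_estimate_confidence_interval_is_reproducible_with_random_state():
        """Sketch: the same random_state should yield identical interval bounds."""
        mc = MockMonteCarlo()

        ci_a = mc.estimate_confidence_interval("apogee", random_state=42)
        ci_b = mc.estimate_confidence_interval("apogee", random_state=42)

        assert ci_a.low == ci_b.low
        assert ci_a.high == ci_b.high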
