diff --git a/README.md b/README.md
deleted file mode 100644
index f145828..0000000
--- a/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# [sambo-optimization.github.io](https://sambo-optimization.github.io)
diff --git a/ads.txt b/ads.txt
new file mode 100644
index 0000000..d653226
--- /dev/null
+++ b/ads.txt
@@ -0,0 +1 @@
+google.com, pub-2900001379782823, DIRECT, f08c47fec0942fa0
\ No newline at end of file
diff --git a/benchmark.txt b/benchmark.txt
new file mode 100644
index 0000000..719a2f6
--- /dev/null
+++ b/benchmark.txt
@@ -0,0 +1,279 @@
+numpy 1.26.4
+scipy 1.14.1
+scikit-learn 1.6.0
+scikit-optimize 0.10.2
+hyperopt 0.2.7
+nevergrad 1.0.8
+optuna 4.2.0
+sambo 1.25.1
+
+Test function Method N Evals Error % Duration
+————————————————————————————————————————————————————————————————————————————————
+6-hump-camelback/2 shgo 10 0 0.00
+6-hump-camelback/2 SLSQP 24 0 0.00
+6-hump-camelback/2 COBYQA 34 0 0.10
+6-hump-camelback/2 COBYLA 36 0 0.00
+6-hump-camelback/2 CG † 39 0 0.00
+6-hump-camelback/2 trust-constr 45 0 0.13
+6-hump-camelback/2 Nelder-Mead † 71 0 0.00
+6-hump-camelback/2 Powell † 74 0 0.00
+6-hump-camelback/2 TNC † 75 0 0.01
+6-hump-camelback/2 sambo.minimize(smbo) 87 0 15.31
+6-hump-camelback/2 sambo.minimize(shgo) 150 0 0.03
+6-hump-camelback/2 scikit-optimize 160 0 18.91
+6-hump-camelback/2 Optuna 245 0 2.24
+6-hump-camelback/2 differential_evolution 318 0 0.03
+6-hump-camelback/2 sambo.minimize(sceua) 482 0 0.06
+6-hump-camelback/2 nevergrad 970 0 4.80
+6-hump-camelback/2 basinhopping 1437 0 0.22
+6-hump-camelback/2 hyperopt 1543 0 17.28
+6-hump-camelback/2 direct † 2011 0 0.01
+6-hump-camelback/2 dual_annealing † 4046 0 0.25
+bird/2 COBYQA 34 0 0.14
+bird/2 SLSQP 35 0 0.01
+bird/2 COBYLA 40 0 0.00
+bird/2 Powell † 40 0 0.00
+bird/2 CG † 54 0 0.01
+bird/2 Nelder-Mead † 67 0 0.00
+bird/2 sambo.minimize(smbo) 93 0 31.59
+bird/2 TNC † 129 0 0.01
+bird/2 trust-constr 150 0 0.14
+bird/2 sambo.minimize(shgo) 192 0 0.06
+bird/2 sambo.minimize(sceua) 270 0 0.05
+bird/2 scikit-optimize 289 0 63.29
+bird/2 Optuna 353 0 3.88
+bird/2 differential_evolution 393 0 0.15
+bird/2 hyperopt 1097 0 9.08
+bird/2 nevergrad 1208 0 5.43
+bird/2 direct † 2007 0 0.03
+bird/2 dual_annealing † 4037 0 0.26
+bird/2 shgo 38* 53 0.01
+bird/2 basinhopping 66* 100 0.01
+branin-hoo/2 SLSQP 23 0 0.00
+branin-hoo/2 COBYQA 40 0 0.10
+branin-hoo/2 COBYLA 46 0 0.00
+branin-hoo/2 shgo 55 0 0.01
+branin-hoo/2 trust-constr 63 0 0.14
+branin-hoo/2 CG † 66 0 0.01
+branin-hoo/2 Nelder-Mead † 84 0 0.00
+branin-hoo/2 sambo.minimize(smbo) 87 0 10.45
+branin-hoo/2 Powell † 95 0 0.00
+branin-hoo/2 TNC † 138 0 0.01
+branin-hoo/2 sambo.minimize(shgo) 144 0 0.02
+branin-hoo/2 Optuna 286 0 2.58
+branin-hoo/2 scikit-optimize 304 0 57.91
+branin-hoo/2 sambo.minimize(sceua) 476 0 0.06
+branin-hoo/2 basinhopping 495 0 0.06
+branin-hoo/2 differential_evolution 555 0 0.05
+branin-hoo/2 nevergrad 1045 0 4.08
+branin-hoo/2 hyperopt 1249 0 9.62
+branin-hoo/2 direct † 2009 0 0.02
+branin-hoo/2 dual_annealing † 4031 0 0.23
+eggholder/2 sambo.minimize(shgo) 162 0 0.03
+eggholder/2 direct † 2011 0 0.02
+eggholder/2 dual_annealing † 4076 0 0.26
+eggholder/2 sambo.minimize(smbo) 102 1 48.04
+eggholder/2 scikit-optimize 343 1 67.57
+eggholder/2 sambo.minimize(sceua) 905 1 0.12
+eggholder/2 differential_evolution 741* 3 0.06
+eggholder/2 hyperopt 948* 4 6.24
+eggholder/2 Optuna 181* 9 1.73
+eggholder/2 TNC † 117* 12 0.01
+eggholder/2 nevergrad 538* 13 2.47
+eggholder/2 shgo 94* 20 0.01
+eggholder/2 Nelder-Mead † 108* 35 0.00
+eggholder/2 COBYQA 53* 37 0.13
+eggholder/2 COBYLA 129* 37 0.01
+eggholder/2 trust-constr 141* 37 0.18
+eggholder/2 CG † 57* 38 0.01
+eggholder/2 SLSQP 47* 43 0.00
+eggholder/2 basinhopping 1269* 44 0.16
+eggholder/2 Powell † 135* 48 0.01
+gomez-levy/2 COBYQA 39 0 0.15
+gomez-levy/2 COBYLA 45 0 0.00
+gomez-levy/2 sambo.minimize(smbo) 88 0 23.72
+gomez-levy/2 scikit-optimize 165 0 24.47
+gomez-levy/2 shgo 298 0 0.04
+gomez-levy/2 sambo.minimize(shgo) 324 0 0.07
+gomez-levy/2 differential_evolution 423 0 0.10
+gomez-levy/2 sambo.minimize(sceua) 550 0 0.09
+gomez-levy/2 nevergrad 972 0 4.02
+gomez-levy/2 SLSQP 1104 0 0.12
+gomez-levy/2 direct † 2015 0 0.02
+gomez-levy/2 trust-constr 3231 0 1.91
+gomez-levy/2 dual_annealing † 4061 0 0.24
+gomez-levy/2 Nelder-Mead † 133 1 0.01
+gomez-levy/2 Optuna 264 1 2.38
+gomez-levy/2 Powell † 78 2 0.00
+gomez-levy/2 TNC † 174 2 0.01
+gomez-levy/2 hyperopt 500 2 2.99
+gomez-levy/2 basinhopping 802* 3 0.08
+gomez-levy/2 CG † 32* 10 0.00
+griewank/2 shgo 39 0 0.01
+griewank/2 sambo.minimize(shgo) 103 0 0.02
+griewank/2 Powell † 118 0 0.01
+griewank/2 sambo.minimize(sceua) 263 0 0.04
+griewank/2 scikit-optimize 333 0 65.08
+griewank/2 Optuna 343 0 3.05
+griewank/2 direct † 461 0 0.01
+griewank/2 nevergrad 753 0 3.10
+griewank/2 basinhopping 1065 0 0.16
+griewank/2 hyperopt 1217 0 9.17
+griewank/2 differential_evolution 1392 0 0.15
+griewank/2 dual_annealing † 4109 0 0.35
+griewank/2 sambo.minimize(smbo) 86 1 7.72
+griewank/2 Nelder-Mead † 102 1 0.01
+griewank/2 SLSQP 18* 10 0.00
+griewank/2 CG † 24* 10 0.00
+griewank/2 COBYQA 33* 10 0.09
+griewank/2 trust-constr 33* 10 0.10
+griewank/2 COBYLA 35* 10 0.00
+griewank/2 TNC † 105* 10 0.01
+hartman/6 SLSQP 96 0 0.01
+hartman/6 COBYLA 118 0 0.01
+hartman/6 trust-constr 147 0 0.14
+hartman/6 Powell † 161 0 0.01
+hartman/6 shgo 168 0 0.02
+hartman/6 CG † 252 0 0.02
+hartman/6 Nelder-Mead † 422 0 0.03
+hartman/6 sambo.minimize(shgo) 513 0 0.10
+hartman/6 TNC † 616 0 0.04
+hartman/6 sambo.minimize(smbo) 634 0 182.30
+hartman/6 direct † 733 0 0.02
+hartman/6 nevergrad 1256 0 17.88
+hartman/6 differential_evolution 1787 0 0.18
+hartman/6 dual_annealing † 12120 0 0.92
+hartman/6 basinhopping 12376 0 1.02
+hartman/6 Optuna 352 1 10.11
+hartman/6 sambo.minimize(sceua) 593 1 0.09
+hartman/6 COBYQA 222* 4 0.58
+hartman/6 scikit-optimize 484* 5 68.87
+hartman/6 hyperopt 789* 5 11.41
+rastrigin/2 sambo.minimize(shgo) 21 0 0.02
+rastrigin/2 shgo 26 0 0.01
+rastrigin/2 SLSQP 42 0 0.01
+rastrigin/2 sambo.minimize(smbo) 86 0 17.41
+rastrigin/2 direct † 313 0 0.01
+rastrigin/2 sambo.minimize(sceua) 491 0 0.34
+rastrigin/2 basinhopping 828 0 0.11
+rastrigin/2 nevergrad 1119 0 4.55
+rastrigin/2 differential_evolution 1972 0 0.40
+rastrigin/2 dual_annealing † 4088 0 0.27
+rastrigin/2 COBYQA 37 2 0.15
+rastrigin/2 COBYLA 40 2 0.00
+rastrigin/2 Optuna 269* 3 2.44
+rastrigin/2 scikit-optimize 272* 3 385.21
+rastrigin/2 trust-constr 1161* 5 0.61
+rastrigin/2 hyperopt 500* 6 2.28
+rastrigin/2 CG † 3* 100 0.00
+rastrigin/2 TNC † 3* 100 0.00
+rastrigin/2 Nelder-Mead † 47* 100 0.00
+rastrigin/2 Powell † 51* 100 0.00
+rosenbrock/10 direct † 413 0 0.01
+rosenbrock/10 SLSQP 637 0 0.08
+rosenbrock/10 sambo.minimize(shgo) 664 0 2.08
+rosenbrock/10 shgo 708 0 1.88
+rosenbrock/10 COBYQA 914 0 5.48
+rosenbrock/10 COBYLA 1000 0 0.08
+rosenbrock/10 TNC † 1100 0 0.07
+rosenbrock/10 sambo.minimize(sceua) 1382 0 0.31
+rosenbrock/10 trust-constr 1485 0 0.53
+rosenbrock/10 Nelder-Mead † 2000 0 0.14
+rosenbrock/10 Powell † 2758 0 0.16
+rosenbrock/10 nevergrad 3000 0 8.89
+rosenbrock/10 CG † 4272 0 0.31
+rosenbrock/10 basinhopping 20901 0 1.62
+rosenbrock/10 dual_annealing † 24489 0 1.80
+rosenbrock/10 differential_evolution 150652 0 21.54
+rosenbrock/10 Optuna 1260 1 96.86
+rosenbrock/10 sambo.minimize(smbo) 1334 1 189.21
+rosenbrock/10 hyperopt 500* 4 9.64
+rosenbrock/10 scikit-optimize 259* 9 0.93
+rosenbrock/2 sambo.minimize(smbo) 91 0 27.02
+rosenbrock/2 COBYQA 100 0 0.40
+rosenbrock/2 sambo.minimize(shgo) 149 0 0.05
+rosenbrock/2 Optuna 152 0 1.32
+rosenbrock/2 shgo 176 0 0.05
+rosenbrock/2 Powell † 224 0 0.01
+rosenbrock/2 scikit-optimize 241 0 47.67
+rosenbrock/2 Nelder-Mead † 282 0 0.01
+rosenbrock/2 nevergrad 351 0 2.39
+rosenbrock/2 sambo.minimize(sceua) 386 0 0.07
+rosenbrock/2 COBYLA 1000 0 0.07
+rosenbrock/2 SLSQP 1124 0 0.15
+rosenbrock/2 hyperopt 1317 0 10.19
+rosenbrock/2 direct † 2011 0 0.07
+rosenbrock/2 trust-constr 2988 0 1.68
+rosenbrock/2 differential_evolution 3504 0 3.06
+rosenbrock/2 dual_annealing † 4283 0 0.50
+rosenbrock/2 TNC † 93 1 0.01
+rosenbrock/2 basinhopping 534 1 0.08
+rosenbrock/2 CG † 29 2 0.00
+schwefel/2 sambo.minimize(smbo) 93 0 20.70
+schwefel/2 sambo.minimize(shgo) 135 0 0.03
+schwefel/2 scikit-optimize 478 0 103.15
+schwefel/2 direct † 665 0 0.02
+schwefel/2 sambo.minimize(sceua) 702 0 0.10
+schwefel/2 hyperopt 1331 0 10.93
+schwefel/2 dual_annealing † 4046 0 0.28
+schwefel/2 differential_evolution 4719 0 0.46
+schwefel/2 Optuna 394* 7 3.84
+schwefel/2 nevergrad 486* 16 2.17
+schwefel/2 shgo 34* 21 0.00
+schwefel/2 Powell † 54* 25 0.00
+schwefel/2 SLSQP 24* 34 0.00
+schwefel/2 trust-constr 24* 34 0.08
+schwefel/2 COBYLA 44* 34 0.00
+schwefel/2 COBYQA 44* 34 0.11
+schwefel/2 CG † 69* 34 0.01
+schwefel/2 Nelder-Mead † 82* 34 0.00
+schwefel/2 TNC † 153* 34 0.01
+schwefel/2 basinhopping 768* 50 0.10
+simionescu/2 COBYQA 52 0 0.22
+simionescu/2 sambo.minimize(smbo) 91 0 34.08
+simionescu/2 sambo.minimize(sceua) 107 0 0.03
+simionescu/2 scikit-optimize 151 0 19.10
+simionescu/2 Nelder-Mead † 218 0 0.01
+simionescu/2 differential_evolution 981 0 0.47
+simionescu/2 hyperopt 987 0 6.54
+simionescu/2 direct † 2013 0 0.02
+simionescu/2 dual_annealing † 4163 0 0.24
+simionescu/2 Optuna 218 1 2.00
+simionescu/2 trust-constr 3063 1 2.03
+simionescu/2 sambo.minimize(shgo) 73* 11 0.02
+simionescu/2 Powell † 91* 11 0.00
+simionescu/2 TNC † 96* 32 0.01
+simionescu/2 CG † 65* 34 0.01
+simionescu/2 basinhopping 547* 50 0.06
+simionescu/2 nevergrad 783* 58 3.16
+simionescu/2 SLSQP 21* 59 0.01
+simionescu/2 shgo 1249* 59 0.14
+simionescu/2 COBYLA 47* 100 0.00
+
+
+Method Correct N Evals Error % Duration
+————————————————————————————————————————————————————————————
+sambo.minimize(smbo) 100% 239 0 25.37
+sambo.minimize(sceua) 100% 551 0 0.08
+direct † 100% 1388 0 0.02
+dual_annealing † 100% 6462 0 0.27
+sambo.minimize(shgo) 92% 219 1 0.03
+differential_evolution 92% 13953 0 0.16
+scikit-optimize 75% 290 2 60.60
+Nelder-Mead † 75% 301 14 0.01
+Optuna 75% 360 2 2.51
+nevergrad 75% 1040 7 4.05
+COBYQA 67% 134 7 0.15
+COBYLA 67% 215 15 0.00
+shgo 67% 241 13 0.01
+SLSQP 67% 266 12 0.01
+Powell † 67% 323 16 0.00
+hyperopt 67% 998 2 9.39
+trust-constr 67% 1044 7 0.16
+TNC † 58% 233 16 0.01
+basinhopping 58% 3424 21 0.11
+CG † 50% 414 19 0.01
+
+
+* Did not finish / unexpected result.
+† Non-constrained method.
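[Editor's note: each benchmark.txt row above is one solver run on a test function (name/dimensionality), reporting the number of objective evaluations used, the error relative to the known optimum in percent, and the run duration (presumably seconds). A minimal sketch of reproducing one such row follows; it assumes sambo's documented minimize() API with method= one of 'shgo', 'sceua', 'smbo' (the names attested in the table and in the convergence figure legends below) and uses the standard six-hump camelback definition. The actual benchmark harness is not part of this diff.]

import sambo

def camelback(x):
    # Standard six-hump camelback test function; global minimum f* ~= -1.0316
    x1, x2 = x
    return (4 - 2.1*x1**2 + x1**4/3)*x1**2 + x1*x2 + (-4 + 4*x2**2)*x2**2

# One cell of the table above: 6-hump-camelback/2 with method 'smbo'
result = sambo.minimize(camelback, bounds=[(-3, 3), (-2, 2)], method='smbo')
print(result.x, result.fun)  # best point found and its objective value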
diff --git a/contourf.jpg b/contourf.jpg
new file mode 100644
index 0000000..8d78e24
Binary files /dev/null and b/contourf.jpg differ
diff --git a/convergence.svg b/convergence.svg
new file mode 100644
index 0000000..7fd3fab
--- /dev/null
+++ b/convergence.svg
@@ -0,0 +1,646 @@
[646 lines of Matplotlib v3.10.3 SVG markup elided. The figure, titled "Convergence", plots "min f(x) after n evaluations" (log scale, 10^0 to 10^2) against "Number of function evaluations n" (0 to 48), with legend entries method='shgo', method='sceua', method='smbo', and a "True minimum" reference line. Footer: "Created with SAMBO, https://sambo-optimization.github.io".]
diff --git a/convergence2.svg b/convergence2.svg
new file mode 100644
index 0000000..5fa48d0
--- /dev/null
+++ b/convergence2.svg
@@ -0,0 +1,705 @@
[705 lines of Matplotlib v3.10.3 SVG markup elided. Same "Convergence" layout with a linear y-axis (0 to 80) and legend entries method='smbo', estimator='gp'; method='smbo', estimator='et'; method='smbo', estimator='gb'; plus the "True minimum" reference line. Footer: "Created with SAMBO, https://sambo-optimization.github.io".]
diff --git a/doc/doc-search.html b/doc/doc-search.html
new file mode 100644
index 0000000..016be18
--- /dev/null
+++ b/doc/doc-search.html
@@ -0,0 +1,199 @@
[199 lines of documentation search-page HTML elided; only whitespace and boilerplate markup survived extraction.]
\ No newline at end of file
diff --git a/doc/index.js b/doc/index.js
new file mode 100644
index 0000000..7e2f3f9
--- /dev/null
+++ b/doc/index.js
@@ -0,0 +1,4 @@
+let [INDEX, DOCS] = [{"version":"2.3.9","fields":["name","ref","doc"], …
[Remainder of the pre-built full-text search index for the API docs elided: minified JSON word-position data, truncated in the source.]
153,4],[2445,4]]},77,{"position":[[450,4],[775,4]]}]],["iter",[],[],[5,{"position":[[3184,10],[3606,10]]},8,{"position":[[323,11],[910,10],[1124,10],[1180,10],[2082,10]]},26,{"position":[[69,10],[187,11],[375,10],[766,10],[947,10],[1175,11]]},50,{"position":[[10,10]]},62,{"position":[[719,10]]}]],["befor",[],[],[5,{"position":[[3215,6]]},8,{"position":[[1009,6],[1211,6]]}]],["stop",[],[],[5,{"position":[[3222,9],[3340,5],[3634,5]]},8,{"position":[[1218,9],[1502,5],[2110,5]]},26,{"position":[[419,8]]}]],["depend",[],[],[5,{"position":[[3250,10]]},65,{"position":[[73,11]]},74,{"position":[[28,10],[322,10],[481,10],[951,10],[1657,10],[2874,10]]}]],["tol",[],[],[5,{"position":[[3261,3]]},8,{"position":[[1423,3]]}]],["float32_precis",[],[],[5,{"position":[[3282,17]]},8,{"position":[[1444,17]]}]],["toler",[],[],[5,{"position":[[3300,9]]},8,{"position":[[1462,9]]}]],["found",[],[],[5,{"position":[[3351,5]]},8,{"position":[[1513,5]]},29,{"position":[[76,5]]},71,{"position":[[540,5]]},74,{"position":[[889,5],[1389,5],[2392,5]]},77,{"position":[[359,5]]}]],["threshold",[],[],[5,{"position":[[3393,10]]},8,{"position":[[1555,10]]}]],["y0",[],[],[5,{"position":[[3404,2]]},8,{"position":[[1880,2]]}]],["tuple[float",[],[],[5,{"position":[[3418,13]]},8,{"position":[[1894,13]]}]],["value(",[],[],[5,{"position":[[3449,8]]},8,{"position":[[1925,8]]},23,{"position":[[297,8]]},74,{"position":[[2331,8]]}]],["correspond",[],[],[5,{"position":[[3484,13]]},8,{"position":[[1960,13]]},23,{"position":[[387,13],[501,10]]}]],["callback",[],[],[5,{"position":[[3507,8],[3562,8],[3647,8]]},8,{"position":[[1983,8],[2038,8],[2123,8]]}]],["optimizeresult",[30,{"position":[[0,14]]}],[],[5,{"position":[[3527,16]]},8,{"position":[[2003,16]]},26,{"position":[[1047,15],[1063,14]]},62,{"position":[[1403,14]]},68,{"position":[[138,14],[167,15]]},71,{"position":[[208,14],[237,15]]},74,{"position":[[1430,14]]},77,{"position":[[403,14]]}]],["call",[],[],[5,{"position":[[3588,6]]},8,{"position":[[2064,6]]}]],["rais",[],[],[5,{"position":[[3672,6]]},8,{"position":[[2148,6]]}]],["stopiter",[],[],[5,{"position":[[3680,13]]},8,{"position":[[2156,13]]}]],["n_job",[],[],[5,{"position":[[3696,6]]},8,{"position":[[2172,6]]},14,{"position":[[1155,6]]},62,{"position":[[1094,7]]}]],["run",[24,{"position":[[0,3]]}],[],[5,{"position":[[3764,3]]},8,{"position":[[2240,3]]},26,{"position":[[1128,3]]}]],["parallel",[],[],[5,{"position":[[3771,9]]},8,{"position":[[2247,9]]},14,{"position":[[1132,8]]}]],["applic",[],[],[5,{"position":[[3786,9]]},8,{"position":[[2262,9]]}]],["n_candid",[],[],[5,{"position":[[3801,12],[4240,12]]},8,{"position":[[1051,12],[2277,12]]},14,{"position":[[144,12],[1025,14],[1168,12]]},26,{"position":[[865,12]]}]],["disp",[],[],[5,{"position":[[3819,4]]},8,{"position":[[2295,4]]}]],["fals",[],[],[5,{"position":[[3840,5]]},8,{"position":[[2316,5]]}]],["display",[],[],[5,{"position":[[3846,7]]},8,{"position":[[2322,7]]}]],["progress",[],[],[5,{"position":[[3854,8]]},8,{"position":[[2330,8]]}]],["intermedi",[],[],[5,{"position":[[3867,12]]},8,{"position":[[2343,12]]}]],["rng",[],[],[5,{"position":[[3889,3]]},8,{"position":[[1416,4],[2365,3]]},62,{"position":[[893,3]]}]],["np.random.randomst",[],[],[5,{"position":[[3902,21]]},8,{"position":[[2378,21]]},62,{"position":[[906,21]]}]],["np.random.gener",[],[],[5,{"position":[[3927,20]]},8,{"position":[[2403,20]]}]],["random",[],[],[5,{"position":[[3957,6]]},8,{"position":[[1365,10],[2433,6]]},26,{"position":[[665,6]]},62,{"position":[[975,6]]},74,{"position":[[555,6]]}]],["gener",[],[],[5
,{"position":[[3971,9]]},8,{"position":[[1110,9],[1278,9],[2447,9]]}]],["seed",[],[],[5,{"position":[[3984,4]]},8,{"position":[[2460,4]]},62,{"position":[[982,4]]}]],["reproduc",[],[],[5,{"position":[[3993,16]]},8,{"position":[[2469,16]]},62,{"position":[[991,16]]}]],["kwarg",[],[],[5,{"position":[[4011,6]]},62,{"position":[[1009,6]]}]],["dict",[],[],[5,{"position":[[4020,5]]},62,{"position":[[472,4],[1018,5]]}]],["popular",[],[],[5,{"position":[[4100,7]]},8,{"position":[[1677,7]]}]],["method=\"shgo",[],[],[5,{"position":[[4127,13]]}]],["n_init",[],[],[5,{"position":[[4144,6],[4230,6]]},8,{"position":[[930,6]]}]],["point",[],[],[5,{"position":[[4170,8]]},8,{"position":[[1326,5]]},17,{"position":[[39,6]]},20,{"position":[[49,7]]},23,{"position":[[130,6]]},29,{"position":[[324,6],[394,6]]},56,{"position":[[29,6]]},74,{"position":[[821,6],[1617,6],[2511,6]]},77,{"position":[[29,6],[252,7],[1181,7]]}]],["sampling_method=\"halton",[],[],[5,{"position":[[4180,24]]}]],["method=\"smbo",[],[],[5,{"position":[[4213,13]]}]],["n_model",[],[],[5,{"position":[[4256,8]]},8,{"position":[[1228,8]]}]],["explan",[],[],[5,{"position":[[4283,12]]},62,{"position":[[1192,12]]}]],["sambo.optim",[],[7,{"position":[[0,15]]}],[5,{"position":[[4307,15]]}]],["method=\"sceua",[],[],[5,{"position":[[4332,14]]}]],["n_complex",[],[],[5,{"position":[[4350,11]]}]],["exampl",[],[],[5,{"position":[[4450,8],[4496,8],[4906,8]]},8,{"position":[[2486,8]]},14,{"position":[[1187,8]]},23,{"position":[[653,8]]},26,{"position":[[1115,8]]},29,{"position":[[409,8]]},65,{"position":[[112,7]]},68,{"position":[[558,7]]},71,{"position":[[715,7]]},74,{"position":[[2896,7]]},77,{"position":[[1503,7]]}]],["basic",[],[],[5,{"position":[[4463,5]]}]],["constrain",[],[],[5,{"position":[[4469,11]]}]],["10",[],[],[5,{"position":[[4481,2],[4620,3],[5521,4]]},8,{"position":[[1167,2]]}]],["scipy.optim",[],[],[5,{"position":[[4514,14]]},65,{"position":[[170,14]]}]],["rosen",[],[],[5,{"position":[[4536,5]]},65,{"position":[[192,5]]}]],["minimize(rosen",[],[],[5,{"position":[[4586,15]]},65,{"position":[[242,15]]}]],["sum(x",[],[],[5,{"position":[[4649,6]]},8,{"position":[[2568,5]]},65,{"position":[[310,6]]}]],["messag",[36,{"position":[[0,7]]}],[],[5,{"position":[[4667,8]]}]],["termin",[],[],[5,{"position":[[4689,10]]},38,{"position":[[36,12]]}]],["successfulli",[],[],[5,{"position":[[4700,13]]},35,{"position":[[36,13]]}]],["success",[33,{"position":[[0,7]]}],[],[5,{"position":[[4714,8]]}]],["0.0",[],[],[5,{"position":[[4733,3]]}]],["nfev",[45,{"position":[[0,4]]}],[],[5,{"position":[[4762,5]]}]],["1036",[],[],[5,{"position":[[4768,4]]}]],["xv",[51,{"position":[[0,2]]}],[],[5,{"position":[[4773,3]]},32,{"position":[[114,2]]},56,{"position":[[37,2]]}]],["funv",[54,{"position":[[0,4]]}],[],[5,{"position":[[4837,5]]},32,{"position":[[120,4]]}]],["1.174e+04",[],[],[5,{"position":[[4845,9]]}]],["1.535e+04",[],[],[5,{"position":[[4855,9]]}]],["0.000e+00",[],[],[5,{"position":[[4868,9],[4878,10]]}]],["elabor",[],[],[5,{"position":[[4896,9]]}]],["three",[],[],[5,{"position":[[4951,5]]}]],["def",[],[],[5,{"position":[[5038,3],[5240,3]]},8,{"position":[[2535,3]]}]],["demand(x",[],[],[5,{"position":[[5042,10]]}]],["n_rose",[],[],[5,{"position":[[5056,8],[5228,7],[5261,8],[5326,7],[5347,7]]}]],["price",[],[],[5,{"position":[[5065,6],[5136,6],[5189,5],[5270,6],[5356,5],[5498,5]]}]],["advertising_cost",[],[],[5,{"position":[[5072,17],[5200,17],[5277,17],[5383,17]]}]],["ground",[],[],[5,{"position":[[5098,6]]}]],["truth",[],[],[5,{"position":[[5105,5]]}]],["demand",[],[]
,[5,{"position":[[5118,6],[5173,6]]}]],["fall",[],[],[5,{"position":[[5125,5]]}]],["grow",[],[],[5,{"position":[[5147,5]]}]],["advertis",[],[],[5,{"position":[[5160,9],[5537,11]]}]],["20",[],[],[5,{"position":[[5182,2],[5526,3]]}]],["objective(x",[],[],[5,{"position":[[5244,13]]}]],["production_cost",[],[],[5,{"position":[[5302,16],[5364,16]]}]],["1.5",[],[],[5,{"position":[[5321,3]]}]],["profit",[],[],[5,{"position":[[5337,7],[5412,7]]}]],["100",[],[],[5,{"position":[[5442,5],[5530,5]]}]],["zero",[],[],[5,{"position":[[5454,4]]}]],["rose",[],[],[5,{"position":[[5470,5],[5508,4]]}]],["per",[],[],[5,{"position":[[5476,3],[5504,3]]},8,{"position":[[1120,3]]},17,{"position":[[46,4]]}]],["day",[],[],[5,{"position":[[5480,3]]}]],["5",[],[],[5,{"position":[[5487,4]]},8,{"position":[[2633,2],[2636,3],[2642,2],[2645,4],[2755,2],[2758,3],[2764,2],[2767,4]]}]],["9",[],[],[5,{"position":[[5492,4]]}]],["sold",[],[],[5,{"position":[[5513,4]]}]],["budget",[],[],[5,{"position":[[5549,6]]}]],["minimize(fun=object",[],[],[5,{"position":[[5605,23]]}]],["bounds=bound",[],[],[5,{"position":[[5629,14]]}]],["constraints=demand",[],[],[5,{"position":[[5644,19]]}]],["refer",[],[],[5,{"position":[[5664,10]]}]],["endr",[],[],[5,{"position":[[5681,7]]}]],["s.c",[],[],[5,{"position":[[5689,5]]}]],["sandrock",[],[],[5,{"position":[[5695,9]]}]],["c",[],[],[5,{"position":[[5705,2]]}]],["fock",[],[],[5,{"position":[[5710,6]]}]],["w.w",[],[],[5,{"position":[[5717,4]]}]],["j",[],[],[5,{"position":[[5782,1],[5983,1]]}]],["glob",[],[],[5,{"position":[[5784,4]]}]],["72",[],[],[5,{"position":[[5795,3]]}]],["181–217",[],[],[5,{"position":[[5799,7]]}]],["2018",[],[],[5,{"position":[[5807,7]]}]],["duan",[],[],[5,{"position":[[5857,5]]}]],["q.i",[],[],[5,{"position":[[5863,5]]}]],["gupta",[],[],[5,{"position":[[5869,6]]}]],["v.k",[],[],[5,{"position":[[5876,4]]}]],["sorooshian",[],[],[5,{"position":[[5883,11]]}]],["s",[],[],[5,{"position":[[5895,2]]}]],["approach",[],[],[5,{"position":[[5925,8]]}]],["effect",[],[],[5,{"position":[[5938,9]]},74,{"position":[[152,6],[252,6],[677,6]]}]],["effici",[],[],[5,{"position":[[5952,9]]}]],["appl",[],[],[5,{"position":[[5998,4]]}]],["76",[],[],[5,{"position":[[6003,3]]}]],["501–521",[],[],[5,{"position":[[6007,7]]}]],["1993",[],[],[5,{"position":[[6015,7]]}]],["koziel",[],[],[5,{"position":[[6058,7]]}]],["slawomir",[],[],[5,{"position":[[6066,9]]}]],["leifur",[],[],[5,{"position":[[6080,6]]}]],["leifsson",[],[],[5,{"position":[[6087,9]]}]],["new",[],[],[5,{"position":[[6140,3]]},23,{"position":[[116,3]]}]],["york",[],[],[5,{"position":[[6144,5]]}]],["springer",[],[],[5,{"position":[[6150,9]]}]],["2013",[],[],[5,{"position":[[6160,5]]}]],["doi.org/10.1007/978",[],[],[5,{"position":[[6173,19]]}]],["4614",[],[],[5,{"position":[[6195,4]]}]],["7551",[],[],[5,{"position":[[6200,4]]}]],["head",[],[],[5,{"position":[[6208,5]]}]],["t",[],[],[5,{"position":[[6214,3]]}]],["kumar",[],[],[5,{"position":[[6218,6]]}]],["m",[],[],[5,{"position":[[6225,3]]}]],["nahrstaedt",[],[],[5,{"position":[[6229,11]]}]],["h",[],[],[5,{"position":[[6241,3]]}]],["loupp",[],[],[5,{"position":[[6245,7]]}]],["g",[],[],[5,{"position":[[6253,3]]}]],["shcherbatyi",[],[],[5,{"position":[[6259,12]]}]],["2021",[],[],[5,{"position":[[6275,7]]}]],["optimize/scikit",[],[],[5,{"position":[[6290,15]]}]],["v0.9.0",[],[],[5,{"position":[[6315,9]]}]],["zenodo",[],[],[5,{"position":[[6325,7]]}]],["doi.org/10.5281/zenodo.5565057",[],[],[5,{"position":[[6340,30]]}]],["unspecifi",[],[],[8,{"position":[[284,12]]},71,{"position":[[476,12]]}]],["f
ashion",[],[],[8,{"position":[[350,7]]}]],["name",[],[],[8,{"position":[[376,5],[1840,7]]},62,{"position":[[504,5]]},74,{"position":[[2032,5]]},77,{"position":[[654,5]]}]],["respect",[],[],[8,{"position":[[382,13]]}]],["decis",[],[],[8,{"position":[[633,8]]}]],["maximum",[],[],[8,{"position":[[892,7]]},26,{"position":[[357,7],[748,7]]},62,{"position":[[701,7]]}]],["first",[],[],[8,{"position":[[1016,5]]},23,{"position":[[600,5]]}]],["fit",[],[],[8,{"position":[[1022,7],[1849,5]]},62,{"position":[[429,5]]},74,{"position":[[1819,6]]}]],["candid",[],[],[8,{"position":[[1090,9]]},11,{"position":[[45,10],[172,10]]},14,{"position":[[8,9],[183,9],[384,9],[1074,9],[1105,10],[1204,10],[1260,10]]},23,{"position":[[120,9],[531,10],[670,10],[730,9]]},26,{"position":[[209,10],[904,10]]}]],["recent",[],[],[8,{"position":[[1269,8]]},23,{"position":[[524,6]]}]],["next",[],[],[8,{"position":[[1316,4]]},14,{"position":[[36,4]]}]],["best",[],[],[8,{"position":[[1321,4]]},11,{"position":[[40,4]]},26,{"position":[[1260,4]]},29,{"position":[[61,4],[319,4],[435,4]]},74,{"position":[[884,4],[2387,4]]},77,{"position":[[354,4]]}]],["predict",[],[],[8,{"position":[[1332,11],[1860,9]]},11,{"position":[[331,9]]},14,{"position":[[508,9]]},62,{"position":[[440,9]]},74,{"position":[[401,11]]}]],["small",[],[],[8,{"position":[[1355,5]]}]],["such",[],[],[8,{"position":[[1387,4]]}]],["et",[],[],[8,{"position":[[1396,4],[1585,5],[1726,4]]},11,{"position":[[418,4]]}]],["fix",[],[],[8,{"position":[[1409,5]]},74,{"position":[[627,5]]}]],["gp",[],[],[8,{"position":[[1578,6],[1701,4]]},11,{"position":[[410,4]]}]],["gb",[],[],[8,{"position":[[1591,5],[1749,4]]},11,{"position":[[426,4]]}]],["regressor",[],[],[8,{"position":[[1618,10],[1805,9]]}]],["default='gp",[],[],[8,{"position":[[1629,12]]}]],["boost",[],[],[8,{"position":[[1764,10]]}]],["provid",[],[],[8,{"position":[[1788,7]]},23,{"position":[[0,7]]},26,{"position":[[825,8],[994,8]]},74,{"position":[[926,10]]}]],["api",[],[],[8,{"position":[[1835,4]]}]],["objective_func(x",[],[],[8,{"position":[[2539,18],[2814,18]]}]],["optimizer(fun=objective_func",[],[],[8,{"position":[[2593,29]]}]],["optimizer.run",[],[],[8,{"position":[[2663,15]]},29,{"position":[[454,15]]}]],["optimizer(fun=non",[],[],[8,{"position":[[2725,19]]}]],["suggested_x",[],[],[8,{"position":[[2776,11],[2842,12],[2877,12]]}]],["optimizer.ask",[],[],[8,{"position":[[2790,15]]},17,{"position":[[4,15]]},20,{"position":[[4,15]]}]],["optimizer.tell(i",[],[],[8,{"position":[[2859,17]]}]],["acq_func",[9,{"position":[[0,9]]}],[],[14,{"position":[[286,8],[869,8]]}]],["sambo.optimizer.acq_func",[],[10,{"position":[[0,25]]}],[]],["acquisit",[],[],[11,{"position":[[0,11]]},14,{"position":[[106,11],[332,11],[668,11]]}]],["select",[],[],[11,{"position":[[26,9]]},14,{"position":[[371,9]]}]],["sampl",[],[],[11,{"position":[[65,7]]},17,{"position":[[22,6]]},20,{"position":[[22,6]]},26,{"position":[[672,8]]},74,{"position":[[562,7],[1226,7],[1349,7],[1729,7]]},77,{"position":[[168,7],[245,6],[309,7]]}]],["current",[],[],[11,{"position":[[73,9]]},14,{"position":[[75,7]]}]],["defin",[],[],[11,{"position":[[83,7]]}]],["key",[],[],[11,{"position":[[91,5]]},62,{"position":[[519,4]]}]],["lcb",[],[],[11,{"position":[[98,5]]}]],["lower",[],[],[11,{"position":[[107,5]]},14,{"position":[[417,5],[826,5]]}]],["confid",[],[],[11,{"position":[[113,10]]},14,{"position":[[423,10],[832,10]]}]],["invers",[],[],[11,{"position":[[134,7]]}]],["analog",[],[],[11,{"position":[[142,6]]}]],["ucb",[],[],[11,{"position":[[152,6]]}]],["mean",[],[],[11,{"posit
ion":[[187,4]]},14,{"position":[[447,4],[472,4]]},74,{"position":[[1109,5]]}]],["kappa",[],[],[11,{"position":[[194,5],[277,5]]},14,{"position":[[454,5],[782,5]]},26,{"position":[[682,5]]}]],["std",[],[],[11,{"position":[[201,3]]},14,{"position":[[461,3],[482,3]]}]],["blank",[],[],[11,{"position":[[217,5]]}]],["line",[],[],[11,{"position":[[223,4]]}]],["here",[],[],[11,{"position":[[228,5]]}]],["bug",[],[],[11,{"position":[[234,3]]}]],["pdoc",[],[],[11,{"position":[[241,5]]}]],["estimator'",[],[],[11,{"position":[[318,11]]}]],["return_std",[],[],[11,{"position":[[362,11]]}]],["behavior",[],[],[11,{"position":[[374,9]]}]],["sambo.optimizer.ask",[],[13,{"position":[[0,19]]}],[]],["propos",[],[],[14,{"position":[[0,7],[206,8],[1065,8],[1120,8]]},23,{"position":[[232,10],[542,8]]},26,{"position":[[199,9],[918,7]]}]],["model(",[],[],[14,{"position":[[93,8]]},23,{"position":[[195,8]]},59,{"position":[[17,8]]}]],["dure",[],[],[14,{"position":[[255,6]]},26,{"position":[[834,6],[1003,6]]},68,{"position":[[78,6]]},74,{"position":[[838,6],[1254,6]]},77,{"position":[[51,6]]}]],["acq_funcs['lcb",[],[],[14,{"position":[[315,16],[565,16]]}]],["guid",[],[],[14,{"position":[[361,5]]},23,{"position":[[215,5]]}]],["i.",[],[],[14,{"position":[[440,5]]}]],["tip",[],[],[14,{"position":[[529,3]]}]],["source][_gh",[],[],[14,{"position":[[542,13]]}]],["implemet",[],[],[14,{"position":[[585,11]]}]],["extens",[],[],[14,{"position":[[631,9]]}]],["accommod",[],[],[14,{"position":[[644,11]]}]],["altern",[],[],[14,{"position":[[656,11]]},77,{"position":[[1259,14]]}]],["_gh",[],[],[14,{"position":[[691,7]]}]],["github.com/search?q=repo%3asambo",[],[],[14,{"position":[[706,32]]}]],["optimization%2fsambo%20acq_funcs&type=cod",[],[],[14,{"position":[[739,42]]}]],["list[float",[],[],[14,{"position":[[799,12]]},23,{"position":[[272,11],[346,12]]}]],["balanc",[],[],[14,{"position":[[885,8]]}]],["explor",[],[],[14,{"position":[[894,11]]},26,{"position":[[633,11]]}]],["n_cadid",[],[],[14,{"position":[[968,11]]}]],["shape",[],[],[14,{"position":[[1018,5]]},29,{"position":[[336,5]]}]],["n_bound",[],[],[14,{"position":[[1040,9]]},29,{"position":[[347,9]]}]],["optimizer.ask(n_candidates=2",[],[],[14,{"position":[[1217,29]]}]],["kappa=2",[],[],[14,{"position":[[1247,8]]}]],["1.1",[],[],[14,{"position":[[1278,4]]}]],["0.2",[],[],[14,{"position":[[1284,5]]}]],["0.8",[],[],[14,{"position":[[1292,4]]}]],["0.1",[],[],[14,{"position":[[1297,3]]}]],["points_per_dim",[15,{"position":[[0,14]]}],[],[]],["sambo.optimizer.points_per_dim",[],[16,{"position":[[0,30]]}],[]],["_predict_",[],[],[17,{"position":[[87,9]]}]],["max_points_per_it",[18,{"position":[[0,19]]}],[],[]],["sambo.optimizer.max_points_per_it",[],[19,{"position":[[0,35]]}],[]],["_at",[],[],[20,{"position":[[29,3]]}]],["most_",[],[],[20,{"position":[[33,5]]}]],["increas",[],[],[20,{"position":[[62,9]]}]],["comput",[],[],[20,{"position":[[72,11]]}]],["time",[],[],[20,{"position":[[84,5]]}]],["precis",[],[],[20,{"position":[[111,9]]}]],["sambo.optimizer.tel",[],[22,{"position":[[0,20]]}],[]],["increment",[],[],[23,{"position":[[8,11]]}]],["feedback",[],[],[23,{"position":[[20,8]]}]],["report",[],[],[23,{"position":[[49,9]]}]],["back",[],[],[23,{"position":[[59,4]]}]],["suggest",[],[],[23,{"position":[[103,9]]}]],["refin",[],[],[23,{"position":[[173,6]]}]],["underli",[],[],[23,{"position":[[184,10]]}]],["subsequ",[],[],[23,{"position":[[221,10]]}]],["observ",[],[],[23,{"position":[[288,8],[408,8]]},44,{"position":[[44,8]]}]],["input",[],[],[23,{"position":[[372,5]]}]],["omit",[],[],[23,
{"position":[[451,8]]}]],["fifo",[],[],[23,{"position":[[570,7]]}]],["way",[],[],[23,{"position":[[641,3]]}]],["around",[],[],[23,{"position":[[645,7]]}]],["optimizer.ask(n_candidates=3",[],[],[23,{"position":[[683,29]]}]],["irl",[],[],[23,{"position":[[750,3]]}]],["objective_valu",[],[],[23,{"position":[[787,16]]}]],["1.7",[],[],[23,{"position":[[806,5]]}]],["8",[],[],[23,{"position":[[815,3]]},74,{"position":[[2689,1]]},77,{"position":[[1028,1]]}]],["optimizer.tell(y=objective_valu",[],[],[23,{"position":[[823,34]]}]],["x=candid",[],[],[23,{"position":[[858,13]]}]],["sambo.optimizer.run",[],[25,{"position":[[0,19]]}],[]],["execut",[],[],[26,{"position":[[0,7]]}]],["updat",[],[],[26,{"position":[[281,8]]}]],["state",[],[],[26,{"position":[[304,5]]}]],["continu",[],[],[26,{"position":[[337,9]]},62,{"position":[[587,10]]}]],["until",[],[],[26,{"position":[[347,5]]}]],["reach",[],[],[26,{"position":[[402,7]]}]],["criteria",[],[],[26,{"position":[[428,8]]}]],["met",[],[],[26,{"position":[[441,4]]}]],["encapsul",[],[],[26,{"position":[[458,12]]}]],["entir",[],[],[26,{"position":[[475,6]]}]],["workflow",[],[],[26,{"position":[[495,9]]}]],["conveni",[],[],[26,{"position":[[515,10]]}]],["don't",[],[],[26,{"position":[[542,5]]}]],["fine",[],[],[26,{"position":[[553,4]]}]],["grain",[],[],[26,{"position":[[558,7]]}]],["control",[],[],[26,{"position":[[566,7]]}]],["over",[],[],[26,{"position":[[574,4]]}]],["individu",[],[],[26,{"position":[[579,10]]},74,{"position":[[59,10]]}]],["cycl",[],[],[26,{"position":[[618,6]]}]],["between",[],[],[26,{"position":[[625,7]]},71,{"position":[[73,7]]}]],["exploit",[],[],[26,{"position":[[649,12]]}]],["optimizer.run(max_iter=30",[],[],[26,{"position":[[1200,26]]}]],["print(result.x",[],[],[26,{"position":[[1231,15]]}]],["result.fun",[],[],[26,{"position":[[1247,11]]}]],["top_k",[27,{"position":[[0,5]]}],[],[]],["sambo.optimizer.top_k",[],[28,{"position":[[0,21]]}],[]],["retriev",[],[],[29,{"position":[[42,8],[184,9],[422,8]]}]],["top",[],[],[29,{"position":[[55,3],[167,3]]}]],["k",[],[],[29,{"position":[[59,1],[134,1],[198,1],[343,3]]}]],["far",[],[],[29,{"position":[[113,4]]},74,{"position":[[1371,3]]}]],["exce",[],[],[29,{"position":[[200,7]]}]],["list",[],[],[29,{"position":[[311,4]]},62,{"position":[[528,5]]},74,{"position":[[2040,4],[2145,4],[2171,4],[2302,4]]},77,{"position":[[662,4],[767,4],[793,4]]}]],["best_x",[],[],[29,{"position":[[474,7]]}]],["best_i",[],[],[29,{"position":[[482,6]]}]],["optimizer.top_k(1",[],[],[29,{"position":[[491,18]]}]],["sambo.optimizeresult",[],[31,{"position":[[0,20]]}],[]],["field",[],[],[32,{"position":[[26,6]]}]],["inherit",[],[],[32,{"position":[[37,9]]}]],["scipy.optimize.optimizeresult",[],[],[32,{"position":[[53,29]]}]],["attribut",[],[],[32,{"position":[[101,11]]},62,{"position":[[1373,10]]}]],["sambo.optimizeresult.success",[],[34,{"position":[[0,28]]}],[]],["whether",[],[],[35,{"position":[[0,7]]}]],["exit",[],[],[35,{"position":[[29,6]]}]],["sambo.optimizeresult.messag",[],[37,{"position":[[0,28]]}],[]],["detail",[],[],[38,{"position":[[5,8]]}]],["caus",[],[],[38,{"position":[[14,5]]}]],["sambo.optimizeresult.x",[],[40,{"position":[[0,22]]}],[]],["shape=(n_featur",[],[],[41,{"position":[[35,19]]}]],["sambo.optimizeresult.fun",[],[43,{"position":[[0,24]]}],[]],["aka",[],[],[44,{"position":[[36,3]]}]],["minimum",[],[],[44,{"position":[[53,8]]},68,{"position":[[351,7]]},71,{"position":[[421,7],[489,7],[518,7]]},74,{"position":[[895,7]]}]],["sambo.optimizeresult.nfev",[],[46,{"position":[[0,25]]}],[]],["nit",[48,{"position
":[[0,3]]}],[],[]],["sambo.optimizeresult.nit",[],[49,{"position":[[0,24]]}],[]],["sambo.optimizeresult.xv",[],[52,{"position":[[0,23]]}],[]],["tri",[],[],[53,{"position":[[38,6]]},62,{"position":[[558,3]]}]],["shape=(nfev",[],[],[53,{"position":[[59,12]]}]],["n_featur",[],[],[53,{"position":[[72,11]]}]],["sambo.optimizeresult.funv",[],[55,{"position":[[0,25]]}],[]],["sambo.optimizeresult.model",[],[58,{"position":[[0,26]]}],[]],["sambo.sambosearchcv",[],[61,{"position":[[0,19]]}],[]],["search",[],[],[62,{"position":[[22,6]]},74,{"position":[[577,6],[1312,6],[2273,6]]},77,{"position":[[895,6]]}]],["cross",[],[],[62,{"position":[[34,5]]}]],["valid",[],[],[62,{"position":[[40,10]]}]],["hyperparamet",[],[],[62,{"position":[[81,15]]}]],["pipelin",[],[],[62,{"position":[[127,9],[369,8]]}]],["those",[],[],[62,{"position":[[142,5]]}]],["bayessearchcv",[],[],[62,{"position":[[178,13]]}]],["learn_",[],[],[62,{"position":[[245,7]]}]],["hopefulli",[],[],[62,{"position":[[257,9]]}]],["larg",[],[],[62,{"position":[[284,5]]}]],["space",[],[],[62,{"position":[[300,6]]},74,{"position":[[584,6],[1319,5],[2280,6]]},77,{"position":[[902,6]]}]],["baseestim",[],[],[62,{"position":[[337,13]]}]],["param_grid",[],[],[62,{"position":[[459,10]]}]],["dictionari",[],[],[62,{"position":[[477,10]]}]],["str",[],[],[62,{"position":[[510,5]]},74,{"position":[[2048,4],[2704,3]]},77,{"position":[[670,4],[1109,3]]}]],["choic",[],[],[62,{"position":[[547,7]]}]],["both",[],[],[62,{"position":[[582,4]]}]],["rang",[],[],[62,{"position":[[608,6]]}]],["discrete/str",[],[],[62,{"position":[[619,15]]}]],["default=100",[],[],[62,{"position":[[685,11]]}]],["sceua",[],[],[62,{"position":[[770,8]]}]],["smbo",[],[],[62,{"position":[[779,8]]}]],["default='smbo",[],[],[62,{"position":[[798,14]]}]],["comparison",[],[],[62,{"position":[[881,11]]}]],["np.random.randomgener",[],[],[62,{"position":[[931,25]]}]],["none",[],[],[62,{"position":[[960,5]]}]],["basesearchcv",[],[],[62,{"position":[[1067,12]]}]],["score",[],[],[62,{"position":[[1082,8]]}]],["refit",[],[],[62,{"position":[[1105,6]]}]],["cv",[],[],[62,{"position":[[1113,3]]}]],["verbos",[],[],[62,{"position":[[1120,8]]}]],["pre_dispatch",[],[],[62,{"position":[[1132,13]]}]],["error_scor",[],[],[62,{"position":[[1149,12]]}]],["return_train_scor",[],[],[62,{"position":[[1165,19]]}]],["document",[],[],[62,{"position":[[1209,13]]}]],["opt_result_",[],[],[62,{"position":[[1389,11]]}]],["learn.org/stable/modules/grid_search.html",[],[],[62,{"position":[[1488,41]]}]],["plot",[63,{"position":[[0,4]]}],[],[65,{"position":[[35,8]]},68,{"position":[[0,4],[210,4]]},71,{"position":[[0,4],[280,4]]},74,{"position":[[0,4],[39,5],[137,5],[218,5],[333,4],[962,4],[1119,5],[1535,5],[2025,6],[2219,5],[2367,6],[2462,4],[2541,6],[2763,6],[2889,6]]},77,{"position":[[97,4],[121,5],[195,5],[232,5],[841,5],[971,6],[1315,4],[1345,4]]}]],["sambo.plot",[],[64,{"position":[[0,10]]}],[]],["modul",[],[],[65,{"position":[[4,6]]}]],["regret",[],[],[65,{"position":[[57,7]]},71,{"position":[[31,8],[48,6],[117,9]]}]],["partial",[],[],[65,{"position":[[65,7]]},74,{"position":[[20,7],[314,7],[473,7],[943,7],[1649,7],[2866,7]]}]],["matplotlib.pyplot",[],[],[65,{"position":[[136,17]]}]],["plt",[],[],[65,{"position":[[157,3]]}]],["plot_convergence(result",[],[],[65,{"position":[[321,24]]}]],["plot_regret(result",[],[],[65,{"position":[[350,19]]}]],["plot_objective(result",[],[],[65,{"position":[[374,22]]}]],["plot_evaluations(result",[],[],[65,{"position":[[401,24]]}]],["plt.show",[],[],[65,{"position":[[430,10]]}]],["plot_conver
g",[66,{"position":[[0,16]]}],[],[]],["sambo.plot.plot_converg",[],[67,{"position":[[0,27]]}],[]],["sever",[],[],[68,{"position":[[12,7]]},71,{"position":[[12,7]]}]],["trace",[],[],[68,{"position":[[32,7],[231,6]]},71,{"position":[[40,7],[301,6]]}]],["show",[],[],[68,{"position":[[40,7]]},74,{"position":[[50,4],[143,4],[243,4],[338,5],[720,5],[1125,4]]},77,{"position":[[147,4],[344,5],[1338,4]]}]],["error",[],[],[68,{"position":[[55,5]]}]],["evolv",[],[],[68,{"position":[[70,7]]}]],["tuple[str",[],[],[68,{"position":[[156,10]]},71,{"position":[[226,10]]}]],["result(",[],[],[68,{"position":[[187,9]]},71,{"position":[[257,9]]}]],["format",[],[],[68,{"position":[[247,7]]},71,{"position":[[317,7]]}]],["string",[],[],[68,{"position":[[259,6]]},71,{"position":[[329,6]]}]],["legend",[],[],[68,{"position":[[281,6]]},71,{"position":[[351,6]]}]],["label",[],[],[68,{"position":[[288,5]]},71,{"position":[[358,5]]},74,{"position":[[2066,6]]},77,{"position":[[688,6]]}]],["true_minimum",[],[],[68,{"position":[[311,12]]},71,{"position":[[381,12]]},74,{"position":[[908,12],[2287,12]]}]],["known",[],[],[68,{"position":[[396,6]]},71,{"position":[[466,6]]}]],["xscale",[],[],[68,{"position":[[403,7]]},71,{"position":[[560,7]]}]],["yscale",[],[],[68,{"position":[[411,6]]},71,{"position":[[568,6]]}]],["linear",[],[],[68,{"position":[[420,10]]},71,{"position":[[577,10]]},74,{"position":[[1946,10]]}]],["log",[],[],[68,{"position":[[431,7]]},71,{"position":[[588,7]]},74,{"position":[[1957,7]]}]],["default='linear",[],[],[68,{"position":[[449,16]]},71,{"position":[[606,16]]},74,{"position":[[1965,16]]}]],["scale",[],[],[68,{"position":[[470,6]]},71,{"position":[[627,6]]},74,{"position":[[1982,5]]}]],["ax",[],[],[68,{"position":[[485,5]]},71,{"position":[[642,5]]},77,{"position":[[1308,3]]}]],["fig",[],[],[68,{"position":[[504,3]]},71,{"position":[[661,3]]},74,{"position":[[2820,3]]},77,{"position":[[1447,3]]}]],["matplotlib.figure.figur",[],[],[68,{"position":[[510,24]]},71,{"position":[[667,24]]},74,{"position":[[2826,24]]},77,{"position":[[1453,24]]}]],["matplotlib",[],[],[68,{"position":[[539,10]]},71,{"position":[[696,10]]}]],["figur",[],[],[68,{"position":[[550,7]]},71,{"position":[[707,7]]},77,{"position":[[1195,6],[1230,6],[1274,6]]}]],["imag",[],[],[68,{"position":[[572,5]]},71,{"position":[[729,5]]},74,{"position":[[2910,5]]},77,{"position":[[1517,5]]}]],["convergence.svg",[],[],[68,{"position":[[578,16]]}]],["plot_regret",[69,{"position":[[0,11]]}],[],[]],["sambo.plot.plot_regret",[],[70,{"position":[[0,22]]}],[]],["cumul",[],[],[71,{"position":[[20,10]]}]],["differ",[],[],[71,{"position":[[62,10]]}]],["achiev",[],[],[71,{"position":[[81,8]]}]],["en.wikipedia.org/wiki/regret_(decision_theori",[],[],[71,{"position":[[134,46]]}]],["regret.svg",[],[],[71,{"position":[[735,11]]}]],["plot_object",[72,{"position":[[0,14]]}],[],[]],["sambo.plot.plot_object",[],[73,{"position":[[0,25]]}],[]],["matrix",[],[],[74,{"position":[[10,6],[2856,6]]},77,{"position":[[90,6],[1483,6]]}]],["influenc",[],[],[74,{"position":[[70,9],[380,9],[439,9],[730,9]]}]],["diagon",[],[],[74,{"position":[[128,8],[234,8]]},77,{"position":[[112,8],[211,8],[510,9]]}]],["averag",[],[],[74,{"position":[[419,10],[509,9],[660,8],[1748,9]]}]],["out",[],[],[74,{"position":[[430,4],[669,3]]},77,{"position":[[1202,3],[1217,3],[1281,3]]}]],["calcul",[],[],[74,{"position":[[495,10]]}]],["keep",[],[],[74,{"position":[[597,7]]}]],["regular",[],[],[74,{"position":[[636,7]]}]],["black",[],[],[74,{"position":[[797,5]]}]],["indic",[],[],[74,{"position":[[808,8
],[870,9],[2189,7]]},77,{"position":[[275,10],[811,7]]}]],["red",[],[],[74,{"position":[[861,3],[2347,3]]},77,{"position":[[335,3]]}]],["star",[],[],[74,{"position":[[865,4]]},77,{"position":[[339,4]]}]],["turn",[],[],[74,{"position":[[1021,4]]}]],["therefor",[],[],[74,{"position":[[1167,9]]}]],["quit",[],[],[74,{"position":[[1180,5]]}]],["imprecis",[],[],[74,{"position":[[1186,10]]}]],["especi",[],[],[74,{"position":[[1197,10],[1283,10]]}]],["rel",[],[],[74,{"position":[[1211,10]]}]],["collect",[],[],[74,{"position":[[1244,9]]}]],["region",[],[],[74,{"position":[[1297,7],[1363,7]]}]],["spars",[],[],[74,{"position":[[1340,8]]}]],["away",[],[],[74,{"position":[[1375,4]]}]],["level",[],[],[74,{"position":[[1470,6],[1505,6]]}]],["default=10",[],[],[74,{"position":[[1484,10]]},77,{"position":[[455,10]]}]],["draw",[],[],[74,{"position":[[1515,4]]}]],["contour",[],[],[74,{"position":[[1527,7],[2017,7],[2533,7],[2755,7]]}]],["directli",[],[],[74,{"position":[[1548,8],[2777,8]]}]],["plt.contourf",[],[],[74,{"position":[[1561,14],[2790,14]]}]],["resolut",[],[],[74,{"position":[[1578,10]]}]],["default=16",[],[],[74,{"position":[[1596,10]]}]],["along",[],[],[74,{"position":[[1668,5]]}]],["n_sampl",[],[],[74,{"position":[[1690,9]]}]],["default=250",[],[],[74,{"position":[[1707,11]]}]],["n_point",[],[],[74,{"position":[[1793,8]]}]],["last",[],[],[74,{"position":[[1814,4]]}]],["size",[],[],[74,{"position":[[1871,4]]},77,{"position":[[1037,4]]}]],["default=2",[],[],[74,{"position":[[1885,9]]},77,{"position":[[1051,9]]}]],["height",[],[],[74,{"position":[[1895,6]]},77,{"position":[[1061,6]]}]],["inch",[],[],[74,{"position":[[1906,7]]},77,{"position":[[1072,7]]}]],["subplot/facet",[],[],[74,{"position":[[1922,14]]},77,{"position":[[1088,14]]}]],["zscale",[],[],[74,{"position":[[1937,6]]}]],["z",[],[],[74,{"position":[[2003,1]]}]],["axi",[],[],[74,{"position":[[2005,4]]}]],["default=non",[],[],[74,{"position":[[2053,12],[2158,12],[2318,12]]},77,{"position":[[675,12],[780,12]]}]],["x1",[],[],[74,{"position":[[2121,5]]},77,{"position":[[743,5]]}]],["plot_dim",[],[],[74,{"position":[[2133,9]]},77,{"position":[[755,9]]}]],["constant",[],[],[74,{"position":[[2246,8]]},77,{"position":[[868,8]]}]],["plot_max_point",[],[],[74,{"position":[[2428,16]]}]],["default=200",[],[],[74,{"position":[[2450,11]]}]],["randomli",[],[],[74,{"position":[[2485,8]]}]],["chosen",[],[],[74,{"position":[[2494,6]]}]],["overlay",[],[],[74,{"position":[[2518,10]]}]],["jitter",[],[],[74,{"position":[[2548,6],[2586,6]]},77,{"position":[[909,6],[946,6]]}]],["default=.02",[],[],[74,{"position":[[2564,11]]},77,{"position":[[925,11]]}]],["amount",[],[],[74,{"position":[[2576,6]]}]],["add",[],[],[74,{"position":[[2596,3]]},77,{"position":[[956,3]]}]],["look",[],[],[74,{"position":[[2647,5]]},77,{"position":[[986,5]]}]],["clear",[],[],[74,{"position":[[2653,5]]},77,{"position":[[992,5]]}]],["categori",[],[],[74,{"position":[[2663,10]]},77,{"position":[[1002,10]]}]],["up",[],[],[74,{"position":[[2677,2]]},77,{"position":[[1016,2]]}]],["item",[],[],[74,{"position":[[2691,6]]},77,{"position":[[1030,6]]}]],["cmap",[],[],[74,{"position":[[2698,5]]},77,{"position":[[1103,5]]}]],["colormap",[],[],[74,{"position":[[2711,9]]},77,{"position":[[1116,9]]}]],["default='viridis_r",[],[],[74,{"position":[[2721,19]]}]],["color",[],[],[74,{"position":[[2741,5]]},77,{"position":[[269,5],[1143,5]]}]],["map",[],[],[74,{"position":[[2747,3]]},77,{"position":[[1149,3]]}]],["sub",[],[],[74,{"position":[[2885,3]]}]],["objective.svg",[],[],[74,{"position":[[2916,14]]}]],[
"plot_evalu",[75,{"position":[[0,16]]}],[],[]],["sambo.plot.plot_evalu",[],[76,{"position":[[0,27]]}],[]],["visual",[],[],[77,{"position":[[0,9]]}]],["creat",[],[],[77,{"position":[[77,7]]}]],["histogram",[],[],[77,{"position":[[131,10],[492,10]]}]],["distribut",[],[],[77,{"position":[[152,12]]}]],["scatter",[],[],[77,{"position":[[224,7],[963,7],[1173,7]]}]],["bin",[],[],[77,{"position":[[443,4],[476,4],[617,4]]}]],["wherea",[],[],[77,{"position":[[560,7]]}]],["equal",[],[],[77,{"position":[[622,5]]}]],["distinct",[],[],[77,{"position":[[637,8]]}]],["ratio",[],[],[77,{"position":[[937,5]]}]],["default='summ",[],[],[77,{"position":[[1126,16]]}]],["todo",[],[],[77,{"position":[[1190,4]]}]],["lay",[],[],[77,{"position":[[1213,3]]}]],["multipl",[],[],[77,{"position":[[1221,8]]}]],["side",[],[],[77,{"position":[[1245,4],[1253,5]]}]],["onto",[],[],[77,{"position":[[1320,5]]}]],["testdocs.test_make_doc_plot",[],[],[77,{"position":[[1400,30]]}]],["subplot",[],[],[77,{"position":[[1493,9]]}]],["evaluations.svg",[],[],[77,{"position":[[1523,16]]}]]],"pipeline":["stemmer"]},[{"ref":"sambo","url":0,"doc":" SAMBO - Sequential and Model-Based Optimization [in Python] Sambo is a global optimization framework for finding approximate global optima † of arbitrary high-dimensional objective functions in the least number of function evaluations . Function evaluations are considered the \"expensive\" resource (it can sometimes take weeks to obtain results!), so it's important to find good-enough solutions in as few steps as possible (whence _sequential_). The main tools in this Python optimization toolbox are: function sambo.minimize() , a near drop-in replacement for [ scipy.optimize.minimize() ][sp_opt_min], class Optimizer with an ask-and-tell user interface, supporting arbitrary scikit-learn-like surrogate models, with Bayesian optimization estimators like [Gaussian processes] and [Extra Trees] built in, SamboSearchCV , a much faster drop-in replacement for scikit-learn's [ GridSearchCV ][skl_gridsearchcv] and similar exhaustive machine-learning hyper-parameter tuning methods, but compared to unpredictable stochastic methods, _informed_. The algorithms and methods implemented by or used in this package are: [simplical homology global optimization] (SHGO) , reinitializing the [implementation from SciPy], surrogate machine learning model -based optimization, [shuffled complex evolution] ( SCE-UA with improvements). [simplical homology global optimization]: http: doi.org/10.1007/s10898-018-0645-y [implementation from SciPy]: https: docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.shgo.html [shuffled complex evolution]: https: doi.org/10.1007/BF00939380 This open-source project was inspired by _scikit-optimize_ . The project is one of the better optimizers available according to [benchmark](https: sambo-optimization.github.io/ benchmark). † The contained algorithms seek to _minimize_ your objective f(x) . If you instead need the _maximum_, simply minimize -f(x) . 
💡 [Gaussian processes]: https: www.gaussianprocess.org/gpml/chapters/RW.pdf [Extra Trees]: https: doi.org/10.1007/s10994-006-6226-1 [kernel ridge regression]: https: scikit-learn.org/stable/modules/kernel_ridge.html [sp_opt_min]: https: docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html [skl_gridsearchcv]: https: scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html","name":"sambo","i":0},{"ref":"sambo.minimize","url":0,"doc":"Find approximate optimum of an objective function in the least number of evaluations. Parameters fun : Callable np.ndarray], float], optional Objective function to minimize. Must take a single array-like argument x (parameter combination) and return a scalar y (cost value). x0 : tuple or list[tuple], optional Initial guess(es) or starting point(s) for the optimization. args : tuple, optional Additional arguments to pass to the objective function and constraints. bounds : list[tuple], optional Bounds for parameter variables. Should be a sequence of (min, max) pairs for each dimension, or an enumeration of nominal values. For any dimension, if min and max are integers , the dimension is assumed to be _integral_ on interval [min, max) (see warning below). If min or max are floats , the dimension is assumed to be _real_. In all other cases, including if more than two values are specified, the dimension is assumed to be that ([nominal]) _enumeration_ of values. See _Examples_ below. note Nominals are represented as ordinals Categorical ([nominal]) enumerations, although often not inherently ordered, are internally represented as integral dimensions. If this appears to significantly affect your results (e.g. if your nominals span many cases), you may need to [one-hot encode] your nominal variables manually. [nominal]: https: en.wikipedia.org/wiki/Level_of_measurement Nominal_level [one-hot encode]: https: en.wikipedia.org/wiki/One-hot warning Mind the dot If optimizing your problem fails to produce expected results, make sure you're not specifying integer dimensions where real (floating) values are expected. E.g.: bounds = [(-2, 2)] * 2 # A 2D grid of {-2, -1, 0, 1}² bounds = [(-2., 2.)] # A 1D dimension of ~ np.linspace(-2., 2., 1/eps) constraints : Callable np.ndarray], bool], optional Function representing constraints. Must return True iff the parameter combination x satisfies the constraints. >>> minimize( ..., constraints=lambda x: (lb = 3 * len(bounds) and complex_size = 2 * len(bounds) + 1 , but we find good performance using complex_size=2 , allowing for more complexes and more complex evolutions for given max_iter ). [simplicial homology global optimization]: http: doi.org/10.1007/s10898-018-0645-y [assures quick convergence]: https: shgo.readthedocs.io/en/latest/docs/README.html simplicial-homology-global-optimisation-theory [surrogate model-based optimization]: https: en.wikipedia.org/wiki/Surrogate_model [shuffled complex evolution (SCE-UA)]: https: doi.org/10.1007/BF00939380 [Nelder-Mead]: https: en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method [canonical literature]: https: doi.org/10.1016/0022-1694(94)90057-4 caution Default method SHGO is only appropriate for Lipschitz-smooth functions Smooth functions have gradients that vary gradually, while non-smooth functions exhibit abrupt changes (e.g. neighboring values of categorical variables), sharp corners (e.g. function abs() ), discontinuities (e.g. function tan() ), or unbounded growth (e.g. function exp() ). 
If your objective function is more of the latter kind, you might prefer to set one of the other methods. n_iter_no_change : int, optional Number of iterations with no improvement before stopping. Default is method-dependent. tol : float, default FLOAT32_PRECISION Tolerance for convergence. Optimization stops when found optimum improvements are below this threshold. y0 : float or tuple[float], optional Initial value(s) of the objective function corresponding to x0 . callback : Callable OptimizeResult], bool], optional A callback function that is called after each iteration. The optimization stops if the callback returns True or raises StopIteration . n_jobs : int, default 1 Number of objective function evaluations to run in parallel. Most applicable when n_candidates > 1. disp : bool, default False Display progress and intermediate results. rng : int or np.random.RandomState or np.random.Generator, optional Random number generator or seed for reproducibility. kwargs : dict, optional Additional optional parameters to pass to optimization function. Popular options are: for method=\"shgo\" : n_init (number of initial points), sampling_method=\"halton\" , for method=\"smbo\" : n_init , n_candidates , n_models , estimator (for explanation, see class sambo.Optimizer ), for method=\"sceua\" : n_complexes , complex_size (as in [SCE-UA] algorithm), [SCE-UA]: https: doi.org/10.1007/BF00939380 Examples Basic constrained 10-dimensional example: >>> from scipy.optimize import rosen >>> from sambo import minimize >>> result = minimize(rosen, bounds=[(-2, 2)] * 10, ... constraints=lambda x: sum(x) <= len(x)) >>> result message: Optimization terminated successfully. success: True fun: 0.0 x: [1 1 1 1 1 1 1 1 1 1] nfev: 1036 xv: [[-2 -2 ... -2 1] [-2 -2 ... -2 1] ... [1 1 ... 1 1] [1 1 ... 1 1]] funv: [ 1.174e+04 1.535e+04 ... 0.000e+00 0.000e+00] A more elaborate example, minimizing an objective function of three variables: one integral, one real, and one nominal variable (see bounds= ). >>> def demand(x): ... n_roses, price, advertising_costs = x ... # Ground truth model: Demand falls with price, but grows if you advertise ... demand = 20 - 2 * price + .1 * advertising_costs ... return n_roses <= demand >>> def objective(x): ... n_roses, price, advertising_costs = x ... production_costs = 1.5 * n_roses ... profits = n_roses * price - production_costs - advertising_costs ... return -profits >>> bounds = [ ... (0, 100), # From zero to at most 100 roses per day ... (.5, 9.), # Price per rose sold ... (10, 20, 100), # Advertising budget ... ] >>> from sambo import minimize >>> result = minimize(fun=objective, bounds=bounds, constraints=demand)
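The callback= parameter likewise allows ad-hoc early stopping; a minimal sketch (the threshold here is arbitrary and purely illustrative): >>> result = minimize(rosen, bounds=[(-2, 2)] * 10, ... callback=lambda result: result.fun < 1e-3) # Stop early once good enough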
References Endres, S.C., Sandrock, C. & Focke, W.W. A simplicial homology algorithm for Lipschitz optimisation. J Glob Optim 72, 181–217 (2018). https: doi.org/10.1007/s10898-018-0645-y Duan, Q.Y., Gupta, V.K. & Sorooshian, S. Shuffled complex evolution approach for effective and efficient global minimization. J Optim Theory Appl 76, 501–521 (1993). https: doi.org/10.1007/BF00939380 Koziel, Slawomir, and Leifur Leifsson. Surrogate-based modeling and optimization. New York: Springer, 2013. https: doi.org/10.1007/978-1-4614-7551-4 Head, T., Kumar, M., Nahrstaedt, H., Louppe, G., & Shcherbatyi, I. (2021). scikit-optimize/scikit-optimize (v0.9.0). Zenodo. https: doi.org/10.5281/zenodo.5565057","func":1,"name":"minimize","i":1},{"ref":"sambo.Optimizer","url":0,"doc":"A sequential optimizer that optimizes an objective function using a surrogate model. Parameters fun : Callable np.ndarray], float], optional Objective function to minimize. Must take a single array-like argument x (parameter combination) and return a scalar y (cost value). When unspecified, the Optimizer can be used iteratively in an ask-tell fashion using the methods named respectively. x0 : tuple | list[tuple], optional Initial guess(es) or starting point(s) for the optimization. args : tuple, optional Additional arguments to pass to the objective function and constraints. bounds : list[tuple], optional Bounds for the decision variables. A sequence of (min, max) pairs for each dimension. constraints : Callable np.ndarray], bool], optional Function representing constraints. Must return True iff the parameter combination x satisfies the constraints. max_iter : int, optional Maximum number of iterations allowed. n_init : int, optional Number of initial evaluations of the objective function before first fitting the surrogate model. n_candidates : int, optional Number of candidate solutions generated per iteration. n_iter_no_change : int, default 10 Number of iterations with no improvement before stopping. n_models : int, default 1 Number of most-recently-generated surrogate models to use for next best-point prediction. Useful for small and randomized estimators such as \"et\" with no fixed rng= . tol : float, default FLOAT32_PRECISION Tolerance for convergence. Optimization stops when found optimum improvements are below this threshold. estimator : {'gp', 'et', 'gb'} or scikit-learn-like regressor, default='gp' Surrogate model for the optimizer. Popular options include \"gp\" (Gaussian process), \"et\" (extra trees), or \"gb\" (gradient boosting). You can also provide your own regressor with a scikit-learn API (namely fit() and predict() methods). y0 : float or tuple[float], optional Initial value(s) of the objective function corresponding to x0 . callback : Callable OptimizeResult], bool], optional A callback function that is called after each iteration. The optimization stops if the callback returns True or raises StopIteration . n_jobs : int, default 1 Number of objective function evaluations to run in parallel. Most applicable when n_candidates > 1. disp : bool, default False Display progress and intermediate results. rng : int or np.random.RandomState or np.random.Generator, optional Random number generator or seed for reproducibility. Examples >>> from sambo import Optimizer >>> def objective_func(x): ... return sum(x**2) >>> optimizer = Optimizer(fun=objective_func, bounds=[(-5, 5), (-5, 5)]) >>> result = optimizer.run() Using the ask-tell interface: >>> optimizer = Optimizer(fun=None, bounds=[(-5, 5), (-5, 5)]) >>> suggested_x = optimizer.ask() >>> y = [objective_func(x) for x in suggested_x] >>> optimizer.tell(y, suggested_x)
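In a full ask-tell session this loop would typically be repeated, the surrogate model(s) being refined by each call to tell() ; a rough sketch (the iteration count is arbitrary): >>> for _ in range(30): ... suggested_x = optimizer.ask() ... y = [objective_func(x) for x in suggested_x] ... optimizer.tell(y, suggested_x)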
","name":"Optimizer","i":2},{"ref":"sambo.Optimizer.ACQ_FUNCS","url":0,"doc":"Acquisition functions for selecting the best candidates from the sample. Currently defined keys: \"LCB\" — lower confidence bound (an inverse analog of \"UCB\") which orders candidates by mean - kappa * std . [ ]: (No blank line here! bug in pdoc) note To make any use of the kappa parameter, it is important for the estimator's predict() method to implement return_std= behavior. All built-in estimators ( \"gp\" , \"et\" , \"gb\" ) do so.","name":"ACQ_FUNCS","i":3},{"ref":"sambo.Optimizer.ask","url":0,"doc":"Propose candidate solutions for the next objective evaluation based on the current surrogate model(s) and acquisition function. Parameters n_candidates : int, optional Number of candidate solutions to propose. If not specified, the default value set during initialization is used. acq_func : Callable, default ACQ_FUNCS['LCB'] Acquisition function used to guide the selection of candidate solutions. By default, lower confidence bound (i.e. mean - kappa * std where mean and std are surrogate models' predicted results). tip [See the source][_ghs] for how ACQ_FUNCS['LCB'] is implemented. The passed parameters are open to extension to accommodate alternative acquisition functions. [_ghs]: https: github.com/search?q=repo%3Asambo-optimization%2Fsambo%20ACQ_FUNCS&type=code kappa : float or list[float], default 0 The lower-confidence-bound parameter, used by acq_func , that balances exploration vs. exploitation. Can also be an array of values to use sequentially for n_candidates . Returns - np.ndarray An array of shape (n_candidates, n_bounds) containing the proposed candidate solutions. Notes - Candidates are proposed in parallel according to n_jobs when n_candidates > 1 . Examples >>> candidates = optimizer.ask(n_candidates=2, kappa=2) >>> candidates array([[ 1.1, -0.2], [ 0.8, 0.1]])","func":1,"name":"ask","i":4},{"ref":"sambo.Optimizer.POINTS_PER_DIM","url":0,"doc":"In Optimizer.ask() , sample this many points (per dimension) and use the estimator to _predict_ the objective values.","name":"POINTS_PER_DIM","i":5},{"ref":"sambo.Optimizer.MAX_POINTS_PER_ITER","url":0,"doc":"In Optimizer.ask() , sample _at most_ this many points. This increases computation time, but may also improve precision and convergence significantly.","name":"MAX_POINTS_PER_ITER","i":6},{"ref":"sambo.Optimizer.tell","url":0,"doc":"Provide incremental feedback to the optimizer by reporting back the objective function values ( y ) at suggested or new candidate points ( x ). This allows the optimizer to refine its underlying model(s) and better guide subsequent proposals. Parameters y : float or list[float] The observed value(s) of the objective function. x : float or list[float], optional The input point(s) corresponding to the observed objective function values y . If omitted, the optimizer assumes that the y values correspond to the most recent candidates proposed by the ask method (FIFO). warning The function first takes y , then x , not the other way around! Examples >>> candidates = optimizer.ask(n_candidates=3) >>> ... # Evaluate candidate solutions IRL and tell it to the optimizer >>> objective_values = [1.7, 3, .8] >>> optimizer.tell(y=objective_values, x=candidates)","func":1,"name":"tell","i":7},{"ref":"sambo.Optimizer.run","url":0,"doc":"Execute the optimization process for (at most) a specified number of iterations (function evaluations) and return the optimization result. This method performs sequential optimization by iteratively proposing candidates using method ask() , evaluating the objective function, and updating the optimizer state with method tell() . This continues until the maximum number of iterations ( max_iter ) is reached or other stopping criteria are met. This method encapsulates the entire optimization workflow, making it convenient to use when you don't need fine-grained control over individual steps ( ask and tell ). It cycles between exploration and exploitation by randomly sampling kappa appropriately. Parameters max_iter : int, optional The maximum number of iterations to perform. If not specified, the default value provided during initialization is used. n_candidates : int, optional Number of candidates to propose and evaluate in each iteration. If not specified, the default value provided during initialization is used. 
Returns - OptimizeResult: OptimizeResult Results of the optimization process. Examples Run an optimization with a specified number of iterations: >>> result = optimizer.run(max_iter=30) >>> print(result.x, result.fun) # Best x, y","func":1,"name":"run","i":8},{"ref":"sambo.Optimizer.top_k","url":0,"doc":"Based on their objective function values, retrieve the top-k best solutions found by the optimization process so far. Parameters k : int, default 1 The number of top solutions to retrieve. If k exceeds the number of evaluated solutions, all available solutions are returned. Returns - X : np.ndarray A list of best points with shape (k, n_bounds) . y : np.ndarray Objective values at points of X . Examples Retrieve the best solution: >>> optimizer.run() >>> best_x, best_y = optimizer.top_k(1)","func":1,"name":"top_k","i":9},{"ref":"sambo.OptimizeResult","url":0,"doc":"Optimization result. Most fields are inherited from scipy.optimize.OptimizeResult , with additional attributes: xv , funv , model .","name":"OptimizeResult","i":10},{"ref":"sambo.OptimizeResult.success","url":0,"doc":"Whether or not the optimizer exited successfully.","name":"success","i":11},{"ref":"sambo.OptimizeResult.message","url":0,"doc":"More detailed cause of optimization termination.","name":"message","i":12},{"ref":"sambo.OptimizeResult.x","url":0,"doc":"The solution of the optimization, shape=(n_features,) .","name":"x","i":13},{"ref":"sambo.OptimizeResult.fun","url":0,"doc":"Value of objective function at x , aka the observed minimum.","name":"fun","i":14},{"ref":"sambo.OptimizeResult.nfev","url":0,"doc":"Number of objective function evaluations.","name":"nfev","i":15},{"ref":"sambo.OptimizeResult.nit","url":0,"doc":"Number of iterations performed by the optimization algorithm.","name":"nit","i":16},{"ref":"sambo.OptimizeResult.xv","url":0,"doc":"All the parameter sets that have been tried, in sequence, shape=(nfev, n_features) .","name":"xv","i":17},{"ref":"sambo.OptimizeResult.funv","url":0,"doc":"Objective function values at points xv .","name":"funv","i":18},{"ref":"sambo.OptimizeResult.model","url":0,"doc":"The optimization model(s) used, if any.","name":"model","i":19},{"ref":"sambo.SamboSearchCV","url":0,"doc":"SAMBO hyper-parameter search with cross-validation that can be used to optimize hyperparameters of machine learning estimator pipelines like those of scikit-learn. Similar to BayesSearchCV from _scikit-optimize_ or GridSearchCV from _scikit-learn_, but hopefully much faster for large parameter spaces . Parameters estimator : BaseEstimator The base model or pipeline to optimize parameters for. It needs to implement fit() and predict() methods. param_grid : dict Dictionary with parameter names (str) as keys and lists of parameter choices to try as values. Supports both continuous parameter ranges and discrete/string parameter enumerations. max_iter : int, optional, default=100 The maximum number of iterations for the optimization. method : {'shgo', 'sceua', 'smbo'}, optional, default='smbo' The optimization algorithm to use. See method sambo.minimize() for comparison. rng : int or np.random.RandomState or np.random.Generator or None, optional Random seed for reproducibility. kwargs : dict, optional Additional parameters to pass to BaseSearchCV ( scoring= , n_jobs= , refit= , cv= , verbose= , pre_dispatch= , error_score= , return_train_score= ). For explanation, see documentation on [ GridSearchCV ][skl_gridsearchcv]. 
[skl_gridsearchcv]: https: scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html Attributes opt_result_ : OptimizeResult The result of the optimization process. See Also 1: https: scikit-learn.org/stable/modules/grid_search.html
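Examples A rough usage sketch (the estimator and grid are illustrative only, with X, y your training data): >>> from sklearn.ensemble import RandomForestClassifier >>> from sambo import SamboSearchCV >>> search = SamboSearchCV( ... RandomForestClassifier(), ... param_grid={'n_estimators': [10, 50, 100], 'max_depth': [3, 5, 10]}, ... max_iter=20) >>> search.fit(X, y) >>> print(search.best_params_, search.opt_result_.fun)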
This means the plots show an \"estimate of an estimate\" and may therefore be quite imprecise, especially if relatively few samples have been collected during the optimization, and especially in regions of the search-space that have been sparsely sampled (e.g. regions far away from the found optimum). Parameters result : OptimizeResult The optimization result. levels : int, default=10 Number of levels to draw on the contour plot, passed directly to plt.contourf() . resolution : int, default=16 Number of points at which to evaluate the partial dependence along each dimension. n_samples : int, default=250 Number of samples to use for averaging the model function at each of the n_points . estimator Last fitted model for estimating the objective function. size : float, default=2 Height (in inches) of each subplot/facet. zscale : {'linear', 'log'}, default='linear' Scale to use for the z axis of the contour plots. names : list of str, default=None Labels of the dimension variables. Defaults to ['x0', 'x1', .] . plot_dims : list of int, default=None List of dimension indices to be included in the plot. Default uses all non-constant dimensions of the search-space. true_minimum : list of floats, default=None Value(s) of the red point(s) in the plots. Default uses best found X parameters from the result. plot_max_points: int, default=200 Plot at most this many randomly-chosen evaluated points overlaying the contour plots. jitter : float, default=.02 Amount of jitter to add to categorical and integer dimensions. Default looks clear for categories of up to about 8 items. cmap: str or Colormap, default='viridis_r' Color map for contour plots, passed directly to plt.contourf() . Returns - fig : matplotlib.figure.Figure A 2D matrix of partial dependence sub-plots. Example - image /objective.svg","func":1,"name":"plot_objective","i":24},{"ref":"sambo.plot.plot_evaluations","url":1,"doc":"Visualize the order in which points were evaluated during optimization. This creates a 2D matrix plot where the diagonal plots are histograms that show distribution of samples for each variable. Plots below the diagonal are scatter-plots of the sample points, with the color indicating the order in which the samples were evaluated. A red star shows the best found parameters. Parameters result : OptimizeResult The optimization result. bins : int, default=10 Number of bins to use for histograms on the diagonal. This value is used for real dimensions, whereas categorical and integer dimensions use number of bins equal to their distinct values. names : list of str, default=None Labels of the dimension variables. Defaults to ['x0', 'x1', .] . plot_dims : list of int, default=None List of dimension indices to be included in the plot. Default uses all non-constant dimensions of the search-space. jitter : float, default=.02 Ratio of jitter to add to scatter plots. Default looks clear for categories of up to about 8 items. size : float, default=2 Height (in inches) of each subplot/facet. cmap: str or Colormap, default='summer' Color map for the sequence of scatter points. todo Figure out how to lay out multiple Figure objects side-by-side. Alternatively, figure out how to take parameter ax= to plot onto. Then we can show a plot of evaluations for each of the built-in methods ( TestDocs.test_make_doc_plots() ). Returns - fig : matplotlib.figure.Figure A 2D matrix of subplots. 
Example - image /evaluations.svg","func":1,"name":"plot_evaluations","i":25}]]; let URLS=[ +"sambo/index.html", +"sambo/plot.html" +] \ No newline at end of file diff --git a/doc/sambo/index.html b/doc/sambo/index.html new file mode 100644 index 0000000..074625f --- /dev/null +++ b/doc/sambo/index.html @@ -0,0 +1,1882 @@ + + + + + + +Codestin Search App + + + + + + + + + + + + + + +
+
+
+

Package sambo

+
+
+

SAMBO - Sequential and Model-Based Optimization [in Python]

+

Sambo is a global optimization framework for finding approximate global optima† +of arbitrary high-dimensional objective functions in the fewest possible function evaluations. +Function evaluations are considered the "expensive" resource +(obtaining results can sometimes take weeks!), +so it's important to find good-enough solutions in +as few steps as possible (whence sequential).

+

The main tools in this Python optimization toolbox are:

+
    +
  • function minimize(), a near drop-in replacement for scipy.optimize.minimize(),
  • +
  • class Optimizer with an ask-and-tell user interface, +supporting arbitrary scikit-learn-like surrogate models, +with Bayesian optimization estimators like Gaussian processes and Extra Trees +built in,
  • +
  • SamboSearchCV, a much faster drop-in replacement for +scikit-learn's GridSearchCV and similar exhaustive +machine-learning hyper-parameter tuning methods, yet, +unlike unpredictable stochastic methods, an informed one (see the sketch below).
  • +
+
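+
For illustration, a minimal sketch of setting up such a search (the chosen estimator, parameter ranges, and dataset arrays X, y are hypothetical placeholders):
+
>>> from sklearn.svm import SVC
+>>> from sambo import SamboSearchCV
+>>> param_grid = {'C': (1e-3, 1e3),                     # Real-valued range
+...               'kernel': ['linear', 'rbf', 'poly']}  # Nominal choices
+>>> search = SamboSearchCV(SVC(), param_grid, max_iter=50, rng=0)
+>>> search.fit(X, y)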

The algorithms and methods implemented by or used in this package are:

+ +

This open-source project was inspired by scikit-optimize. +According to the benchmark, it ranks among the best-performing optimizers available.

+

† The contained algorithms seek to minimize your objective f(x). +If you instead need the maximum, simply minimize -f(x). 💡
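+
For example, a quick sketch of that trick on a toy objective:
+
>>> from sambo import minimize
+>>> def gain(x):  # A toy function we want to MAXIMIZE
+...     return 3 - (x[0] - 1)**2
+>>> result = minimize(lambda x: -gain(x), bounds=[(-5., 5.)])
+>>> best_x, best_gain = result.x, -result.fun  # Undo the negation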

+
+
+

Sub-modules

+
+
sambo.plot
+
+

The module contains functions for plotting +convergence, regret, partial dependence, sequence of evaluations +…

+
+
+
+
+
+
+

Functions

+
+
+def minimize(fun: Callable[[numpy.ndarray], float],
x0: tuple[float] | list[tuple[float]] | None = None,
*,
args: tuple = (),
bounds: list[tuple] | None = None,
constraints: Callable[[numpy.ndarray], bool] | scipy.optimize._constraints.NonlinearConstraint | None = None,
max_iter: int = 2147483647,
method: Literal['shgo', 'sceua', 'smbo'] = 'shgo',
tol: float = 1e-06,
n_iter_no_change: int | None = None,
y0: float | list[float] | None = None,
callback: Callable[[sambo._util.OptimizeResult], bool] | None = None,
n_jobs: int = 1,
disp: bool = False,
rng: int | numpy.random.mtrand.RandomState | numpy.random._generator.Generator | None = None,
**kwargs)
+
+
+
+ +Expand source code +Browse git + +
def minimize(
+        fun: Callable[[np.ndarray], float],
+        x0: Optional[tuple[float] | list[tuple[float]]] = None,
+        *,
+        args: tuple = (),
+        bounds: Optional[list[tuple]] = None,
+        constraints: Optional[Callable[[np.ndarray], bool] | NonlinearConstraint] = None,
+        max_iter: int = INT32_MAX,
+        method: Literal['shgo', 'sceua', 'smbo'] = 'shgo',
+        tol: float = FLOAT32_PRECISION,
+        # x_tol: float = FLOAT32_PRECISION,
+        n_iter_no_change: Optional[int] = None,
+        y0: Optional[float | list[float]] = None,
+        callback: Optional[Callable[[OptimizeResult], bool]] = None,
+        n_jobs: int = 1,
+        disp: bool = False,
+        rng: Optional[int | np.random.RandomState | np.random.Generator] = None,
+        **kwargs,
+):
+    """
+    Find an approximate optimum of an objective function in the
+    fewest possible evaluations.
+
+    Parameters
+    ----------
+    fun : Callable[[np.ndarray], float], optional
+        Objective function to minimize. Must take a single array-like argument
+        x (parameter combination) and return a scalar y (cost value).
+
+    x0 : tuple or list[tuple], optional
+        Initial guess(es) or starting point(s) for the optimization.
+
+    args : tuple, optional
+        Additional arguments to pass to the objective function and constraints.
+
+    bounds : list[tuple], optional
+        Bounds for parameter variables.
+        Should be a sequence of (min, max) pairs for each dimension,
+        or an enumeration of nominal values. For any dimension,
+        **if `min` and `max` are integers**, the dimension is assumed to be _integral_
+        on interval `[min, max)` (see warning below).
+        If `min` or `max` are **floats**, the dimension is assumed to be _real_.
+        In all other cases including if more than two values are specified,
+        the dimension is assumed to be that ([nominal]) _enumeration_ of values.
+        See _Examples_ below.
+
+        .. note:: Nominals are represented as ordinals
+            Categorical ([nominal]) enumerations, although often not inherently ordered,
+            are internally represented as integral dimensions.
+            If this appears to significantly affect your results
+            (e.g. if your nominals span many cases),
+            you may need to [one-hot encode] your nominal variables manually.
+
+        [nominal]: https://en.wikipedia.org/wiki/Level_of_measurement#Nominal_level
+        [one-hot encode]: https://en.wikipedia.org/wiki/One-hot
+
+        .. warning:: Mind the dot
+            If optimizing your problem fails to produce expected results,
+            make sure you're not specifying integer dimensions where real
+            (floating) values are expected. E.g.:
+
+                bounds = [(-2, 2)] * 2  # A 2D grid of {-2, -1, 0, 1}²
+                bounds = [(-2., 2.)]    # A 1D dimension of ~ np.linspace(-2., 2., 1/eps)
+
+    constraints : Callable[[np.ndarray], bool], optional
+        Function representing constraints.
+        Must return True iff the parameter combination x satisfies the constraints.
+
+            >>> minimize(..., constraints=lambda x: np.all((lb < x) & (x <= ub)))
+
+    max_iter : int, optional
+        Maximum number of iterations (objective function evaluations) allowed.
+
+    method : {'shgo', 'sceua', 'smbo'}, default='shgo'
+        Global optimization algorithm to use. Options are:
+
+        * `"shgo"` – [simplicial homology global optimization] (SHGO; from SciPy),
+          [assures quick convergence] to global minimum for Lipschitz-smooth functions;
+        * `"smbo"` – [surrogate model-based optimization], for which you can pass
+          your own `estimator=` (see `**kwargs`), robust, but slowest of the bunch;
+        * `"sceua"` – [shuffled complex evolution (SCE-UA)] (with a few tweaks,
+           marked in the source), a good, time-tested all-around algorithm
+           similar to [Nelder-Mead],
+           provided it's initialized with sufficient `n_complexes` and `complex_size`
+           kwargs ([canonical literature] suggests reference values
+           `n_complexes >= 3 * len(bounds)` and
+           `complex_size = 2 * len(bounds) + 1`,
+           but we find good performance using `complex_size=2`,
+           allowing for more complexes and more complex evolutions for given `max_iter`).
+
+        [simplicial homology global optimization]: http://doi.org/10.1007/s10898-018-0645-y
+        [assures quick convergence]: https://shgo.readthedocs.io/en/latest/docs/README.html#simplicial-homology-global-optimisation-theory
+        [surrogate model-based optimization]: https://en.wikipedia.org/wiki/Surrogate_model
+        [shuffled complex evolution (SCE-UA)]: https://doi.org/10.1007/BF00939380
+        [Nelder-Mead]: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
+        [canonical literature]: https://doi.org/10.1016/0022-1694(94)90057-4
+
+        .. caution:: Default method SHGO is only appropriate for Lipschitz-smooth functions
+            Smooth functions have gradients that vary gradually, while non-smooth functions
+            exhibit abrupt changes (e.g. neighboring values of categorical variables),
+            sharp corners (e.g. function `abs()`),
+            discontinuities (e.g. function `tan()`),
+            or unbounded growth (e.g. function `exp()`).
+
+            If your objective function is more of the latter kind,
+            you might prefer to use one of the other methods.
+
+    n_iter_no_change : int, optional
+        Number of iterations with no improvement before stopping.
+        Default is method-dependent.
+
+    tol : float, default FLOAT32_PRECISION
+        Tolerance for convergence. Optimization stops when
+        found optimum improvements are below this threshold.
+
+    y0 : float or tuple[float], optional
+        Initial value(s) of the objective function corresponding to `x0`.
+
+    callback : Callable[[OptimizeResult], bool], optional
+        A callback function that is called after each iteration.
+        The optimization stops if the callback returns True or
+        raises `StopIteration`.
+
+    n_jobs : int, default 1
+        Number of objective function evaluations to run in parallel.
+        Most applicable when n_candidates > 1.
+
+    disp : bool, default False
+        Display progress and intermediate results.
+
+    rng : int or np.random.RandomState or np.random.Generator, optional
+        Random number generator or seed for reproducibility.
+
+    **kwargs : dict, optional
+        Additional optional parameters to pass to the optimization function.
+        Popular options are:
+
+        * for `method="shgo"`: `n_init` (number of initial points), `sampling_method="halton"`,
+        * for `method="smbo"`: `n_init`, `n_candidates`, `n_models`, `estimator`
+          (for explanation, see class `sambo.Optimizer`),
+        * for `method="sceua"`: `n_complexes`, `complex_size` (as in [SCE-UA] algorithm),
+
+        [SCE-UA]: https://doi.org/10.1007/BF00939380
+
+    Examples
+    --------
+    Basic constrained 10-dimensional example:
+    >>> from scipy.optimize import rosen
+    >>> from sambo import minimize
+    >>> result = minimize(rosen, bounds=[(-2, 2)] * 10,
+    ...                   constraints=lambda x: sum(x) <= len(x))
+    >>> result
+     message: Optimization terminated successfully.
+     success: True
+         fun: 0.0
+           x: [1 1 1 1 1 1 1 1 1 1]
+        nfev: 1036
+          xv: [[-2 -2 ... -2 1]
+               [-2 -2 ... -2 1]
+               ...
+               [1 1 ... 1 1]
+               [1 1 ... 1 1]]
+        funv: [ 1.174e+04  1.535e+04 ...  0.000e+00  0.000e+00]
+
+    A more elaborate example, minimizing an objective function of three variables:
+    one integral, one real, and one nominal variable (see `bounds=`).
+    >>> def demand(x):
+    ...     n_roses, price, advertising_costs = x
+    ...     # Ground truth model: Demand falls with price, but grows if you advertise
+    ...     demand = 20 - 2*price + .1*advertising_costs
+    ...     return n_roses < demand
+    >>> def objective(x):
+    ...     n_roses, price, advertising_costs = x
+    ...     production_costs = 1.5 * n_roses
+    ...     profits = n_roses * price - production_costs - advertising_costs
+    ...     return -profits
+    >>> bounds = [
+    ...     (0, 100),  # From zero up to 100 roses per day
+    ...     (.5, 9.),  # Price per rose sold
+    ...     (10, 20, 100),  # Advertising budget
+    ... ]
+    >>> from sambo import minimize
+    >>> result = minimize(fun=objective, bounds=bounds, constraints=demand)
+
+    References
+    ----------
+    * Endres, S.C., Sandrock, C. & Focke, W.W. A simplicial homology algorithm for Lipschitz optimisation. J Glob Optim 72, 181–217 (2018). https://doi.org/10.1007/s10898-018-0645-y
+    * Duan, Q.Y., Gupta, V.K. & Sorooshian, S. Shuffled complex evolution approach for effective and efficient global minimization. J Optim Theory Appl 76, 501–521 (1993). https://doi.org/10.1007/BF00939380
+    * Koziel, Slawomir, and Leifur Leifsson. Surrogate-based modeling and optimization. New York: Springer, 2013. https://doi.org/10.1007/978-1-4614-7551-4
+    * Head, T., Kumar, M., Nahrstaedt, H., Louppe, G., & Shcherbatyi, I. (2021). scikit-optimize/scikit-optimize (v0.9.0). Zenodo. https://doi.org/10.5281/zenodo.5565057
+    """  # noqa: E501
+    from sambo._space import Space
+    constraints = _sanitize_constraints(constraints)
+    rng = _check_random_state(rng)
+    bounds, x0, y0 = _check_bounds(bounds, x0, y0, assert_numeric=False)
+    space = Space(bounds, constraints, rng=rng)
+    bounds = tuple(space)
+
+    fun = _Args0TransformingFunc(fun, space.inverse_transform)
+    if constraints is not None:
+        constraints = _Args0TransformingFunc(constraints, space.inverse_transform)
+    if callback is not None:
+        callback = _Args0TransformingFunc(callback, space.inverse_transform_result)
+
+    if method == 'shgo':
+        from sambo._shgo import shgo as minimize_func
+    elif method == 'sceua':
+        from sambo._sceua import sceua as minimize_func
+    elif method == 'smbo':
+        from sambo._smbo import smbo as minimize_func
+    else:
+        assert False, f"Invalid method= parameter: {method!r}. Expected one of: 'shgo', 'sceua', 'smbo'"
+
+    if n_iter_no_change is not None:
+        # Pass this iff specified b/c algos have different default values
+        kwargs['n_iter_no_change'] = n_iter_no_change
+
+    res = minimize_func(
+        fun, x0, args=args, bounds=bounds, constraints=constraints,
+        max_iter=max_iter, tol=tol, callback=callback, y0=y0,
+        n_jobs=n_jobs, disp=disp, rng=rng, **kwargs
+    )
+    res = space.inverse_transform_result(res)
+    res.space = space
+    return res
+
+

Find an approximate optimum of an objective function in the +fewest possible evaluations.

+

Parameters

+
+
fun : Callable[[np.ndarray], float], optional
+
Objective function to minimize. Must take a single array-like argument +x (parameter combination) and return a scalar y (cost value).
+
x0 : tuple or list[tuple], optional
+
Initial guess(es) or starting point(s) for the optimization.
+
args : tuple, optional
+
Additional arguments to pass to the objective function and constraints.
+
bounds : list[tuple], optional
+
+

Bounds for parameter variables. +Should be a sequence of (min, max) pairs for each dimension, +or an enumeration of nominal values. For any dimension, +if min and max are integers, the dimension is assumed to be integral +on interval [min, max) (see warning below). +If min or max are floats, the dimension is assumed to be real. +In all other cases including if more than two values are specified, +the dimension is assumed to be that (nominal) enumeration of values. +See Examples below.

+
+

Note: Nominals are represented as ordinals

+

Categorical (nominal) enumerations, although often not inherently ordered, +are internally represented as integral dimensions. +If this appears to significantly affect your results +(e.g. if your nominals span many cases), +you may need to one-hot encode your nominal variables manually.

+
+
+

Warning: Mind the dot

+

If optimizing your problem fails to produce expected results, +make sure you're not specifying integer dimensions where real +(floating) values are expected. E.g.:

+
bounds = [(-2, 2)] * 2  # A 2D grid of {-2, -1, 0, 1}²
+bounds = [(-2., 2.)]    # A 1D dimension of ~ np.linspace(-2., 2., 1/eps)
+
+
+
+
constraints : Callable[[np.ndarray], bool], optional
+
Function representing constraints. +Must return True iff the parameter combination x satisfies the constraints.
>>> minimize(..., constraints=lambda x: np.all((lb < x) & (x <= ub)))
+
+
+
max_iter : int, optional
+
Maximum number of iterations (objective function evaluations) allowed.
+
method : {'shgo', 'sceua', 'smbo'}, default='shgo'
+
+

Global optimization algorithm to use. Options are:

+    
  • "shgo" – simplicial homology global optimization (SHGO; from SciPy), +assures quick convergence to global minimum for Lipschitz-smooth functions;
  • 
  • "smbo" – surrogate model-based optimization, for which you can pass +your own estimator= (see **kwargs), robust, but slowest of the bunch;
  • 
  • "sceua" – shuffled complex evolution (SCE-UA) (with a few tweaks, +marked in the source), a good, time-tested all-around algorithm +similar to Nelder-Mead, +provided it's initialized with sufficient n_complexes and complex_size +kwargs (canonical literature suggests reference values +n_complexes >= 3 * len(bounds) and complex_size = 2 * len(bounds) + 1, +but we find good performance using complex_size=2, +allowing for more complexes and more complex evolutions for given max_iter).
  • 
+
+

Caution: Default method SHGO is only appropriate for Lipschitz-smooth functions

+

Smooth functions have gradients that vary gradually, while non-smooth functions +exhibit abrupt changes (e.g. neighboring values of categorical variables), +sharp corners (e.g. function abs()), +discontinuities (e.g. function tan()), +or unbounded growth (e.g. function exp()).

+

If your objective function is more of the latter kind, +you might prefer to use one of the other methods.

+
+
+
n_iter_no_change : int, optional
+
Number of iterations with no improvement before stopping. +Default is method-dependent.
+
tol : float, default FLOAT32_PRECISION
+
Tolerance for convergence. Optimization stops when +found optimum improvements are below this threshold.
+
y0 : float or tuple[float], optional
+
Initial value(s) of the objective function corresponding to x0.
+
callback : Callable[[OptimizeResult], bool], optional
+
A callback function that is called after each iteration. +The optimization stops if the callback returns True or +raises StopIteration.
+
n_jobs : int, default 1
+
Number of objective function evaluations to run in parallel. +Most applicable when n_candidates > 1.
+
disp : bool, default False
+
Display progress and intermediate results.
+
rng : int or np.random.RandomState or np.random.Generator, optional
+
Random number generator or seed for reproducibility.
+
**kwargs : dict, optional
+
+

Additional optional parameters to pass to the optimization function. +Popular options are:

+
    +
  • for method="shgo": n_init (number of initial points), sampling_method="halton",
  • +
  • for method="smbo": n_init, n_candidates, n_models, estimator +(for explanation, see class Optimizer),
  • +
  • for method="sceua": n_complexes, complex_size (as in SCE-UA algorithm),
  • +
+
+
+
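For instance, a sketch of passing SMBO-specific options (with fun and bounds as defined elsewhere; the values are illustrative):
+
>>> result = minimize(fun, bounds=bounds, method='smbo',
+...                   n_init=20, estimator='et')
+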

Examples

+

Basic constrained 10-dimensional example:

+
>>> from scipy.optimize import rosen
+>>> from sambo import minimize
+>>> result = minimize(rosen, bounds=[(-2, 2)] * 10,
+...                   constraints=lambda x: sum(x) <= len(x))
+>>> result
+ message: Optimization terminated successfully.
+ success: True
+     fun: 0.0
+       x: [1 1 1 1 1 1 1 1 1 1]
+    nfev: 1036
+      xv: [[-2 -2 ... -2 1]
+           [-2 -2 ... -2 1]
+           ...
+           [1 1 ... 1 1]
+           [1 1 ... 1 1]]
+    funv: [ 1.174e+04  1.535e+04 ...  0.000e+00  0.000e+00]
+
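+
Such a result can then be visualized with the helpers from the sambo.plot sub-module, e.g.:
+
>>> from sambo.plot import plot_convergence
+>>> fig = plot_convergence(result)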
+

A more elaborate example, minimizing an objective function of three variables: +one integral, one real, and one nominal variable (see bounds=).

+
>>> def demand(x):
+...     n_roses, price, advertising_costs = x
+...     # Ground truth model: Demand falls with price, but grows if you advertise
+...     demand = 20 - 2*price + .1*advertising_costs
+...     return n_roses < demand
+>>> def objective(x):
+...     n_roses, price, advertising_costs = x
+...     production_costs = 1.5 * n_roses
+...     profits = n_roses * price - production_costs - advertising_costs
+...     return -profits
+>>> bounds = [
+...     (0, 100),  # From zero up to 100 roses per day
+...     (.5, 9.),  # Price per rose sold
+...     (10, 20, 100),  # Advertising budget
+... ]
+>>> from sambo import minimize
+>>> result = minimize(fun=objective, bounds=bounds, constraints=demand)
+
+
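+
The fields of the returned OptimizeResult then carry the solution (a sketch; concrete values depend on the run):
+
>>> n_roses, price, advertising_costs = result.x
+>>> max_profit = -result.fun  # The objective was negated profit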

References

+
+    
  • Endres, S.C., Sandrock, C. & Focke, W.W. A simplicial homology algorithm for Lipschitz optimisation. J Glob Optim 72, 181–217 (2018). https://doi.org/10.1007/s10898-018-0645-y
  • 
  • Duan, Q.Y., Gupta, V.K. & Sorooshian, S. Shuffled complex evolution approach for effective and efficient global minimization. J Optim Theory Appl 76, 501–521 (1993). https://doi.org/10.1007/BF00939380
  • 
  • Koziel, Slawomir, and Leifur Leifsson. Surrogate-based modeling and optimization. New York: Springer, 2013. https://doi.org/10.1007/978-1-4614-7551-4
  • 
  • Head, T., Kumar, M., Nahrstaedt, H., Louppe, G., & Shcherbatyi, I. (2021). scikit-optimize/scikit-optimize (v0.9.0). Zenodo. https://doi.org/10.5281/zenodo.5565057
  • 
+
+
+
+

Classes

+
+
+class OptimizeResult +(*args, **kwargs) +
+
+
+ +Expand source code +Browse git + +
class OptimizeResult(_OptimizeResult):
+    """
+    Optimization result. Most fields are inherited from
+    `scipy.optimize.OptimizeResult`, with additional attributes: `xv`, `funv`, `model`.
+    """
+    success: bool  #: Whether or not the optimizer exited successfully.
+    message: str  #: More detailed cause of optimization termination.
+    x: np.ndarray  #: The solution of the optimization, `shape=(n_features,)`.
+    fun: np.ndarray  #: Value of objective function at `x`, aka the observed minimum.
+    nfev: int  #: Number of objective function evaluations.
+    nit: int  #: Number of iterations performed by the optimization algorithm.
+    xv: np.ndarray  #: All the parameter sets that have been tried, in sequence, `shape=(nfev, n_features)`.
+    funv: np.ndarray  #: Objective function values at points `xv`.
+    model: Optional[list[_SklearnLikeRegressor]]  #: The optimization model(s) used, if any.
+
+

Optimization result. Most fields are inherited from +scipy.optimize.OptimizeResult, with additional attributes: xv, funv, model.

+

Ancestors

+
    +
  • scipy.optimize._optimize.OptimizeResult
  • +
  • scipy._lib._util._RichResult
  • +
  • builtins.dict
  • +
+

Class variables

+
+
var fun : numpy.ndarray
+
+

Value of objective function at x, aka the observed minimum.

+
+
var funv : numpy.ndarray
+
+

Objective function values at points xv.

+
+
var message : str
+
+

More detailed cause of optimization termination.

+
+
var model : list[sambo._util._SklearnLikeRegressor] | None
+
+

The optimization model(s) used, if any.

+
+
var nfev : int
+
+

Number of objective function evaluations.

+
+
var nit : int
+
+

Number of iterations performed by the optimization algorithm.

+
+
var success : bool
+
+

Whether or not the optimizer exited successfully.

+
+
var x : numpy.ndarray
+
+

The solution of the optimization, shape=(n_features,).

+
+
var xv : numpy.ndarray
+
+

All the parameter sets that have been tried, in sequence, shape=(nfev, n_features).

+
+
+
+
+class Optimizer +(fun: Callable[[numpy.ndarray], float] | None,
x0: tuple[float] | list[tuple[float]] | None = None,
*,
args: tuple = (),
bounds: list[tuple] | None = None,
constraints: Callable[[numpy.ndarray], bool] | scipy.optimize._constraints.NonlinearConstraint | None = None,
max_iter: int = 2147483647,
n_init: int | None = None,
n_candidates: int | None = None,
n_iter_no_change: int = 5,
n_models: int = 1,
tol: float = 1e-06,
estimator: Literal['gp', 'et', 'gb'] | sambo._util._SklearnLikeRegressor = None,
y0: float | list[float] | None = None,
callback: Callable[[sambo._util.OptimizeResult], bool] | None = None,
n_jobs: int = 1,
disp: bool = False,
rng: int | numpy.random.mtrand.RandomState | numpy.random._generator.Generator | None = None)
+
+
+
+ +Expand source code +Browse git + +
class Optimizer:
+    """
+    A sequential optimizer that optimizes an objective function using a surrogate model.
+
+    Parameters
+    ----------
+    fun : Callable[[np.ndarray], float], optional
+        Objective function to minimize. Must take a single array-like argument
+        x (parameter combination) and return a scalar y (cost value).
+
+        When unspecified, the Optimizer can be used iteratively in an ask-tell
+        fashion using the `ask()` and `tell()` methods.
+
+    x0 : tuple | list[tuple], optional
+        Initial guess(es) or starting point(s) for the optimization.
+
+    args : tuple, optional
+        Additional arguments to pass to the objective function and constraints.
+
+    bounds : list[tuple], optional
+        Bounds for the decision variables. A sequence of (min, max) pairs for each dimension.
+
+    constraints : Callable[[np.ndarray], bool], optional
+        Function representing constraints.
+        Must return True iff the parameter combination x satisfies the constraints.
+
+    max_iter : int, optional
+        Maximum number of iterations allowed.
+
+    n_init : int, optional
+        Number of initial evaluations of the objective function before
+        first fitting the surrogate model.
+
+    n_candidates : int, optional
+        Number of candidate solutions generated per iteration.
+
+    n_iter_no_change : int, default 5
+        Number of iterations with no improvement before stopping.
+
+    n_models : int, default 1
+        Number of most-recently-generated surrogate models to use for
+        next best-point prediction. Useful for small and
+        randomized estimators such as `"et"` with no fixed `rng=`.
+
+    tol : float, default FLOAT32_PRECISION
+        Tolerance for convergence. Optimization stops when
+        found optimum improvements are below this threshold.
+
+    estimator : {'gp', 'et', 'gb'} or scikit-learn-like regressor, default='gp'
+        Surrogate model for the optimizer.
+        Popular options include "gp" (Gaussian process), "et" (extra trees),
+        or "gb" (gradient boosting).
+
+        You can also provide your own regressor with a scikit-learn API
+        (namely `fit()` and `predict()` methods).
+
+    y0 : float or tuple[float], optional
+        Initial value(s) of the objective function corresponding to `x0`.
+
+    callback : Callable[[OptimizeResult], bool], optional
+        A callback function that is called after each iteration.
+        The optimization stops if the callback returns True or
+        raises `StopIteration`.
+
+    n_jobs : int, default 1
+        Number of objective function evaluations to run in parallel.
+        Most applicable when n_candidates > 1.
+
+    disp : bool, default False
+        Display progress and intermediate results.
+
+    rng : int or np.random.RandomState or np.random.Generator, optional
+        Random number generator or seed for reproducibility.
+
+    Examples
+    --------
+    >>> from sambo import Optimizer
+    >>> def objective_func(x):
+    ...     return sum(x**2)
+    >>> optimizer = Optimizer(fun=objective_func, bounds=[(-5, 5), (-5, 5)])
+    >>> result = optimizer.run()
+
+    Using the ask-tell interface:
+    >>> optimizer = Optimizer(fun=None, bounds=[(-5, 5), (-5, 5)])
+    >>> suggested_x = optimizer.ask()
+    >>> y = [objective_func(x) for x in suggested_x]
+    >>> optimizer.tell(y, suggested_x)
+    """
+    def __init__(
+            self,
+            fun: Optional[Callable[[np.ndarray], float]],
+            x0: Optional[tuple[float] | list[tuple[float]]] = None,
+            *,
+            args: tuple = (),
+            bounds: Optional[list[tuple]] = None,
+            constraints: Optional[Callable[[np.ndarray], bool] | NonlinearConstraint] = None,
+            max_iter: int = INT32_MAX,
+            n_init: Optional[int] = None,
+            n_candidates: Optional[int] = None,
+            n_iter_no_change: int = 5,
+            n_models: int = 1,
+            tol: float = FLOAT32_PRECISION,
+            estimator: Literal['gp', 'et', 'gb'] | _SklearnLikeRegressor = None,
+            y0: Optional[float | list[float]] = None,
+            callback: Optional[Callable[[OptimizeResult], bool]] = None,
+            n_jobs: int = 1,
+            disp: bool = False,
+            rng: Optional[int | np.random.RandomState | np.random.Generator] = None,
+    ):
+        assert fun is None or callable(fun), fun
+        assert x0 is not None or bounds is not None, "Either x0= or bounds= must be provided"
+        constraints = _sanitize_constraints(constraints)
+        assert constraints is None or callable(constraints), constraints
+        assert isinstance(max_iter, Integral) and max_iter > 0, max_iter
+        assert isinstance(tol, Real) and 0 <= tol, tol
+        assert isinstance(n_iter_no_change, int) and n_iter_no_change > 0, n_iter_no_change
+        assert callback is None or callable(callback), callback
+        assert isinstance(n_jobs, Integral) and n_jobs != 0, n_jobs
+        assert isinstance(rng, (Integral, np.random.RandomState, np.random.Generator, type(None))), rng
+
+        assert n_init is None or isinstance(n_init, Integral) and n_init >= 0, n_init
+        assert n_candidates is None or isinstance(n_candidates, Integral) and n_candidates > 0, n_candidates
+        assert estimator is None or isinstance(estimator, (str, _SklearnLikeRegressor)), estimator
+        assert isinstance(n_models, Integral) and n_models > 0, n_models
+
+        bounds, x0, y0 = _check_bounds(bounds, x0, y0)
+        rng = _check_random_state(rng)
+
+        if n_init is None:
+            n_init = (0 if not callable(fun) else
+                      min(max(1, max_iter - 20),
+                          int(40 * len(bounds) * max(1, np.log2(len(bounds))))))
+        assert max_iter >= n_init, (max_iter, n_init)
+
+        if n_candidates is None:
+            n_candidates = max(1, int(np.log10(len(bounds))))
+
+        if estimator is None or isinstance(estimator, str):
+            from sambo._estimators import _estimator_factory
+
+            estimator = _estimator_factory(estimator, bounds, rng)
+        assert isinstance(estimator, _SklearnLikeRegressor), estimator
+
+        # Objective function can be None for the real-life function trials using ask-tell API
+        fun = None if fun is None else _ParallelFuncWrapper(
+            _ObjectiveFunctionWrapper(
+                func=fun,
+                max_nfev=max_iter,
+                callback=callback,
+                args=()),
+            n_jobs, args,
+        )
+
+        self.fun = fun
+        self.x0 = x0
+        self.y0 = y0
+        self.bounds = bounds
+        self.constraints = constraints
+        self.max_iter = max_iter
+        self.n_init = n_init
+        self.n_candidates = n_candidates
+        self.n_iter_no_change = n_iter_no_change
+        self.tol = tol
+        self.estimator = estimator
+        self.estimators = []
+        self.n_models = n_models
+        self.callback = callback
+        self.n_jobs = n_jobs
+        self.disp = disp
+        self.rng = rng
+
+        X, y = [], []
+        if y0 is not None:
+            y0 = np.atleast_1d(y0)
+            assert x0 is not None and len(x0) == len(y0), (x0, y0)
+            x0 = np.atleast_2d(x0)
+            assert len(x0) == len(y0), (x0, y0)
+            X, y = list(x0), list(y0)
+
+        self._X_ask = []
+        # Known points
+        self._X = X
+        self._y = y
+        assert len(X) == len(y), (X, y)
+
+        self._kde = None
+        self._prev_y_min = np.inf
+
+        # Cache methods on the _instance_
+        self._init_once = lru_cache(1)(self._init_once)
+        self.top_k = lru_cache(1)(self.top_k)
+
+    def _init_once(self):
+        assert not self.n_init or callable(self.fun), (self.n_init, self.fun)
+        if not self.n_init:
+            return
+        x0, n_init = self.x0, self.n_init
+        if self.y0 is not None:
+            # x0, y0 already added to _X, _Y in __init__
+            x0, n_init = None, max(0, self.n_init - len(self.x0))
+        if n_init:
+            X = _initialize_population(self.bounds, n_init, self.constraints, x0, self.rng)
+            y = self.fun(X)
+            self._X.extend(X)
+            self._y.extend(y)
+        self._fit()
+
+    def _fit(self):
+        from sklearn import clone
+
+        estimator = self.estimator
+        if self.n_models > 1 and hasattr(estimator, 'random_state'):
+            estimator = clone(self.estimator)
+            estimator.random_state = self.rng.randint(10000000)
+        estimator.fit(self._X, self._y)
+
+        self.estimators.append(estimator)
+        if len(self.estimators) > self.n_models:
+            self.estimators.pop(0)
+
+        self.top_k.cache_clear()
+
+    def _predict(self, X):
+        means, stds, masks = [], [], []
+        for estimator in self.estimators:
+            X_batched = [X[i:i + 10_000] for i in range(0, len(X), 10_000)]
+            try:
+                mean, std = np.concatenate(
+                    [estimator.predict(X, return_std=True) for X in X_batched], axis=1)
+            except TypeError as exc:
+                if 'return_std' not in exc.args[0]:
+                    raise
+                mean, std = np.concatenate([estimator.predict(X) for X in X_batched]), 0
+                mask = np.ones_like(mean, dtype=bool)
+            else:
+                # Only suggest new/unknown points
+                mask = std != 0
+
+            means.append(mean)
+            stds.append(std)
+            masks.append(mask)
+
+        mask = np.any(masks, axis=0)
+        mean = np.mean(means, axis=0)
+        std = np.mean(stds, axis=0)
+
+        if mask.any() and not mask.all():
+            X, mean, std = X[mask], mean[mask], std[mask]
+
+        return X, mean, std
+
+    #: Acquisition functions for selecting the best candidates from the sample.
+    #: Currently defined keys:
+    #: `"LCB"` — **lower confidence bound** (an inverse analog of "UCB")
+    #: which orders candidates by `mean - kappa * std`.
+    #: [//]: # (No blank line here! bug in pdoc)
+    #: .. note::
+    #:      To make any use of the `kappa` parameter, it is important for the
+    #:      estimator's `predict()` method to implement `return_std=` behavior.
+    #:      All built-in estimators (`"gp"`, `"et"`, `"gb"`) do so.
+    ACQ_FUNCS: dict = {
+        'LCB': _LCB,
+    }
+
+    def ask(
+            self,
+            n_candidates: Optional[int] = None,
+            *,
+            acq_func: Optional[Callable] = ACQ_FUNCS['LCB'],
+            kappa: float | list[float] = 0,
+    ) -> np.ndarray:
+        """
+        Propose candidate solutions for the next objective evaluation based on
+        the current surrogate model(s) and acquisition function.
+
+        Parameters
+        ----------
+        n_candidates : int, optional
+            Number of candidate solutions to propose.
+            If not specified, the default value set during initialization is used.
+
+        acq_func : Callable, default ACQ_FUNCS['LCB']
+            Acquisition function used to guide the selection of candidate solutions.
+            By default, lower confidence bound (i.e. `mean - kappa * std` where `mean`
+            and `std` are surrogate models' predicted results).
+
+            .. tip::
+                [See the source][_ghs] for how `ACQ_FUNCS['LCB']` is implemented.
+                The passed parameters are open to extension to accommodate
+                alternative acquisition functions.
+
+                [_ghs]: https://github.com/search?q=repo%3Asambo-optimization%2Fsambo%20ACQ_FUNCS&type=code
+
+        kappa : float or list[float], default 0
+            The lower-confidence-bound parameter, used by `acq_func`, that
+            balances exploration (<0) vs exploitation (>0).
+
+            Can also be an array of values to use sequentially for `n_candidates`.
+
+        Returns
+        -------
+        np.ndarray
+            An array of shape `(n_candidates, n_bounds)` containing the proposed
+            candidate solutions.
+
+        Notes
+        -----
+        Candidates are proposed in parallel according to `n_jobs` when `n_candidates > 1`.
+
+        Examples
+        --------
+        >>> candidates = optimizer.ask(n_candidates=2, kappa=2)
+        >>> candidates
+        array([[ 1.1, -0.2],
+               [ 0.8,  0.1]])
+        """
+        if n_candidates is None:
+            n_candidates = self.n_candidates
+        assert isinstance(n_candidates, Integral) and n_candidates > 0, n_candidates
+        assert isinstance(kappa, (Real, Iterable)), kappa
+        self._init_once()
+
+        n_points = min(self.MAX_POINTS_PER_ITER,
+                       self.POINTS_PER_DIM * int(len(self.bounds)**2))  # TODO: Make this a param?
+        nfev = len(self._X)
+        if nfev < 10 * len(self.bounds)**2:
+            X = _sample_population(self.bounds, n_points, self.constraints, self.rng)
+        else:
+            y_min = np.min(self._y)
+            if self._kde is None or (nfev < 200 or nfev % 5 == 0 or y_min < self._prev_y_min):
+                self._prev_y_min = y_min
+                self._kde = recompute_kde(np.array(self._X), np.array(self._y))
+            X = weighted_uniform_sampling(
+                self._kde, self.bounds, n_points, self.constraints, self.rng)
+
+        X, mean, std = self._predict(X)
+        criterion = acq_func(mean=mean, std=std, kappa=kappa)
+        n_candidates = min(n_candidates, criterion.shape[1])
+        best_indices = np.take_along_axis(
+            partitioned_inds := np.argpartition(criterion, n_candidates - 1)[:, :n_candidates],
+            np.argsort(np.take_along_axis(criterion, partitioned_inds, axis=1)),
+            axis=1).flatten('F')
+        X = X[best_indices]
+        X = X[:n_candidates]
+        self._X_ask.extend(map(tuple, X))
+        return X
+
+    #: In `Optimizer.ask()`, sample this many points (per dimension) and
+    #: use the estimator to _predict_ the objective values.
+    POINTS_PER_DIM = 20_000
+
+    #: In `Optimizer.ask()`, sample _at most_ this many points. This increases
+    #: computation time, but may also improve precision and convergence significantly.
+    MAX_POINTS_PER_ITER = 80_000
+
+    def tell(self, y: float | list[float],
+             x: Optional[float | tuple[float] | list[tuple[float]]] = None):
+        """
+        Provide incremental feedback to the optimizer by reporting back the objective
+        function values (`y`) at suggested or new candidate points (`x`).
+
+        This allows the optimizer to refine its underlying model(s) and better
+        guide subsequent proposals.
+
+        Parameters
+        ----------
+        y : float or list[float]
+            The observed value(s) of the objective function.
+
+        x : float or list[float], optional
+            The input point(s) corresponding to the observed objective function values `y`.
+            If omitted, the optimizer assumes that the `y` values correspond
+            to the most recent candidates proposed by the `ask` method (FIFO).
+
+            .. warning::
+                The function first takes `y`, then `x`, not the other way around!
+
+        Examples
+        --------
+        >>> candidates = optimizer.ask(n_candidates=3)
+        >>> ... # Evaluate candidate solutions IRL and tell it to the optimizer
+        >>> objective_values = [1.7, 3, .8]
+        >>> optimizer.tell(y=objective_values, x=candidates)
+        """
+        y = np.atleast_1d(y)
+        assert y.ndim == 1, 'y= should be at most 1-dimensional'
+        if x is None:
+            if not self._X_ask:
+                raise RuntimeError(
+                    f'`{self.tell.__qualname__}(y, x=None)` only allowed as many '
+                    f'times as `{self.ask.__qualname__}()` was called beforehand')
+            for x, yval in zip(tuple(self._X_ask), y):
+                self._X_ask.pop(0)
+                self._X.append(x)
+                self._y.append(yval)
+        else:
+            x = np.atleast_2d(x)
+            assert len(x) == len(y), 'y= and x= (if provided) must contain the same number of items'
+            for xi, yi in zip(x, y):
+                try:
+                    self._X_ask.pop(self._X_ask.index(tuple(xi)))
+                except (ValueError, IndexError):
+                    pass
+                self._X.append(xi)
+                self._y.append(yi)
+        self._fit()
+
+    def run(self, *,
+            max_iter: Optional[int] = None,
+            n_candidates: Optional[int] = None) -> OptimizeResult:
+        """
+        Execute the optimization process for (at most) a specified number of iterations
+        (function evaluations) and return the optimization result.
+
+        This method performs sequential optimization by iteratively proposing candidates using
+        method `ask()`, evaluating the objective function, and updating the optimizer state
+        with method `tell()`.
+        This continues until the maximum number of iterations (`max_iter`) is reached or other
+        stopping criteria are met.
+
+        This method encapsulates the entire optimization workflow, making it convenient
+        to use when you don't need fine-grained control over individual steps (`ask` and `tell`).
+        It cycles between exploration and exploitation by randomly sampling `kappa` appropriately.
+
+        Parameters
+        ----------
+        max_iter : int, optional
+            The maximum number of iterations to perform. If not specified, the
+            default value provided during initialization is used.
+
+        n_candidates : int, optional
+            Number of candidates to propose and evaluate in each iteration. If not specified,
+            the default value provided during initialization is used.
+
+        Returns
+        -------
+        OptimizeResult
+            Results of the optimization process.
+
+        Examples
+        --------
+        Run an optimization with a specified number of iterations:
+        >>> result = optimizer.run(max_iter=30)
+        >>> print(result.x, result.fun)  # Best x, y
+        """
+        max_iter = max_iter if max_iter is not None else 0 if self.fun is None else self.max_iter
+        assert callable(self.fun) or max_iter == 0, "Can't run optimizer when fun==None. Can only use ask-tell API."
+        assert n_candidates is None or isinstance(n_candidates, Integral) and n_candidates > 0, n_candidates
+        assert max_iter is None or isinstance(max_iter, Integral) and max_iter >= 0, max_iter
+
+        n_candidates = n_candidates or self.n_candidates
+        success = True
+        message = "Optimization hadn't been started"
+        iteration = 0
+        prev_best_value = np.inf
+        no_change = 0
+        try:
+            for iteration in range(1, max_iter + 1):
+                coefs = [self.rng.uniform(-2, 2) for i in range(n_candidates)]
+                X = self.ask(n_candidates, kappa=coefs)
+                y = self.fun(X)
+                self.tell(y)
+
+                best_value = min(self._y)
+                if self.tol and prev_best_value - best_value < self.tol or prev_best_value == best_value:
+                    no_change += 1
+                    if no_change == self.n_iter_no_change:
+                        message = 'Optimization converged (y_prev[n_iter_no_change] - y_best < tol)'
+                        break
+                else:
+                    assert best_value < prev_best_value
+                    no_change = 0
+                    prev_best_value = best_value
+
+                if self.disp:
+                    print(f"{__package__}: {self.estimator.__class__.__name__} "
+                          f"nit:{iteration}, nfev:{self.fun.func.nfev}, "
+                          f"fun:{np.min(self._y):.5g}")
+        except _ObjectiveFunctionWrapper.CallbackStopIteration:
+            message = 'Optimization callback returned True'
+        except _ObjectiveFunctionWrapper.MaximumFunctionEvaluationsReached:
+            message = f'Maximum function evaluations reached (max_iter = {max_iter})'
+            success = False
+        except KeyboardInterrupt:
+            message = 'KeyboardInterrupt'
+            success = False
+
+        if len(self._X) == 0 and self.fun is not None:
+            # We were interrupted before ._init_once() could finish
+            self._X = self.fun.func.xv
+            self._y = self.fun.func.funv
+
+        x, y = self.top_k(1)
+        result = OptimizeResult(
+            success=success,
+            message=message,
+            x=x,
+            fun=y,
+            nit=iteration,
+            nfev=len(self._y) - (len(self.y0) if self.y0 is not None else 0),
+            xv=np.array(self._X),
+            funv=np.array(self._y),
+            model=list(self.estimators),
+        )
+        return result
+
+    def top_k(self, k: int = 1):
+        """
+        Based on their objective function values,
+        retrieve the top-k best solutions found by the optimization process so far.
+
+        Parameters
+        ----------
+        k : int, default 1
+            The number of top solutions to retrieve.
+            If `k` exceeds the number of evaluated solutions,
+            all available solutions are returned.
+
+        Returns
+        -------
+        X : np.ndarray
+            A list of best points with shape `(k, n_bounds)`.
+        y : np.ndarray
+            Objective values at points of `X`.
+
+        Examples
+        --------
+        Retrieve the best solution:
+        >>> optimizer.run()
+        >>> best_x, best_y = optimizer.top_k(1)
+        """
+        assert isinstance(k, Integral) and k > 0, k
+        best_index = np.argsort(self._y)
+        index = slice(0, k) if k > 1 else (k - 1)
+        # Use arrays so fancy indexing also works when _X, _y are plain lists (k > 1)
+        X, y = np.asarray(self._X), np.asarray(self._y)
+        return X[best_index[index]], y[best_index[index]]
+
+

A sequential optimizer that optimizes an objective function using a surrogate model.

+

Parameters

+
+
fun : Callable[[np.ndarray], float], optional
+
+

Objective function to minimize. Must take a single array-like argument +x (parameter combination) and return a scalar y (cost value).

+

When unspecified, the Optimizer can be used iteratively in an ask-tell +fashion using the ask() and tell() methods.

+
+
x0 : tuple | list[tuple], optional
+
Initial guess(es) or starting point(s) for the optimization.
+
args : tuple, optional
+
Additional arguments to pass to the objective function and constraints.
+
bounds : list[tuple], optional
+
Bounds for the decision variables. A sequence of (min, max) pairs for each dimension.
+
constraints : Callable[[np.ndarray], bool], optional
+
Function representing constraints. +Must return True iff the parameter combination x satisfies the constraints.
+
max_iter : int, optional
+
Maximum number of iterations allowed.
+
n_init : int, optional
+
Number of initial evaluations of the objective function before +first fitting the surrogate model.
+
n_candidates : int, optional
+
Number of candidate solutions generated per iteration.
+
n_iter_no_change : int, default 5
+
Number of iterations with no improvement before stopping.
+
n_models : int, default 1
+
Number of most-recently-generated surrogate models to use for +next best-point prediction. Useful for small and +randomized estimators such as "et" with no fixed rng=.
+
tol : float, default FLOAT32_PRECISION
+
Tolerance for convergence. Optimization stops when +found optimum improvements are below this threshold.
+
estimator : {'gp', 'et', 'gb'} or scikit-learn-like regressor, default='gp'
+
+

Surrogate model for the optimizer. +Popular options include "gp" (Gaussian process), "et" (extra trees), +or "gb" (gradient boosting).

+

You can also provide your own regressor with a scikit-learn API +(namely fit() and predict() methods).

+
+
y0 : float or tuple[float], optional
+
Initial value(s) of the objective function corresponding to x0.
+
callback : Callable[[OptimizeResult], bool], optional
+
A callback function that is called after each iteration. +The optimization stops if the callback returns True or +raises StopIteration.
+
n_jobs : int, default 1
+
Number of objective function evaluations to run in parallel. +Most applicable when n_candidates > 1.
+
disp : bool, default False
+
Display progress and intermediate results.
+
rng : int or np.random.RandomState or np.random.Generator, optional
+
Random number generator or seed for reproducibility.
+
+

Examples

+
>>> from sambo import Optimizer
+>>> def objective_func(x):
+...     return sum(x**2)
+>>> optimizer = Optimizer(fun=objective_func, bounds=[(-5, 5), (-5, 5)])
+>>> result = optimizer.run()
+
+

Using the ask-tell interface:

+
>>> optimizer = Optimizer(fun=None, bounds=[(-5, 5), (-5, 5)])
+>>> suggested_x = optimizer.ask()
+>>> y = [objective_func(x) for x in suggested_x]
+>>> optimizer.tell(y, suggested_x)
+
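+
Putting it together, a complete ask-tell loop might look like this sketch (the fixed iteration budget is illustrative):
+
>>> optimizer = Optimizer(fun=None, bounds=[(-5, 5), (-5, 5)])
+>>> for _ in range(30):
+...     X = optimizer.ask()
+...     optimizer.tell([objective_func(x) for x in X])
+>>> best_x, best_y = optimizer.top_k(1)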
+

Class variables

+
+
var ACQ_FUNCS : dict
+
+

Acquisition functions for selecting the best candidates from the sample. +Currently defined keys: +"LCB" — lower confidence bound (an inverse analog of "UCB"), +which orders candidates by mean - kappa * std.

+
+

Note

+

To make any use of the kappa parameter, it is important for the +estimator's predict() method to implement return_std= behavior. +All built-in estimators ("gp", "et", "gb") do so.

+
+
+
var MAX_POINTS_PER_ITER
+
+

In Optimizer.ask(), sample at most this many points. This increases +computation time, but may also improve precision and convergence significantly.

+
+
var POINTS_PER_DIM
+
+

In Optimizer.ask(), sample this many points (per dimension) and +use the estimator to predict the objective values.

+
+
+

Methods

+
+
+def ask(self,
n_candidates: int | None = None,
*,
acq_func: Callable | None = <function _LCB>,
kappa: float | list[float] = 0) ‑> numpy.ndarray
+
+
+
+ +Expand source code +Browse git + +
def ask(
+        self,
+        n_candidates: Optional[int] = None,
+        *,
+        acq_func: Optional[Callable] = ACQ_FUNCS['LCB'],
+        kappa: float | list[float] = 0,
+) -> np.ndarray:
+    """
+    Propose candidate solutions for the next objective evaluation based on
+    the current surrogate model(s) and acquisition function.
+
+    Parameters
+    ----------
+    n_candidates : int, optional
+        Number of candidate solutions to propose.
+        If not specified, the default value set during initialization is used.
+
+    acq_func : Callable, default ACQ_FUNCS['LCB']
+        Acquisition function used to guide the selection of candidate solutions.
+        By default, lower confidence bound (i.e. `mean - kappa * std` where `mean`
+        and `std` are surrogate models' predicted results).
+
+        .. tip::
+            [See the source][_ghs] for how `ACQ_FUNCS['LCB']` is implemented.
+            The passed parameters are open to extension to accommodate
+            alternative acquisition functions.
+
+            [_ghs]: https://github.com/search?q=repo%3Asambo-optimization%2Fsambo%20ACQ_FUNCS&type=code
+
+    kappa : float or list[float], default 0
+        The lower-confidence-bound parameter, used by `acq_func`, that
+        balances exploration (<0) vs exploitation (>0).
+
+        Can also be an array of values to use sequentially for `n_candidates`.
+
+    Returns
+    -------
+    np.ndarray
+        An array of shape `(n_candidates, n_bounds)` containing the proposed
+        candidate solutions.
+
+    Notes
+    -----
+    Candidates are proposed in parallel according to `n_jobs` when `n_candidates > 1`.
+
+    Examples
+    --------
+    >>> candidates = optimizer.ask(n_candidates=2, kappa=2)
+    >>> candidates
+    array([[ 1.1, -0.2],
+           [ 0.8,  0.1]])
+    """
+    if n_candidates is None:
+        n_candidates = self.n_candidates
+    assert isinstance(n_candidates, Integral) and n_candidates > 0, n_candidates
+    assert isinstance(kappa, (Real, Iterable)), kappa
+    self._init_once()
+
+    n_points = min(self.MAX_POINTS_PER_ITER,
+                   self.POINTS_PER_DIM * int(len(self.bounds)**2))  # TODO: Make this a param?
+    nfev = len(self._X)
+    if nfev < 10 * len(self.bounds)**2:
+        X = _sample_population(self.bounds, n_points, self.constraints, self.rng)
+    else:
+        y_min = np.min(self._y)
+        if self._kde is None or (nfev < 200 or nfev % 5 == 0 or y_min < self._prev_y_min):
+            self._prev_y_min = y_min
+            self._kde = recompute_kde(np.array(self._X), np.array(self._y))
+        X = weighted_uniform_sampling(
+            self._kde, self.bounds, n_points, self.constraints, self.rng)
+
+    X, mean, std = self._predict(X)
+    criterion = acq_func(mean=mean, std=std, kappa=kappa)
+    n_candidates = min(n_candidates, criterion.shape[1])
+    best_indices = np.take_along_axis(
+        partitioned_inds := np.argpartition(criterion, n_candidates - 1)[:, :n_candidates],
+        np.argsort(np.take_along_axis(criterion, partitioned_inds, axis=1)),
+        axis=1).flatten('F')
+    X = X[best_indices]
+    X = X[:n_candidates]
+    self._X_ask.extend(map(tuple, X))
+    return X
+
+

Propose candidate solutions for the next objective evaluation based on +the current surrogate model(s) and acquisition function.

+

Parameters

+
+
n_candidates : int, optional
+
Number of candidate solutions to propose. +If not specified, the default value set during initialization is used.
+
acq_func : Callable, default ACQ_FUNCS['LCB']
+
+

Acquisition function used to guide the selection of candidate solutions. +By default, lower confidence bound (i.e. mean - kappa * std where mean +and std are surrogate models' predicted results).

+
+

Tip

+

See the source for how ACQ_FUNCS['LCB'] is implemented. +The passed parameters are open to extension to accommodate +alternative acquisition functions.

+
+
+
kappa : float or list[float], default 0
+
+

The lower-confidence-bound parameter, used by acq_func, that +balances exploration (<0) vs exploitation (>0).

+

Can also be an array of values to use sequentially for n_candidates.

+
+
+

Returns

+
+
np.ndarray
+
An array of shape (n_candidates, n_bounds) containing the proposed +candidate solutions.
+
+

Notes

+

Candidates are proposed in parallel according to n_jobs when n_candidates > 1.

+

Examples

+
>>> candidates = optimizer.ask(n_candidates=2, kappa=2)
+>>> candidates
+array([[ 1.1, -0.2],
+       [ 0.8,  0.1]])
+
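+
Judging by how ask() consumes the acquisition function in the source above, a custom acq_func= should accept mean=, std= and kappa= keyword arguments and return a 2D criterion array. A hedged sketch (the function here is hypothetical, not part of the API):
+
>>> import numpy as np
+>>> def pure_exploitation(*, mean, std, kappa=0, **kwargs):
+...     return np.atleast_2d(mean)  # Rank candidates by predicted mean only
+>>> X = optimizer.ask(n_candidates=2, acq_func=pure_exploitation)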
+
+
+def run(self, *, max_iter: int | None = None, n_candidates: int | None = None) ‑> sambo._util.OptimizeResult +
+
+
+ +Expand source code +Browse git + +
def run(self, *,
+        max_iter: Optional[int] = None,
+        n_candidates: Optional[int] = None) -> OptimizeResult:
+    """
+    Execute the optimization process for (at most) a specified number of iterations
+    (function evaluations) and return the optimization result.
+
+    This method performs sequential optimization by iteratively proposing candidates using
+    method `ask()`, evaluating the objective function, and updating the optimizer state
+    with method `tell()`.
+    This continues until the maximum number of iterations (`max_iter`) is reached or other
+    stopping criteria are met.
+
+    This method encapsulates the entire optimization workflow, making it convenient
+    to use when you don't need fine-grained control over individual steps (`ask` and `tell`).
+    It cycles between exploration and exploitation by randomly sampling `kappa` appropriately.
+
+    Parameters
+    ----------
+    max_iter : int, optional
+        The maximum number of iterations to perform. If not specified, the
+        default value provided during initialization is used.
+
+    n_candidates : int, optional
+        Number of candidates to propose and evaluate in each iteration. If not specified,
+        the default value provided during initialization is used.
+
+    Returns
+    -------
+    OptimizeResult: OptimizeResult
+        Results of the optimization process.
+
+    Examples
+    --------
+    Run an optimization with a specified number of iterations:
+    >>> result = optimizer.run(max_iter=30)
+    >>> print(result.x, result.fun)  # Best x, y
+    """
+    max_iter = max_iter if max_iter is not None else 0 if self.fun is None else self.max_iter
+    assert callable(self.fun) or max_iter == 0, "Can't run optimizer when fun==None. Can only use ask-tell API."
+    assert n_candidates is None or isinstance(n_candidates, Integral) and n_candidates > 0, n_candidates
+    assert max_iter is None or isinstance(max_iter, Integral) and max_iter >= 0, max_iter
+
+    n_candidates = n_candidates or self.n_candidates
+    success = True
+    message = "Optimization hadn't been started"
+    iteration = 0
+    prev_best_value = np.inf
+    no_change = 0
+    try:
+        for iteration in range(1, max_iter + 1):
+            coefs = [self.rng.uniform(-2, 2) for i in range(n_candidates)]
+            X = self.ask(n_candidates, kappa=coefs)
+            y = self.fun(X)
+            self.tell(y)
+
+            best_value = min(self._y)
+            if self.tol and prev_best_value - best_value < self.tol or prev_best_value == best_value:
+                no_change += 1
+                if no_change == self.n_iter_no_change:
+                    message = 'Optimization converged (y_prev[n_iter_no_change] - y_best < tol)'
+                    break
+            else:
+                assert best_value < prev_best_value
+                no_change = 0
+                prev_best_value = best_value
+
+            if self.disp:
+                print(f"{__package__}: {self.estimator.__class__.__name__} "
+                      f"nit:{iteration}, nfev:{self.fun.func.nfev}, "
+                      f"fun:{np.min(self._y):.5g}")
+    except _ObjectiveFunctionWrapper.CallbackStopIteration:
+        message = 'Optimization callback returned True'
+    except _ObjectiveFunctionWrapper.MaximumFunctionEvaluationsReached:
+        message = f'Maximum function evaluations reached (max_iter = {max_iter})'
+        success = False
+    except KeyboardInterrupt:
+        message = 'KeyboardInterrupt'
+        success = False
+
+    if len(self._X) == 0 and self.fun is not None:
+        # We were interrupted before ._init_once() could finish
+        self._X = self.fun.func.xv
+        self._y = self.fun.func.funv
+
+    x, y = self.top_k(1)
+    result = OptimizeResult(
+        success=success,
+        message=message,
+        x=x,
+        fun=y,
+        nit=iteration,
+        nfev=len(self._y) - (len(self.y0) if self.y0 is not None else 0),
+        xv=np.array(self._X),
+        funv=np.array(self._y),
+        model=list(self.estimators),
+    )
+    return result
+
+

Execute the optimization process for (at most) a specified number of iterations +(function evaluations) and return the optimization result.

+

This method performs sequential optimization by iteratively proposing candidates using +method ask(), evaluating the objective function, and updating the optimizer state +with method tell(). +This continues until the maximum number of iterations (max_iter) is reached or other +stopping criteria are met.

+

This method encapsulates the entire optimization workflow, making it convenient +to use when you don't need fine-grained control over individual steps (ask and tell). +It cycles between exploration and exploitation by randomly sampling kappa appropriately.

+

Parameters

+
+
max_iter : int, optional
+
The maximum number of iterations to perform. If not specified, the +default value provided during initialization is used.
+
n_candidates : int, optional
+
Number of candidates to propose and evaluate in each iteration. If not specified, +the default value provided during initialization is used.
+
+

Returns

+
+
OptimizeResult : OptimizeResult
+
Results of the optimization process.
+
+

Examples

+

Run an optimization with a specified number of iterations:

+
>>> result = optimizer.run(max_iter=30)
+>>> print(result.x, result.fun)  # Best x, y
+
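Functionally, a run() call corresponds roughly to the following ask-tell loop (a simplified sketch: the real method additionally tracks convergence, callbacks and interrupts; objective stands in for your own function):

>>> for _ in range(30):  # max_iter
+...     kappa = [optimizer.rng.uniform(-2, 2)]  # Cycle exploration/exploitation
+...     X = optimizer.ask(n_candidates=1, kappa=kappa)
+...     optimizer.tell([objective(x) for x in X])
+>>> best_x, best_y = optimizer.top_k(1)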
+
+
+def tell(self,
y: float | list[float],
x: float | tuple[float] | list[tuple[float]] | None = None)
+
+
+
+ +Expand source code +Browse git + +
def tell(self, y: float | list[float],
+         x: Optional[float | tuple[float] | list[tuple[float]]] = None):
+    """
+    Provide incremental feedback to the optimizer by reporting back the objective
+    function values (`y`) at suggested or new candidate points (`x`).
+
+    This allows the optimizer to refine its underlying model(s) and better
+    guide subsequent proposals.
+
+    Parameters
+    ----------
+    y : float or list[float]
+        The observed value(s) of the objective function.
+
+    x : float or list[float], optional
+        The input point(s) corresponding to the observed objective function values `y`.
+        If omitted, the optimizer assumes that the `y` values correspond
+        to the most recent candidates proposed by the `ask` method (FIFO).
+
+        .. warning::
+            The function first takes `y`, then `x`, not the other way around!
+
+    Examples
+    --------
+    >>> candidates = optimizer.ask(n_candidates=3)
+    >>> ... # Evaluate candidate solutions IRL and tell it to the optimizer
+    >>> objective_values = [1.7, 3, .8]
+    >>> optimizer.tell(y=objective_values, x=candidates)
+    """
+    y = np.atleast_1d(y)
+    assert y.ndim == 1, 'y= should be at most 1-dimensional'
+    if x is None:
+        if not self._X_ask:
+            raise RuntimeError(
+                f'`{self.tell.__qualname__}(y, x=None)` only allowed as many '
+                f'times as `{self.ask.__qualname__}()` was called beforehand')
+        for x, yval in zip(tuple(self._X_ask), y):
+            self._X_ask.pop(0)
+            self._X.append(x)
+            self._y.append(yval)
+    else:
+        x = np.atleast_2d(x)
+        assert len(x) == len(y), 'y= and x= (if provided) must contain the same number of items'
+        for xi, yi in zip(x, y):
+            try:
+                self._X_ask.pop(self._X_ask.index(tuple(xi)))
+            except (ValueError, IndexError):
+                pass
+            self._X.append(xi)
+            self._y.append(yi)
+    self._fit()
+
+

Provide incremental feedback to the optimizer by reporting back the objective +function values (y) at suggested or new candidate points (x).

+

This allows the optimizer to refine its underlying model(s) and better +guide subsequent proposals.

+

Parameters

+
+
y : float or list[float]
+
The observed value(s) of the objective function.
+
x : float or list[float], optional
+
+

The input point(s) corresponding to the observed objective function values y. +If omitted, the optimizer assumes that the y values correspond +to the most recent candidates proposed by the ask method (FIFO).

+
+

Warning

+

The function first takes y, then x, not the other way around!

+
+
+
+

Examples

+
>>> candidates = optimizer.ask(n_candidates=3)
+>>> ... # Evaluate candidate solutions IRL and tell it to the optimizer
+>>> objective_values = [1.7, 3, .8]
+>>> optimizer.tell(y=objective_values, x=candidates)
+
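When the evaluated points are exactly the most recently suggested ones, x= may be omitted and the observations are matched up FIFO (objective again stands in for your own evaluation):

>>> X = optimizer.ask(n_candidates=2)
+>>> optimizer.tell([objective(x) for x in X])  # x= defaults to the asked points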
+
+
+def top_k(self, k: int = 1) +
+
+
+ +Expand source code +Browse git + +
def top_k(self, k: int = 1):
+    """
+    Based on their objective function values,
+    retrieve the top-k best solutions found by the optimization process so far.
+
+    Parameters
+    ----------
+    k : int, default 1
+        The number of top solutions to retrieve.
+        If `k` exceeds the number of evaluated solutions,
+        all available solutions are returned.
+
+    Returns
+    -------
+    X : np.ndarray
+        A list of best points with shape `(k, n_bounds)`.
+    y : np.ndarray
+        Objective values at points of `X`.
+
+    Examples
+    --------
+    Retrieve the best solution:
+    >>> optimizer.run()
+    >>> best_x, best_y = optimizer.top_k(1)
+    """
+    assert isinstance(k, Integral) and k > 0, k
+    best_index = np.argsort(self._y)
+    index = slice(0, k) if k > 1 else (k - 1)
+    return self._X[best_index[index]], self._y[best_index[index]]
+
+

Based on their objective function values, +retrieve the top-k best solutions found by the optimization process so far.

+

Parameters

+
+
k : int, default 1
+
The number of top solutions to retrieve. +If k exceeds the number of evaluated solutions, +all available solutions are returned.
+
+

Returns

+
+
X : np.ndarray
+
A list of best points with shape (k, n_bounds).
+
y : np.ndarray
+
Objective values at points of X.
+
+

Examples

+

Retrieve the best solution:

+
>>> optimizer.run()
+>>> best_x, best_y = optimizer.top_k(1)
+
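Or retrieve the three best solutions at once, with shapes as documented in the Returns section above:

>>> best_xs, best_ys = optimizer.top_k(3)  # best_xs.shape == (3, n_bounds)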
+
+
+
+
+class SamboSearchCV +(estimator,
param_grid: dict,
*,
max_iter: int = 100,
method: Literal['shgo', 'sceua', 'smbo'] = 'smbo',
rng: int | numpy.random.mtrand.RandomState | numpy.random._generator.Generator | None = None,
**kwargs)
+
+
+
+ +Expand source code +Browse git + +
class SamboSearchCV(BaseSearchCV):
+    """
+    SAMBO hyper-parameter search with cross-validation that can be
+    used to **optimize hyperparameters of machine learning estimator pipelines**
+    like those of scikit-learn.
+    **Similar to `BayesSearchCV`** from _scikit-optimize_ or
+    `GridSearchCV` from _scikit-learn_,
+    but hopefully **much faster for large parameter spaces**.
+
+    Parameters
+    ----------
+    estimator : BaseEstimator
+        The base model or pipeline to optimize parameters for.
+        It needs to implement `fit()` and `predict()` methods.
+
+    param_grid : dict
+        Dictionary with parameters names (str) as keys and lists of parameter
+        choices to try as values. Supports both continuous parameter ranges and
+        discrete/string parameter enumerations.
+
+    max_iter : int, optional, default=100
+        The maximum number of iterations for the optimization.
+
+    method : {'shgo', 'sceua', 'smbo'}, optional, default='smbo'
+        The optimization algorithm to use. See method `sambo.minimize()` for comparison.
+
+    rng : int or np.random.RandomState or np.random.RandomGenerator or None, optional
+        Random seed for reproducibility.
+
+    **kwargs : dict, optional
+        Additional parameters to pass to `BaseSearchCV`
+        (`scoring=`, `n_jobs=`, `refit=`, `cv=`, `verbose=`, `pre_dispatch=`,
+        `error_score=`, `return_train_score=`). For explanation, see documentation
+        on [`GridSearchCV`][skl_gridsearchcv].
+
+        [skl_gridsearchcv]: https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
+
+    Attributes
+    ----------
+    opt_result_ : OptimizeResult
+        The result of the optimization process.
+
+    See Also
+    --------
+    1: https://scikit-learn.org/stable/modules/grid_search.html
+    """
+    def __init__(
+            self,
+            estimator,
+            param_grid: dict,
+            *,
+            max_iter: int = 100,
+            method: Literal['shgo', 'sceua', 'smbo'] = 'smbo',
+            rng: Optional[int | np.random.RandomState | np.random.Generator] = None,
+            **kwargs
+    ):
+        super().__init__(estimator=estimator, **kwargs)
+        self.param_grid = param_grid
+        self.max_iter = max_iter
+        self.method = method
+        self.rng = rng
+
+    def _run_search(self, evaluate_candidates):
+        import joblib
+
+        @lru_cache(key=joblib.hash)  # TODO: lru_cache(max_iter) objective function calls always??
+        def _objective(x):
+            res = evaluate_candidates([dict(zip(self.param_grid.keys(), x))])
+            y = -res['mean_test_score'][-1]
+            nonlocal it
+            it += 1
+            if self.verbose:
+                print(f'{self.__class__.__name__}: it={it}; y={y}; x={x}')
+            return y
+
+        bounds = [((sv := sorted(v))[0], sv[-1] + 1) if all(isinstance(i, Integral) for i in v) else
+                  ((sv := sorted(v))[0], sv[-1]) if all(isinstance(i, Real) for i in v) else
+                  list({i: 1 for i in v})
+                  for v in self.param_grid.values()]
+        kwargs = {}
+        if self.max_iter is not None:
+            kwargs = {'max_iter': self.max_iter}
+
+        from ._minimize import minimize
+
+        it = 0
+        self.opt_result_ = minimize(
+            _objective, bounds=bounds, method=self.method,
+            disp=self.verbose, rng=0, **kwargs)
+
+

SAMBO hyper-parameter search with cross-validation that can be +used to optimize hyperparameters of machine learning estimator pipelines +like those of scikit-learn. +Similar to BayesSearchCV from scikit-optimize or +GridSearchCV from scikit-learn, +but hopefully much faster for large parameter spaces.

+

Parameters

+
+
estimator : BaseEstimator
+
The base model or pipeline to optimize parameters for. +It needs to implement fit() and predict() methods.
+
param_grid : dict
+
Dictionary with parameters names (str) as keys and lists of parameter +choices to try as values. Supports both continuous parameter ranges and +discrete/string parameter enumerations.
+
max_iter : int, optional, default=100
+
The maximum number of iterations for the optimization.
+
method : {'shgo', 'sceua', 'smbo'}, optional, default='smbo'
+
The optimization algorithm to use. See method minimize() for comparison.
+
rng : int or np.random.RandomState or np.random.RandomGenerator or None, optional
+
Random seed for reproducibility.
+
**kwargs : dict, optional
+
+

Additional parameters to pass to BaseSearchCV +(scoring=, n_jobs=, refit=, cv=, verbose=, pre_dispatch=, +error_score=, return_train_score=). For explanation, see documentation +on GridSearchCV.

+
+
+

Attributes

+
+
opt_result_ : OptimizeResult
+
The result of the optimization process.
+
+

See Also

+
+
1
+
https://scikit-learn.org/stable/modules/grid_search.html
+
+
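Examples

A minimal sketch mirroring scikit-learn's search API; the classifier and dataset are placeholders for your own pipeline:

>>> from sklearn.datasets import load_breast_cancer
+>>> from sklearn.tree import DecisionTreeClassifier
+>>> from sambo import SamboSearchCV
+>>> X, y = load_breast_cancer(return_X_y=True)
+>>> param_grid = {'max_depth': list(range(1, 30)),
+...               'criterion': ['gini', 'entropy']}
+>>> search = SamboSearchCV(DecisionTreeClassifier(), param_grid, max_iter=100)
+>>> search.fit(X, y)
+>>> print(search.best_params_, search.opt_result_)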

Ancestors

+
  • sklearn.model_selection._search.BaseSearchCV
  • sklearn.base.MetaEstimatorMixin
  • sklearn.base.BaseEstimator
  • sklearn.utils._repr_html.base.ReprHTMLMixin
  • sklearn.utils._repr_html.base._HTMLDocumentationLinkMixin
  • sklearn.utils._metadata_requests._MetadataRequester
+
+
+
+
+ +
diff --git a/doc/sambo/plot.html b/doc/sambo/plot.html
new file mode 100644
index 0000000..e8fee41
--- /dev/null
+++ b/doc/sambo/plot.html
@@ -0,0 +1,777 @@
+[HTML head omitted]
+
+
+

Module sambo.plot

+
+
+

The module contains functions for plotting +convergence, regret, partial dependence, sequence of evaluations …

+

Example

+
>>> import matplotlib.pyplot as plt
+>>> from scipy.optimize import rosen
+>>> from sambo import minimize
+>>> result = minimize(rosen, bounds=[(-2, 2), (-2, 2)],
+...                   constraints=lambda x: sum(x) <= len(x))
+>>> plot_convergence(result)
+>>> plot_regret(result)
+>>> plot_objective(result)
+>>> plot_evaluations(result)
+>>> plt.show()
+
+
+
+
+
+
+
+

Functions

+
+
+def plot_convergence(*results: sambo._util.OptimizeResult | tuple[str, sambo._util.OptimizeResult],
true_minimum: float | None = None,
xscale: Literal['linear', 'log'] = 'linear',
yscale: Literal['linear', 'log'] = 'linear') ‑> matplotlib.figure.Figure
+
+
+
+ +Expand source code +Browse git + +
def plot_convergence(
+        *results: OptimizeResult | tuple[str, OptimizeResult],
+        true_minimum: Optional[float] = None,
+        xscale: Literal['linear', 'log'] = 'linear',
+        yscale: Literal['linear', 'log'] = 'linear',
+) -> Figure:
+    """
+    Plot one or several convergence traces,
+    showing how an error estimate evolved during the optimization process.
+
+    Parameters
+    ----------
+    *results : OptimizeResult or tuple[str, OptimizeResult]
+        The result(s) for which to plot the convergence trace.
+        In tuple format, the string is used as the legend label
+        for that result.
+
+    true_minimum : float, optional
+        The true minimum *value* of the objective function, if known.
+
+    xscale, yscale : {'linear', 'log'}, optional, default='linear'
+        The scales for the axes.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The matplotlib figure.
+
+    Example
+    -------
+    .. image:: /convergence.svg
+    """
+    assert results, results
+
+    fig = plt.figure()
+    _watermark(fig)
+    ax = plt.gca()
+    ax.set_title("Convergence")
+    ax.set_xlabel("Number of function evaluations $n$")
+    ax.set_ylabel(r"$\min\ f(x)$ after $n$ evaluations")
+    ax.grid()
+    _set_xscale_yscale(ax, xscale, yscale)
+    fig.set_layout_engine('tight')
+
+    MARKER = cycle(_MARKER_SEQUENCE)
+
+    for i, result in enumerate(results, 1):
+        name = f'#{i}' if len(results) > 1 else None
+        if isinstance(result, tuple):
+            name, result = result
+        result = _check_result(result)
+
+        nfev = _check_nfev(result)
+        mins = np.minimum.accumulate(result.funv)
+
+        ax.plot(range(1, nfev + 1), mins,
+                label=name, marker=next(MARKER), markevery=(.05 + .05 * i, .2),
+                linestyle='--', alpha=.7, markersize=6, lw=2)
+
+    if true_minimum is not None:
+        ax.axhline(true_minimum, color="k", linestyle='--', lw=1, label="True minimum")
+
+    if true_minimum is not None or name is not None:
+        ax.legend(loc="upper right")
+
+    return fig
+
+

Plot one or several convergence traces, +showing how an error estimate evolved during the optimization process.

+

Parameters

+
+
*results : OptimizeResult or tuple[str, OptimizeResult]
+
The result(s) for which to plot the convergence trace. +In tuple format, the string is used as the legend label +for that result.
+
true_minimum : float, optional
+
The true minimum value of the objective function, if known.
+
xscale, yscale : {'linear', 'log'}, optional, default='linear'
+
The scales for the axes.
+
+

Returns

+
+
fig : matplotlib.figure.Figure
+
The matplotlib figure.
+
+

Example

+

+
+
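A typical call comparing labeled runs (a sketch; result_shgo and result_smbo stand for two finished OptimizeResults):

>>> fig = plot_convergence(('shgo', result_shgo), ('smbo', result_smbo),
+...                        true_minimum=0, yscale='log')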
+def plot_evaluations(result: sambo._util.OptimizeResult,
*,
bins: int = 10,
names: list[str] | None = None,
plot_dims: list[int] | None = None,
jitter: float = 0.02,
size: float = 1.7,
cmap: str = 'summer') ‑> matplotlib.figure.Figure
+
+
+
+ +Expand source code +Browse git + +
def plot_evaluations(
+        result: OptimizeResult,
+        *,
+        bins: int = 10,
+        names: Optional[list[str]] = None,
+        plot_dims: Optional[list[int]] = None,
+        jitter: float = .02,
+        size: float = _DEFAULT_SUBPLOT_SIZE,
+        cmap: str = 'summer',
+) -> Figure:
+    """Visualize the order in which points were evaluated during optimization.
+
+    This creates a 2D matrix plot where the diagonal plots are histograms
+    that show distribution of samples for each variable.
+
+    Plots below the diagonal are scatter-plots of the sample points,
+    with the color indicating the order in which the samples were evaluated.
+
+    A red star shows the best found parameters.
+
+    Parameters
+    ----------
+    result : `OptimizeResult`
+        The optimization result.
+
+    bins : int, default=10
+        Number of bins to use for histograms on the diagonal. This value is
+        used for real dimensions, whereas categorical and integer dimensions
+        use number of bins equal to their distinct values.
+
+    names : list of str, default=None
+        Labels of the dimension variables. Defaults to `['x0', 'x1', ...]`.
+
+    plot_dims : list of int, default=None
+        List of dimension indices to be included in the plot.
+        Default uses all non-constant dimensions of
+        the search-space.
+
+    jitter : float, default=.02
+        Ratio of jitter to add to scatter plots.
+        Default looks clear for categories of up to about 8 items.
+
+    size : float, default=1.7
+        Height (in inches) of each subplot/facet.
+
+    cmap : str or Colormap, default='summer'
+        Color map for the sequence of scatter points.
+
+    .. todo::
+        Figure out how to lay out multiple Figure objects side-by-side.
+        Alternatively, figure out how to take parameter `ax=` to plot onto.
+        Then we can show a plot of evaluations for each of the built-in methods
+        (`TestDocs.test_make_doc_plots()`).
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        A 2D matrix of subplots.
+
+    Example
+    -------
+    .. image:: /evaluations.svg
+    """
+    result = _check_result(result)
+    space = _check_space(result)
+    plot_dims = _check_plot_dims(plot_dims, space._bounds)
+    n_dims = len(plot_dims)
+    bounds = dict(zip(plot_dims, space._bounds[plot_dims]))
+
+    assert names is None or isinstance(names, Iterable) and len(names) == n_dims, \
+        (names, n_dims, plot_dims)
+
+    x_min = space.transform(np.atleast_2d(result.x))[0]
+    samples = space.transform(result.xv)
+    color = np.arange(len(samples))
+
+    fig, axs = _subplots_grid(n_dims, size, "Sequence & distribution of function evaluations")
+
+    for _i, i in enumerate(plot_dims):
+        for _j, j in enumerate(plot_dims[:_i + 1]):
+            ax = axs[_i, _j]
+            # diagonal histogram
+            if i == j:
+                # if dim.prior == 'log-uniform':
+                #     bins_ = np.logspace(*np.log10(bounds[i]), bins)
+                ax.hist(
+                    samples[:, i],
+                    bins=(int(bounds[i][1] + 1) if space._is_cat(i) else
+                          min(bins, int(bounds[i][1] - bounds[i][0] + 1)) if space._is_int(i) else
+                          bins),
+                    range=None if space._is_cat(i) else bounds[i]
+                )
+            # lower triangle scatter plot
+            elif i > j:
+                x, y = samples[:, j], samples[:, i]
+                if jitter:
+                    x, y = _maybe_jitter(jitter, (j, x), (i, y), space=space)
+                ax.scatter(x, y, c=color, s=40, cmap=cmap, lw=.5, edgecolor='k')
+                ax.scatter(x_min[j], x_min[i], c='#d009', s=400, marker='*', lw=.5, edgecolor='k')
+
+    _format_scatter_plot_axes(fig, axs, space, plot_dims=plot_dims, dim_labels=names, size=size)
+    return fig
+
+

Visualize the order in which points were evaluated during optimization.

+

This creates a 2D matrix plot where the diagonal plots are histograms +that show distribution of samples for each variable.

+

Plots below the diagonal are scatter-plots of the sample points, +with the color indicating the order in which the samples were evaluated.

+

A red star shows the best found parameters.

+

Parameters

+
+
result : OptimizeResult
+
The optimization result.
+
bins : int, default=10
+
Number of bins to use for histograms on the diagonal. This value is +used for real dimensions, whereas categorical and integer dimensions +use number of bins equal to their distinct values.
+
names : list of str, default=None
+
Labels of the dimension variables. Defaults to ['x0', 'x1', ...].
+
plot_dims : list of int, default=None
+
List of dimension indices to be included in the plot. +Default uses all non-constant dimensions of +the search-space.
+
jitter : float, default=.02
+
Ratio of jitter to add to scatter plots. +Default looks clear for categories of up to about 8 items.
+
size : float, default=1.7
+
Height (in inches) of each subplot/facet.
+
cmap : str or Colormap, default='summer'
+
Color map for the sequence of scatter points.
+
+
+

TODO

+

Figure out how to lay out multiple Figure objects side-by-side. +Alternatively, figure out how to take parameter ax= to plot onto. +Then we can show a plot of evaluations for each of the built-in methods +(TestDocs.test_make_doc_plots()).

+
+

Returns

+
+
fig : matplotlib.figure.Figure
+
A 2D matrix of subplots.
+
+

Example

+

+
+
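A typical call (a sketch; result is an OptimizeResult as in the module example):

>>> fig = plot_evaluations(result, names=['x0', 'x1'], bins=10)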
+def plot_objective(result: sambo._util.OptimizeResult,
*,
levels: int = 10,
resolution: int = 16,
n_samples: int = 250,
estimator: str | sambo._util._SklearnLikeRegressor | None = None,
size: float = 1.7,
zscale: Literal['linear', 'log'] = 'linear',
names: list[str] | None = None,
true_minimum: list[float] | list[list[float]] | None = None,
plot_dims: list[int] | None = None,
plot_max_points: int = 200,
jitter: float = 0.02,
cmap: str = 'viridis_r') ‑> matplotlib.figure.Figure
+
+
+
+ +Expand source code +Browse git + +
def plot_objective(
+        result: OptimizeResult,
+        *,
+        levels: int = 10,
+        resolution: int = 16,
+        n_samples: int = 250,
+        estimator: Optional[str | _SklearnLikeRegressor] = None,
+        size: float = _DEFAULT_SUBPLOT_SIZE,
+        zscale: Literal['linear', 'log'] = 'linear',
+        names: Optional[list[str]] = None,
+        true_minimum: Optional[list[float] | list[list[float]]] = None,
+        plot_dims: Optional[list[int]] = None,
+        plot_max_points: int = 200,
+        jitter: float = .02,
+        cmap: str = 'viridis_r',
+) -> Figure:
+    """Plot a 2D matrix of partial dependence plots that show the
+    individual influence of each variable on the objective function.
+
+    The diagonal plots show the effect of a single dimension on the
+    objective function, while the plots below the diagonal show
+    the effect on the objective function when varying two dimensions.
+
+    Partial dependence plot shows how the values of any two variables
+    influence `estimator` predictions after "averaging out"
+    the influence of all other variables.
+
+    Partial dependence is calculated by averaging the objective value
+    for a number of random samples in the search-space,
+    while keeping one or two dimensions fixed at regular intervals. This
+    averages out the effect of varying the other dimensions and shows
+    the influence of just one or two dimensions on the objective function.
+
+    Black dots indicate the points evaluated during optimization.
+
+    A red star indicates the best found minimum (or `true_minimum`,
+    if provided).
+
+    .. note::
+          Partial dependence plot is only an estimation of the surrogate
+          model which in turn is only an estimation of the true objective
+          function that has been optimized. This means the plots show
+          an "estimate of an estimate" and may therefore be quite imprecise,
+          especially if relatively few samples have been collected during the
+          optimization, and especially in regions of the search-space
+          that have been sparsely sampled (e.g. regions far away from the
+          found optimum).
+
+    Parameters
+    ----------
+    result : OptimizeResult
+        The optimization result.
+
+    levels : int, default=10
+        Number of levels to draw on the contour plot, passed directly
+        to `plt.contourf()`.
+
+    resolution : int, default=16
+        Number of points at which to evaluate the partial dependence
+        along each dimension.
+
+    n_samples : int, default=250
+        Number of samples to use for averaging the model function
+        at each of the `resolution` evaluation points.
+
+    estimator : str or sklearn-like regressor, optional
+        The model used to estimate the objective function.
+        Defaults to the last surrogate model fitted during the optimization, if any.
+
+    size : float, default=1.7
+        Height (in inches) of each subplot/facet.
+
+    zscale : {'linear', 'log'}, default='linear'
+        Scale to use for the z axis of the contour plots.
+
+    names : list of str, default=None
+        Labels of the dimension variables. Defaults to `['x0', 'x1', ...]`.
+
+    plot_dims : list of int, default=None
+        List of dimension indices to be included in the plot.
+        Default uses all non-constant dimensions of
+        the search-space.
+
+    true_minimum : list of floats, default=None
+        Value(s) of the red point(s) in the plots.
+        Default uses best found X parameters from the result.
+
+    plot_max_points : int, default=200
+        Plot at most this many randomly-chosen evaluated points
+        overlaying the contour plots.
+
+    jitter : float, default=.02
+        Amount of jitter to add to categorical and integer dimensions.
+        Default looks clear for categories of up to about 8 items.
+
+    cmap : str or Colormap, default='viridis_r'
+        Color map for contour plots, passed directly to
+        `plt.contourf()`.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        A 2D matrix of partial dependence sub-plots.
+
+    Example
+    -------
+    .. image:: /objective.svg
+    """
+    result = _check_result(result)
+    space = _check_space(result)
+    plot_dims = _check_plot_dims(plot_dims, space._bounds)
+    n_dims = len(plot_dims)
+    bounds = dict(zip(plot_dims, space._bounds[plot_dims]))
+
+    assert names is None or isinstance(names, Iterable) and len(names) == n_dims, (n_dims, plot_dims, names)
+
+    if true_minimum is None:
+        true_minimum = result.x
+    true_minimum = np.atleast_2d(true_minimum)
+    assert true_minimum.shape[1] == len(result.x), (true_minimum, result)
+
+    true_minimum = space.transform(true_minimum)
+
+    assert isinstance(plot_max_points, Integral) and plot_max_points >= 0, plot_max_points
+    rng = np.random.default_rng(0)
+    # Sample points to plot, but don't include points exactly at res.x
+    inds = np.setdiff1d(
+        np.arange(len(result.xv)),
+        np.where(np.all(result.xv == result.x, axis=1))[0],
+        assume_unique=True)
+    plot_max_points = min(len(inds), plot_max_points)
+    inds = np.sort(rng.choice(inds, plot_max_points, replace=False))
+
+    x_samples = space.transform(result.xv[inds])
+    samples = space.sample(n_samples)
+
+    assert zscale in ('log', 'linear', None), zscale
+    locator = LogLocator() if zscale == 'log' else None
+
+    fig, axs = _subplots_grid(n_dims, size, "Partial dependence")
+
+    result_estimator = getattr(result, 'model', [None])[-1]
+    from sambo._estimators import _estimator_factory
+
+    if estimator is None and result_estimator is not None:
+        estimator = result_estimator
+    else:
+        _estimator_arg = estimator
+        estimator = _estimator_factory(estimator, bounds, rng=0)
+        if result_estimator is None and _estimator_arg is None:
+            warnings.warn(
+                'The optimization process does not appear to have been '
+                'driven by a model. You can still observe partial dependence '
+                f'of the variables as modeled by estimator={estimator!r}',
+                UserWarning, stacklevel=2)
+        estimator.fit(space.transform(result.xv), result.funv)
+    assert isinstance(estimator, _SklearnLikeRegressor), estimator
+
+    for _i, i in enumerate(plot_dims):
+        for _j, j in enumerate(plot_dims[:_i + 1]):
+            ax = axs[_i, _j]
+            # diagonal line plot
+            if i == j:
+                xi, yi = _partial_dependence(
+                    space, bounds, estimator, i, j=None, sample_points=samples, resolution=resolution)
+                ax.plot(xi, yi)
+                for m in true_minimum:
+                    ax.axvline(m[i], linestyle="--", color="r", lw=1)
+            # lower triangle contour field
+            elif i > j:
+                xi, yi, zi = _partial_dependence(
+                    space, bounds, estimator, i, j, sample_points=samples, resolution=resolution)
+                ax.contourf(xi, yi, zi, levels, locator=locator, cmap=cmap,
+                            alpha=(1 - .2 * int(bool(plot_max_points))))
+                for m in true_minimum:
+                    ax.scatter(m[j], m[i], c='#d00', s=200, lw=.5, marker='*')
+                if plot_max_points:
+                    x, y = x_samples[:, j], x_samples[:, i]
+                    if jitter:
+                        x, y = _maybe_jitter(jitter, (j, x), (i, y), space=space)
+                    ax.scatter(x, y, c='k', s=12, lw=0, alpha=.5)
+
+    _format_scatter_plot_axes(fig, axs, space, plot_dims=plot_dims, dim_labels=names, size=size)
+    return fig
+
+

Plot a 2D matrix of partial dependence plots that show the +individual influence of each variable on the objective function.

+

The diagonal plots show the effect of a single dimension on the +objective function, while the plots below the diagonal show +the effect on the objective function when varying two dimensions.

+

Partial dependence plot shows how the values of any two variables +influence estimator predictions after "averaging out" +the influence of all other variables.

+

Partial dependence is calculated by averaging the objective value +for a number of random samples in the search-space, +while keeping one or two dimensions fixed at regular intervals. This +averages out the effect of varying the other dimensions and shows +the influence of just one or two dimensions on the objective function.

+

Black dots indicate the points evaluated during optimization.

+

A red star indicates the best found minimum (or true_minimum, +if provided).

+
+

Note

+

Partial dependence plot is only an estimation of the surrogate +model which in turn is only an estimation of the true objective +function that has been optimized. This means the plots show +an "estimate of an estimate" and may therefore be quite imprecise, +especially if relatively few samples have been collected during the +optimization, and especially in regions of the search-space +that have been sparsely sampled (e.g. regions far away from the +found optimum).

+
+

Parameters

+
+
result : OptimizeResult
+
The optimization result.
+
levels : int, default=10
+
Number of levels to draw on the contour plot, passed directly +to plt.contourf().
+
resolution : int, default=16
+
Number of points at which to evaluate the partial dependence +along each dimension.
+
n_samples : int, default=250
+
Number of samples to use for averaging the model function +at each of the resolution evaluation points.
+
estimator : str or sklearn-like regressor, optional
+
The model used to estimate the objective function. +Defaults to the last surrogate model fitted during the optimization, if any.
+
size : float, default=1.7
+
Height (in inches) of each subplot/facet.
+
zscale : {'linear', 'log'}, default='linear'
+
Scale to use for the z axis of the contour plots.
+
names : list of str, default=None
+
Labels of the dimension variables. Defaults to ['x0', 'x1', ...].
+
plot_dims : list of int, default=None
+
List of dimension indices to be included in the plot. +Default uses all non-constant dimensions of +the search-space.
+
true_minimum : list of floats, default=None
+
Value(s) of the red point(s) in the plots. +Default uses best found X parameters from the result.
+
plot_max_points : int, default=200
+
Plot at most this many randomly-chosen evaluated points +overlaying the contour plots.
+
jitter : float, default=.02
+
Amount of jitter to add to categorical and integer dimensions. +Default looks clear for categories of up to about 8 items.
+
cmap : str or Colormap, default='viridis_r'
+
Color map for contour plots, passed directly to +plt.contourf().
+
+

Returns

+
+
fig : matplotlib.figure.Figure
+
A 2D matrix of partial dependence sub-plots.
+
+

Example

+

+
+
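A typical call (a sketch; result as in the module example, with a coarser grid for speed):

>>> fig = plot_objective(result, resolution=8, plot_max_points=100)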
+def plot_regret(*results: sambo._util.OptimizeResult | tuple[str, sambo._util.OptimizeResult],
true_minimum: float | None = None,
xscale: Literal['linear', 'log'] = 'linear',
yscale: Literal['linear', 'log'] = 'linear') ‑> matplotlib.figure.Figure
+
+
+
+ +Expand source code +Browse git + +
def plot_regret(
+        *results: OptimizeResult | tuple[str, OptimizeResult],
+        true_minimum: Optional[float] = None,
+        xscale: Literal['linear', 'log'] = 'linear',
+        yscale: Literal['linear', 'log'] = 'linear',
+) -> Figure:
+    """
+    Plot one or several cumulative [regret] traces.
+    Regret is the difference between achieved objective and its optimum.
+
+    [regret]: https://en.wikipedia.org/wiki/Regret_(decision_theory)
+
+    Parameters
+    ----------
+    *results : OptimizeResult or tuple[str, OptimizeResult]
+        The result(s) for which to plot the convergence trace.
+        In tuple format, the string is used as the legend label
+        for that result.
+
+    true_minimum : float, optional
+        The true minimum *value* of the objective function, if known.
+        If unspecified, minimum is assumed to be the minimum of the
+        values found in `results`.
+
+    xscale, yscale : {'linear', 'log'}, optional, default='linear'
+        The scales for the axes.
+
+    Returns
+    -------
+    fig : matplotlib.figure.Figure
+        The matplotlib figure.
+
+    Example
+    -------
+    .. image:: /regret.svg
+    """
+    assert results, results
+
+    fig = plt.figure()
+    _watermark(fig)
+    ax = fig.gca()
+    ax.set_title("Cumulative regret")
+    ax.set_xlabel("Number of function evaluations $n$")
+    ax.set_ylabel(r"Cumulative regret after $n$ evaluations: "
+                  r"$\ \sum_t^n{\,\left[\,f\,\left(x_t\right) - f_{\mathrm{opt}}\,\right]}$")
+    ax.grid()
+    _set_xscale_yscale(ax, xscale, yscale)
+    ax.yaxis.set_major_formatter(FormatStrFormatter('$%.3g$'))
+    fig.set_layout_engine('tight')
+
+    MARKER = cycle(_MARKER_SEQUENCE)
+
+    if true_minimum is None:
+        true_minimum = np.min([
+            np.min((r[1] if isinstance(r, tuple) else r).funv)  # TODO ensure funv???
+            for r in results
+        ])
+
+    for i, result in enumerate(results, 1):
+        name = f'#{i}' if len(results) > 1 else None
+        if isinstance(result, tuple):
+            name, result = result
+        result = _check_result(result)
+
+        nfev = _check_nfev(result)
+        regrets = [np.sum(result.funv[:i] - true_minimum)
+                   for i in range(1, nfev + 1)]
+
+        ax.plot(range(1, nfev + 1), regrets,
+                label=name, marker=next(MARKER), markevery=(.05 + .05 * i, .2),
+                linestyle='--', alpha=.7, markersize=6, lw=2)
+
+    if name is not None:
+        ax.legend(loc="lower right")
+
+    return fig
+
+

Plot one or several cumulative regret traces. +Regret is the difference between achieved objective and its optimum.

+

Parameters

+
+
*results : OptimizeResult or tuple[str, OptimizeResult]
+
The result(s) for which to plot the convergence trace. +In tuple format, the string is used as the legend label +for that result.
+
true_minimum : float, optional
+
The true minimum value of the objective function, if known. +If unspecified, minimum is assumed to be the minimum of the +values found in results.
+
xscale, yscale : {'linear', 'log'}, optional, default='linear'
+
The scales for the axes.
+
+

Returns

+
+
fig : matplotlib.figure.Figure
+
The matplotlib figure.
+
+

Example

+

+
+
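A typical call (a sketch; with the optimum known, regret is measured against it rather than against the best value found):

>>> fig = plot_regret(result, true_minimum=0)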
+
+
+
+
+ +
diff --git a/evaluations.svg b/evaluations.svg
new file mode 100644
index 0000000..3d79b38
--- /dev/null
+++ b/evaluations.svg
@@ -0,0 +1,967 @@
+[SVG figure: "Sequence & distribution of function evaluations" — Matplotlib scatter-matrix plot over axes x0, x1; markup omitted]
diff --git a/icon.png b/icon.png
new file mode 100644
index 0000000..0e40bb7
Binary files /dev/null and b/icon.png differ
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..e139055
--- /dev/null
+++ b/index.html
@@ -0,0 +1,678 @@
+[HTML head omitted]
+
+ +
+
+
+

SAMBO

+

Sequential and model-based optimization [for Python]

+
+
+ +
+
+

+ Say you're a senior baker in a large pharmaceutical company, tasked with shipping a new medicine: a drug for suppressing symptoms of any of the increasingly lucrative health conditions of the developed world. Besides simple paper-pushing and the more menial tasks, your drug-development pipeline involves figuring out the correct, for business reasons quite optimal, combination of hundreds of parameters, such as: ratios of active ingredients, bulking agents, centrifugation times, pH levels, temperature profiles at each processing step, choice of solvents and purification steps, ideal dosage forms, whether to include any of the other common processes of your establishment, and so on.

+ +

+ The problem is: because some processes can't be skipped or sped up, every combination you decide to try takes two of your assistant researchers nearly two weeks of laboratory work. But your new flagship drug is due next Thursday ...

+ +
+
+
+

It is easy to get a thousand prescriptions, but hard to get one single remedy.

+ — Chinese proverb +
+
+ +

+ Thank god for SAMBO, the Rambo of global optimization. It gets in and finds the minimums of the objective criterion function quickly and efficiently, in the least number of evaluations. SAMBO stands for Sequential and Model-Based Optimization. This simple optimization package consists of the following items, each with its own neat, user-friendly, Pythonic interface:

+ +

See the usage examples below.

+
  1. scikit-optimize/scikit-optimize. DOI: 10.5281/zenodo.1157319
  2. SHGO: Simplicial homology global optimization. DOI: 10.1007/s10898-018-0645-y
  3. SMBO: Sequential Model-Based Optimization for General Algorithm Configuration. DOI: 10.1007/978-3-642-25566-3_40
  4. SCE-UA: Effective and efficient global optimization for conceptual rainfall-runoff models. DOI: 10.1029/91WR02985
+
+ +
+ +
+ +
+

Download

+ +
+ +
+

Usage Examples

+ +
+
+

Use case №1: Find global minimum of an objective/cost function

+

+ We quickly find the global optimum of an example 2D Rosenbrock's banana function, constrained to a circle with r=2, all in comparatively few evaluations.

+

While this is a simple 2D example, partial-dependence plots and sequence-of-evaluations plots + generalize well to several dimensions.

+
+
import sambo
+from sambo.plot import *
+# Extras
+import matplotlib.pyplot as plt
+from scipy.optimize import rosen
+
+result = sambo.minimize(
+    rosen, bounds=[(-2., 2.)] * 2,  # Mind the dots
+    constraints=lambda x: sum(x**2) <= 2**len(x),
+    max_iter=50, method='shgo')
+
+plot_convergence(result)
+plot_objective(result)    # Partial dependence
+plot_evaluations(result)  # Sequence of evaluations
+plot_regret(result)
+
+plt.show()
+
+
<class 'sambo.OptimizeResult'>
+
+ message: Optimization terminated successfully.
+ success: True
+     fun: 5.205243704996618e-08
+       x: [ 9.998e-01  9.996e-01]
+    nfev: 68
+      xv: [[ 0.000e+00  0.000e+00]
+           [ 0.000e+00  0.000e+00]
+           ...
+           [ 9.998e-01  9.996e-01]
+           [ 9.998e-01  9.996e-01]]
+    funv: [ 1.000e+00  ...  5.210e-08]
+
+ Convergence of different algorithms: SHGO, SCE-UA, SMBO +
+
+ Partial dependence / objective landscape +
+
+ Sequence of evaluations +
+
+ Cumulative regret +
+
+ +
+
+

Use case №2: Sequential surrogate model-based "Ask-and-Tell" optimization

+

+ When your optimization objective is an external process, you may not be able to express it as a simple Python function. Instead, you may ask the optimizer for the next suggested candidate point x (solution candidate), execute the trial (e.g. the two-week "baking" process), then report your findings (objective result y) back to the optimizer for further consideration and refitting. We call this an "ask-and-tell" API.

+

+ The estimator= can be any object with a scikit-learn fit-predict API, including neural networks and other modern machine-learning models.

+
+
from scipy.optimize import rosen  # Stand-in for the real objective
+from sambo import Optimizer, OptimizeResult
+
+def evaluate(x):
+    ...  # Abstract long and laborious process
+    return rosen(x)
+
+bounds = [(-2., 2.)] * 4  # 4D
+optimizer = Optimizer(
+    fun=None,  # Implies Ask-And-Tell interface
+    bounds=bounds,
+    estimator='gp',  # or bring your own
+)
+
+n_iter = 50
+for i in range(n_iter):
+    suggested_x = optimizer.ask(1)
+    y = [evaluate(x) for x in suggested_x]
+    optimizer.tell(y)
+
+best_x, best_fvals = optimizer.top_k()
+# Continue the optimization ...
+result: OptimizeResult = optimizer.run()
+
+
+ Convergence plot of SMBO estimators +
+
+ +
+
+

Use case №3: Hyperparameter tuning for machine-learning in quasi-logarithmic time

+

+ Use sambo.SamboSearchCV as a drop-in replacement for + GridSearchCV (or even HalvingRandomSearchCV) from scikit-learn + to optimize your machine learning pipeline hyperparameters in sub-linear time, + yet with an algorithm considerably better-informed than simple random search! +

+
+
# Example setup of a scikit-learn pipeline
+from sklearn.datasets import load_breast_cancer
+from sklearn.tree import DecisionTreeClassifier
+from sklearn.model_selection import GridSearchCV
+
+X, y = load_breast_cancer(return_X_y=True)
+clf = DecisionTreeClassifier()
+param_grid = {
+    'max_depth': list(range(1, 30)),
+    'min_samples_split': [2, 5, 10, 20, 50, 100],
+    'min_samples_leaf': list(range(1, 20)),
+    'criterion': ['gini', 'entropy'],
+    'max_features': [None, 'sqrt', 'log2'],
+}
+search = GridSearchCV(clf, param_grid)
+# Trying all ~20k combinations takes a long time ...
+search.fit(X, y)
+print(search.best_params_)
+print(search.best_score_)
+
+# Alternatively ...
+from sambo import SamboSearchCV
+search = SamboSearchCV(clf, param_grid, max_iter=100)
+search.fit(X, y)  # Fast, good enough
+print(search.best_params_)
+print(search.best_score_)
+print(search.opt_result_)
+
+
{'criterion': 'gini',
+ 'max_depth': 6,
+ 'max_features': 'sqrt',
+ 'min_samples_leaf': 1,
+ 'min_samples_split': 5}
+0.947269582406721
+
+{'criterion': 'entropy',
+ 'max_depth': 20,
+ 'max_features': None,
+ 'min_samples_leaf': 5,
+ 'min_samples_split': 6}
+0.9419940696812453
+
+ message: Reached n_iter_no_change (=10) w/o improvement
+ success: True
+     fun: -0.9419940696812453
+       x: [20 6 5 'entropy' None]
+     nit: 26
+    nfev: 77
+      xv: [[15 86 ... 'gini' None]
+           [1 57 ... 'entropy' 'sqrt']
+           ...
+           [19 5 ... 'gini' None]
+           [20 8 ... 'gini' None]]
+    funv: [-9.244e-01 -9.034e-01 ... -9.139e-01 -9.191e-01]
+   model: [GaussianProcessRegressor()]
+
+
+ +
+

Benchmark

+
+
+

+ It's 2020, and if you're still doing particle-swarm, basin-hopping, Monte Carlo, or genetic/evolutionary optimization, you're likely throwing away precious computing cycles at large! According to the published benchmark of the most common optimization algorithm implementations on several popular global optimization functions, including some multi-dimensional ones (2–10D), SAMBO out of the box most often converges to the correct global optimum, in the fewest total objective evaluations, yielding the smallest absolute error, with runtime just as fast as that of the best (full stdout output), proving the implementation sound and justified.

+
+
Method                  Correct %  Evaluations  Error %  Duration
—————————————————————————————————————————————————————————————————
sambo.minimize(smbo)       100%           239        0      25.37
sambo.minimize(sceua)      100%           551        0       0.08
direct †                   100%          1388        0       0.02
dual_annealing †           100%          6462        0       0.27
sambo.minimize(shgo)        92%           219        1       0.03
differential_evolution      92%         13953        0       0.16
scikit-optimize             75%           290        2      60.60
Nelder-Mead †               75%           301       14       0.01
Optuna                      75%           360        2       2.51
nevergrad                   75%          1040        7       4.05
COBYQA                      67%           134        7       0.15
COBYLA                      67%           215       15       0.00
shgo                        67%           241       13       0.01
SLSQP                       67%           266       12       0.01
Powell †                    67%           323       16       0.00
hyperopt                    67%           998        2       9.39
trust-constr                67%          1044        7       0.16
TNC †                       58%           233       16       0.01
basinhopping                58%          3424       21       0.11
CG †                        50%           414       19       0.01
† Non-constrained method; constrained by patching the objective s.t.:

$$\hat f(x) = \begin{cases} f(x), & x \text{ is admissible} \\ \infty, & \text{otherwise} \end{cases}$$
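In code, the patching amounts to roughly the following (a sketch; names are illustrative):

def constrain(f, is_admissible):
    # Inadmissible points receive an infinite objective value
    return lambda x: f(x) if is_admissible(x) else float('inf')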
∗ The following implementations were considered:
  • way too slow: Open-Box, AMPGO;
  • too complex: SMT, HyperBO, DEAP, PyMOO, Pyomo, OSQP.
To consider: jdb78/LIPO, Stefan-Endres/TGO. Speculations welcome.
+
+ +
+
+Contour landscapes of benchmarked functions + +
+

Citation

+

If you find this package useful in your academic research, please consider citing:

+
@software{SAMBO,
+    author = {Kernc},
+    title = {SAMBO: Sequential and Model-Based Optimization: Efficient global optimization in Python},
+    url = {https://sambo-optimization.github.io},
+    doi = {10.5281/zenodo.14461363},
+    year = {2024}
+}
+
+ +
+

What Users are Saying

+
+
+

The proof of [this] program's value is its existence.

+

A. Perlis

+
+
+

We are all tasked to balance and optimize ourselves.

+

M. Jemison

+
+
+

[...] When all else fails, read the instructions.

+

Cahn

+
+
+

You're bound to be unhappy if you optimize everything.

+

D. Knuth

+
+
+

+ After scikit-optimize went MIA, the release of this Bayesian optimization software package is just about optimally timed.

+

B. Kralz

+
+
+
+ +
diff --git a/logo.svg b/logo.svg
new file mode 100644
index 0000000..504fbb6
--- /dev/null
+++ b/logo.svg
@@ -0,0 +1,40 @@
+[SVG: project logo; markup omitted]
diff --git a/objective.svg b/objective.svg
new file mode 100644
index 0000000..440767d
--- /dev/null
+++ b/objective.svg
@@ -0,0 +1,1138 @@
+[SVG figure: "Partial dependence" — Matplotlib contour/line matrix plot over axes x0, x1; markup omitted]
diff --git a/regret.svg b/regret.svg
new file mode 100644
index 0000000..f89d40b
--- /dev/null
+++ b/regret.svg
@@ -0,0 +1,713 @@
+[SVG figure: "Cumulative regret" — traces for method='shgo', method='sceua', method='smbo'; markup omitted]
diff
--git a/robots.txt b/robots.txt new file mode 100644 index 0000000..898c9d8 --- /dev/null +++ b/robots.txt @@ -0,0 +1 @@ +Sitemap: https://sambo-optimization.github.io/sitemap.txt diff --git a/sitemap.txt b/sitemap.txt new file mode 100644 index 0000000..b8c660a --- /dev/null +++ b/sitemap.txt @@ -0,0 +1,4 @@ +https://sambo-optimization.github.io/ +https://sambo-optimization.github.io/doc/doc-search.html +https://sambo-optimization.github.io/doc/sambo/ +https://sambo-optimization.github.io/doc/sambo/plot.html