From 09b11f004829ab6703441d8f1be2fdb5177f2104 Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Fri, 14 Mar 2025 10:02:37 +0000
Subject: [PATCH 1/7] add initial wrapper for migrad

---
 .tools/envs/testenv-linux.yml              |  1 +
 .tools/envs/testenv-numpy.yml              |  1 +
 .tools/envs/testenv-others.yml             |  1 +
 .tools/envs/testenv-pandas.yml             |  1 +
 environment.yml                            |  1 +
 pyproject.toml                             |  1 +
 src/optimagic/algorithms.py                | 17 +++++
 src/optimagic/optimizers/iminuit_migrad.py | 86 ++++++++++++++++++++++
 8 files changed, 109 insertions(+)
 create mode 100644 src/optimagic/optimizers/iminuit_migrad.py

diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml
index cc554a08b..b30bb579d 100644
--- a/.tools/envs/testenv-linux.yml
+++ b/.tools/envs/testenv-linux.yml
@@ -27,6 +27,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
       - DFO-LS>=1.5.3 # dev, tests
      - Py-BOBYQA # dev, tests
diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml
index cea3822ac..3ba24549a 100644
--- a/.tools/envs/testenv-numpy.yml
+++ b/.tools/envs/testenv-numpy.yml
@@ -25,6 +25,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
       - DFO-LS>=1.5.3 # dev, tests
      - Py-BOBYQA # dev, tests
diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml
index 6b3a76db0..d0db9516c 100644
--- a/.tools/envs/testenv-others.yml
+++ b/.tools/envs/testenv-others.yml
@@ -25,6 +25,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
       - DFO-LS>=1.5.3 # dev, tests
      - Py-BOBYQA # dev, tests
diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml
index 6a5e01fda..93295affc 100644
--- a/.tools/envs/testenv-pandas.yml
+++ b/.tools/envs/testenv-pandas.yml
@@ -25,6 +25,7 @@ dependencies:
   - pyyaml # dev, tests
   - jinja2 # dev, tests
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
       - DFO-LS>=1.5.3 # dev, tests
      - Py-BOBYQA # dev, tests
diff --git a/environment.yml b/environment.yml
index 70a63f49a..d98db5e44 100644
--- a/environment.yml
+++ b/environment.yml
@@ -36,6 +36,7 @@ dependencies:
   - jinja2 # dev, tests
   - furo # dev, docs
   - annotated-types # dev, tests
+  - iminuit # dev, tests
   - pip: # dev, tests, docs
       - DFO-LS>=1.5.3 # dev, tests
      - Py-BOBYQA # dev, tests
diff --git a/pyproject.toml b/pyproject.toml
index 40b93ff8d..f458cfd44 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -16,6 +16,7 @@ dependencies = [
     "sqlalchemy>=1.3",
     "annotated-types",
     "typing-extensions",
+    "iminuit",
 ]
 dynamic = ["version"]
 keywords = [
diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py
index a892f5a51..a4a5ec685 100644
--- a/src/optimagic/algorithms.py
+++ b/src/optimagic/algorithms.py
@@ -14,6 +14,7 @@
 from optimagic.optimization.algorithm import Algorithm
 from optimagic.optimizers.bhhh import BHHH
 from optimagic.optimizers.fides import Fides
+from optimagic.optimizers.iminuit_migrad import IminuitMigrad
 from optimagic.optimizers.ipopt import Ipopt
 from optimagic.optimizers.nag_optimizers import NagDFOLS, NagPyBOBYQA
 from optimagic.optimizers.neldermead import NelderMeadParallel
@@ -485,6 +486,7 @@ def Scalar(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms

 @dataclass(frozen=True)
 class BoundedGradientFreeLocalScalarAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -1150,6 +1152,7 @@ def Scalar(self) -> GlobalGradientFreeParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeLocalAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -1199,6 +1202,7 @@ def Scalar(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeLocalScalarAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -1290,6 +1294,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeScalarAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -1674,6 +1679,7 @@ def Scalar(self) -> BoundedLocalNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedLocalScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -2160,6 +2166,7 @@ def Scalar(self) -> GlobalGradientFreeScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeLocalAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
@@ -2200,6 +2207,7 @@ def Scalar(self) -> GradientFreeLocalScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -2296,6 +2304,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeScalarAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -2577,6 +2586,7 @@ def Scalar(self) -> GlobalParallelScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedLocalAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
@@ -2659,6 +2669,7 @@ def Scalar(self) -> LocalNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class LocalScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
@@ -2809,6 +2820,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -3115,6 +3127,7 @@ def Scalar(self) -> GradientBasedScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeAlgorithms(AlgoSelection):
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
@@ -3246,6 +3259,7 @@ def Scalar(self) -> GlobalScalarAlgorithms:
 class LocalAlgorithms(AlgoSelection):
     bhhh: Type[BHHH] = BHHH
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
@@ -3316,6 +3330,7 @@ def Scalar(self) -> LocalScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
@@ -3451,6 +3466,7 @@ def Scalar(self) -> NonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class ScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
@@ -3625,6 +3641,7 @@ def Scalar(self) -> ParallelScalarAlgorithms:
 class Algorithms(AlgoSelection):
     bhhh: Type[BHHH] = BHHH
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
new file mode 100644
index 000000000..9170cfb0d
--- /dev/null
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -0,0 +1,86 @@
+from dataclasses import dataclass
+from typing import Optional
+
+import numpy as np
+from iminuit import Minuit  # type: ignore
+from numpy.typing import NDArray
+
+from optimagic import mark
+from optimagic.optimization.algo_options import (
+    STOPPING_MAXFUN,
+    STOPPING_MAXITER,
+)
+from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
+from optimagic.optimization.internal_optimization_problem import (
+    InternalOptimizationProblem,
+)
+from optimagic.typing import AggregationLevel
+
+
+@mark.minimizer(
+    name="iminuit_migrad",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=True,
+    is_global=False,
+    needs_jac=False,
+    needs_hess=False,
+    supports_parallelism=False,
+    supports_bounds=True,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class IminuitMigrad(Algorithm):
+    stopping_maxfun: int = STOPPING_MAXFUN
+    stopping_maxiter: int = STOPPING_MAXITER
+    errordef: Optional[float] = None
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, params: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        def wrapped_objective(x: NDArray[np.float64]) -> float:
+            return float(problem.fun(x))
+
+        m = Minuit(
+            wrapped_objective,
+            params,
+        )
+
+        if problem.bounds:
+            lower_bounds = problem.bounds.lower
+            upper_bounds = problem.bounds.upper
+
+            if lower_bounds is not None and upper_bounds is not None:
+                for i, (lower, upper) in enumerate(
+                    zip(lower_bounds, upper_bounds, strict=False)
+                ):
+                    if lower is not None or upper is not None:
+                        m.limits[i] = (lower, upper)
+
+        m.migrad(
+            ncall=self.stopping_maxfun,
+            iterate=self.stopping_maxiter,
+        )
+        print(m.params)
+
+        res = process_minuit_result(m)
+        return res
+
+
+def process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:
+    x = np.array(minuit_result.values)
+    fun = minuit_result.fval
+    success = minuit_result.valid
+    message = repr(minuit_result.fmin)
+    jac: Optional[NDArray[np.float64]] = None
+    if hasattr(minuit_result, "gradient"):
+        jac = np.array(minuit_result.gradient)
+
+    return InternalOptimizeResult(
+        x=x,
+        fun=fun,
+        success=success,
+        message=message,
+        jac=jac,
+    )

From 38ce9e41fb215d5ed89396f56c8833b22f8bd94f Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Fri, 14 Mar 2025 20:44:45 +0000
Subject: [PATCH 2/7] add hess and minos

---
 src/optimagic/optimizers/iminuit_migrad.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
index 9170cfb0d..e75406444 100644
--- a/src/optimagic/optimizers/iminuit_migrad.py
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -77,10 +77,18 @@ def process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:
     if hasattr(minuit_result, "gradient"):
         jac = np.array(minuit_result.gradient)

+    hessian = np.array(minuit_result.hesse().params)
+    covariance = np.array(minuit_result.covariance)
+
+    info = {"minos": minuit_result.minos()}
+
     return InternalOptimizeResult(
         x=x,
         fun=fun,
         success=success,
         message=message,
         jac=jac,
+        hess=hessian,
+        hess_inv=covariance,
+        info=info,
     )

From fcd9443e3e57a40d7890446408a4f01468eecd6f Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Fri, 21 Mar 2025 15:36:48 +0000
Subject: [PATCH 3/7] add support for custom derivative passing to migrad

---
 src/optimagic/algorithms.py                | 16 ++--
 src/optimagic/optimizers/iminuit_migrad.py | 80 ++++++++++++----------
 2 files changed, 52 insertions(+), 44 deletions(-)

diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py
index a4a5ec685..540853192 100644
--- a/src/optimagic/algorithms.py
+++ b/src/optimagic/algorithms.py
@@ -287,6 +287,7 @@ def Scalar(self) -> BoundedGradientBasedLocalNonlinearConstrainedScalarAlgorithm
 @dataclass(frozen=True)
 class BoundedGradientBasedLocalScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -486,7 +487,6 @@ def Scalar(self) -> BoundedGradientFreeLocalNonlinearConstrainedScalarAlgorithms
 @dataclass(frozen=True)
 class BoundedGradientFreeLocalScalarAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -842,6 +842,7 @@ def NonlinearConstrained(
 @dataclass(frozen=True)
 class BoundedGradientBasedLocalAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -891,6 +892,7 @@ def Scalar(self) -> GradientBasedLocalNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class GradientBasedLocalScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -958,6 +960,7 @@ def Scalar(self) -> BoundedGradientBasedNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedGradientBasedScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -1152,7 +1155,6 @@ def Scalar(self) -> GlobalGradientFreeParallelScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeLocalAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -1202,7 +1204,6 @@ def Scalar(self) -> GradientFreeLocalNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeLocalScalarAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -1294,7 +1295,6 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeScalarAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
     nlopt_cobyla: Type[NloptCOBYLA] = NloptCOBYLA
@@ -1949,6 +1949,7 @@ def Scalar(self) -> GlobalGradientBasedScalarAlgorithms:
 class GradientBasedLocalAlgorithms(AlgoSelection):
     bhhh: Type[BHHH] = BHHH
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -1991,6 +1992,7 @@ def Scalar(self) -> GradientBasedLocalScalarAlgorithms:
 @dataclass(frozen=True)
 class BoundedGradientBasedAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -2060,6 +2062,7 @@ def Scalar(self) -> GradientBasedNonlinearConstrainedScalarAlgorithms:
 @dataclass(frozen=True)
 class GradientBasedScalarAlgorithms(AlgoSelection):
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -2166,7 +2169,6 @@ def Scalar(self) -> GlobalGradientFreeScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeLocalAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
@@ -2207,7 +2209,6 @@ def Scalar(self) -> GradientFreeLocalScalarAlgorithms:

 @dataclass(frozen=True)
 class BoundedGradientFreeAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -2304,7 +2305,6 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeScalarAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
     nlopt_bobyqa: Type[NloptBOBYQA] = NloptBOBYQA
@@ -3075,6 +3075,7 @@ def Local(self) -> LeastSquaresLocalParallelAlgorithms:
 class GradientBasedAlgorithms(AlgoSelection):
     bhhh: Type[BHHH] = BHHH
     fides: Type[Fides] = Fides
+    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     ipopt: Type[Ipopt] = Ipopt
     nlopt_ccsaq: Type[NloptCCSAQ] = NloptCCSAQ
     nlopt_lbfgsb: Type[NloptLBFGSB] = NloptLBFGSB
@@ -3127,7 +3128,6 @@ def Scalar(self) -> GradientBasedScalarAlgorithms:

 @dataclass(frozen=True)
 class GradientFreeAlgorithms(AlgoSelection):
-    iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad
     nag_dfols: Type[NagDFOLS] = NagDFOLS
     nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA
     neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel
diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
index e75406444..b05a9068f 100644
--- a/src/optimagic/optimizers/iminuit_migrad.py
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -1,5 +1,5 @@
 from dataclasses import dataclass
-from typing import Optional
+from typing import Callable, Optional

 import numpy as np
 from iminuit import Minuit  # type: ignore
 from numpy.typing import NDArray
@@ -8,7 +8,6 @@
 from optimagic import mark
 from optimagic.optimization.algo_options import (
     STOPPING_MAXFUN,
-    STOPPING_MAXITER,
 )
 from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
 from optimagic.optimization.internal_optimization_problem import (
@@ -22,7 +21,7 @@
     solver_type=AggregationLevel.SCALAR,
     is_available=True,
     is_global=False,
-    needs_jac=False,
+    needs_jac=True,
     needs_hess=False,
     supports_parallelism=False,
     supports_bounds=True,
@@ -33,8 +32,6 @@
 @dataclass(frozen=True)
 class IminuitMigrad(Algorithm):
     stopping_maxfun: int = STOPPING_MAXFUN
-    stopping_maxiter: int = STOPPING_MAXITER
-    errordef: Optional[float] = None

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, params: NDArray[np.float64]
@@ -42,10 +39,23 @@ def wrapped_objective(x: NDArray[np.float64]) -> float:
             return float(problem.fun(x))

-        m = Minuit(
-            wrapped_objective,
-            params,
-        )
+        wrapped_gradient = None
+        jac_func = None
+        if problem.jac is not None:
+
+            def wrapped_gradient(x: NDArray[np.float64]) -> NDArray[np.float64]:
+                return problem.jac(x)
+
+            jac_func = problem.jac
+        elif problem.fun_and_jac is not None:
+
+            def wrapped_gradient(x: NDArray[np.float64]) -> NDArray[np.float64]:
+                _, jac = problem.fun_and_jac(x)
+                return jac
+
+            jac_func = lambda x: problem.fun_and_jac(x)[1]
+
+        m = Minuit(wrapped_objective, params, grad=wrapped_gradient)

         if problem.bounds:
             lower_bounds = problem.bounds.lower
@@ -58,37 +68,35 @@ def wrapped_objective(x: NDArray[np.float64]) -> float:
                     if lower is not None or upper is not None:
                         m.limits[i] = (lower, upper)

-        m.migrad(
-            ncall=self.stopping_maxfun,
-            iterate=self.stopping_maxiter,
-        )
-        print(m.params)
+        m.migrad(ncall=self.stopping_maxfun)

-        res = process_minuit_result(m)
+        res = process_minuit_result(m, jac_func)
         return res


-def process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:
-    x = np.array(minuit_result.values)
-    fun = minuit_result.fval
-    success = minuit_result.valid
-    message = repr(minuit_result.fmin)
-    jac: Optional[NDArray[np.float64]] = None
-    if hasattr(minuit_result, "gradient"):
-        jac = np.array(minuit_result.gradient)
-
-    hessian = np.array(minuit_result.hesse().params)
-    covariance = np.array(minuit_result.covariance)
-
-    info = {"minos": minuit_result.minos()}
+def process_minuit_result(
+    minuit_result: Minuit,
+    jac_fun: Optional[Callable[[NDArray[np.float64]], NDArray[np.float64]]] = None,
+) -> InternalOptimizeResult:
+    jac = None
+    if jac_fun is not None:
+        jac = jac_fun(np.array(minuit_result.values))

-    return InternalOptimizeResult(
-        x=x,
-        fun=fun,
-        success=success,
-        message=message,
+    res = InternalOptimizeResult(
+        x=np.array(minuit_result.values),
+        fun=minuit_result.fval,
+        success=minuit_result.valid,
+        message=repr(minuit_result.fmin),
+        n_fun_evals=minuit_result.nfcn,
+        n_jac_evals=minuit_result.ngrad,
+        n_hess_evals=None,
+        n_iterations=None,
+        status=None,
         jac=jac,
-        hess=hessian,
-        hess_inv=covariance,
-        info=info,
+        hess=np.array(minuit_result.hesse()),
+        hess_inv=np.array(minuit_result.covariance),
+        max_constraint_violation=None,
+        info={"minos": minuit_result.minos()},
+        history=None,
     )
+    return res

From 4292b636a2b0bcf850555dcfc24ba1edd3551189 Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Sat, 22 Mar 2025 19:06:30 +0000
Subject: [PATCH 4/7] add corrections

---
 src/optimagic/optimizers/iminuit_migrad.py | 131 ++++++++++++-------
 1 file changed, 90 insertions(+), 41 deletions(-)

diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
index b05a9068f..2524e9ac7 100644
--- a/src/optimagic/optimizers/iminuit_migrad.py
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -1,5 +1,5 @@
 from dataclasses import dataclass
-from typing import Callable, Optional
+from typing import Optional

 import numpy as np
 from iminuit import Minuit  # type: ignore
 from numpy.typing import NDArray
@@ -8,6 +8,7 @@
 from optimagic import mark
 from optimagic.optimization.algo_options import (
     STOPPING_MAXFUN,
+    STOPPING_MAXITER,
 )
 from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
 from optimagic.optimization.internal_optimization_problem import (
@@ -32,6 +33,7 @@
 @dataclass(frozen=True)
 class IminuitMigrad(Algorithm):
     stopping_maxfun: int = STOPPING_MAXFUN
+    stopping_maxiter: int = STOPPING_MAXITER

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, params: NDArray[np.float64]
@@ -39,48 +41,24 @@ def wrapped_objective(x: NDArray[np.float64]) -> float:
             return float(problem.fun(x))

-        wrapped_gradient = None
-        jac_func = None
-        if problem.jac is not None:
+        m = Minuit(wrapped_objective, params, grad=problem.jac)

-            def wrapped_gradient(x: NDArray[np.float64]) -> NDArray[np.float64]:
-                return problem.jac(x)
+        bounds = _convert_bounds_to_minuit_limits(
+            problem.bounds.lower, problem.bounds.upper
+        )
+        _set_minuit_limits(m, bounds)

-            jac_func = problem.jac
-        elif problem.fun_and_jac is not None:
-
-            def wrapped_gradient(x: NDArray[np.float64]) -> NDArray[np.float64]:
-                _, jac = problem.fun_and_jac(x)
-                return jac
-
-            jac_func = lambda x: problem.fun_and_jac(x)[1]
-
-        m = Minuit(wrapped_objective, params, grad=wrapped_gradient)
-
-        if problem.bounds:
-            lower_bounds = problem.bounds.lower
-            upper_bounds = problem.bounds.upper
-
-            if lower_bounds is not None and upper_bounds is not None:
-                for i, (lower, upper) in enumerate(
-                    zip(lower_bounds, upper_bounds, strict=False)
-                ):
-                    if lower is not None or upper is not None:
-                        m.limits[i] = (lower, upper)
-
-        m.migrad(ncall=self.stopping_maxfun)
-
-        res = process_minuit_result(m, jac_func)
+        m.migrad(
+            ncall=self.stopping_maxfun,
+            iterate=self.stopping_maxiter,  # review
+        )
+
+        res = process_minuit_result(m)
         return res


-def process_minuit_result(
-    minuit_result: Minuit,
-    jac_fun: Optional[Callable[[NDArray[np.float64]], NDArray[np.float64]]] = None,
-) -> InternalOptimizeResult:
-    jac = None
-    if jac_fun is not None:
-        jac = jac_fun(np.array(minuit_result.values))
+def process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:
+    """Convert iminuit result to Optimagic's internal result format."""

     res = InternalOptimizeResult(
         x=np.array(minuit_result.values),
@@ -90,13 +68,84 @@ def process_minuit_result(
         n_fun_evals=minuit_result.nfcn,
         n_jac_evals=minuit_result.ngrad,
         n_hess_evals=None,
-        n_iterations=None,
+        n_iterations=minuit_result.nfcn,
         status=None,
-        jac=jac,
-        hess=np.array(minuit_result.hesse()),
+        jac=None,
+        hess=None,
         hess_inv=np.array(minuit_result.covariance),
         max_constraint_violation=None,
-        info={"minos": minuit_result.minos()},
+        info=None,
         history=None,
     )
     return res
+
+
+def _convert_bounds_to_minuit_limits(
+    lower_bounds: Optional[NDArray[np.float64]],
+    upper_bounds: Optional[NDArray[np.float64]],
+) -> list[tuple[Optional[float], Optional[float]]]:
+    """Convert optimization bounds to Minuit-compatible limit format.
+
+    Transforms numpy arrays of bounds into a list of tuples as expected by iminuit.
+    Handles special values like np.inf, -np.inf, and np.nan by converting
+    them to None where appropriate, as required by Minuit's limits API.
+
+    Parameters
+    ----------
+    lower_bounds : Optional[NDArray[np.float64]]
+        Array of lower bounds for parameters.
+    upper_bounds : Optional[NDArray[np.float64]]
+        Array of upper bounds for parameters.
+
+    Returns
+    -------
+    list[tuple[Optional[float], Optional[float]]]
+        List of (lower, upper) limit tuples in Minuit format, where:
+        - None indicates unbounded (equivalent to infinity)
+        - Float values represent actual bounds
+
+    Notes
+    -----
+    Minuit expects bounds as tuples of (lower, upper) where:
+    - `None` indicates no bound (equivalent to -inf or +inf)
+    - A finite float value indicates a specific bound
+    - Bounds can be asymmetric (e.g., one side bounded, one side not)
+
+    """
+    if lower_bounds is None or upper_bounds is None:
+        return []
+
+    return [
+        (
+            None if np.isneginf(lower) or np.isnan(lower) else float(lower),
+            None if np.isposinf(upper) or np.isnan(upper) else float(upper),
+        )
+        for lower, upper in zip(lower_bounds, upper_bounds, strict=True)
+    ]
+
+
+def _set_minuit_limits(
+    m: Minuit, bounds: list[tuple[Optional[float], Optional[float]]]
+) -> None:
+    """Set parameter limits on a Minuit minimizer instance.
+
+    Applies the converted bounds to an iminuit.Minuit object. Minuit expects
+    parameter limits as tuples of (lower, upper) for each parameter, where
+    None indicates an unbounded direction.
+
+    Parameters
+    ----------
+    m : Minuit
+        The iminuit minimizer instance to configure.
+    bounds : list[tuple[Optional[float], Optional[float]]]
+        List of parameter bounds as (lower, upper) tuples in Minuit format.
+        For each tuple:
+        - (None, None): Fully unbounded parameter
+        - (value, None): Lower bound only
+        - (None, value): Upper bound only
+        - (min, max): Two-sided constraint
+
+    """
+    for i, (lower, upper) in enumerate(bounds):
+        if lower is not None or upper is not None:
+            m.limits[i] = (lower, upper)

From 8ed506ddf080c1384ce86e10585eda6b3a2d5d20 Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Sun, 23 Mar 2025 17:49:07 +0000
Subject: [PATCH 5/7] make process minuit result private

---
 src/optimagic/optimizers/iminuit_migrad.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
index 2524e9ac7..9095b57a0 100644
--- a/src/optimagic/optimizers/iminuit_migrad.py
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -53,11 +53,11 @@ def wrapped_objective(x: NDArray[np.float64]) -> float:
             iterate=self.stopping_maxiter,  # review
         )

-        res = process_minuit_result(m)
+        res = _process_minuit_result(m)
         return res


-def process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:
+def _process_minuit_result(minuit_result: Minuit) -> InternalOptimizeResult:
     """Convert iminuit result to Optimagic's internal result format."""

     res = InternalOptimizeResult(

From 4bdd45b3d7dab2c81ea5d5c5828cc9f876864aa9 Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Tue, 25 Mar 2025 22:54:18 +0000
Subject: [PATCH 6/7] add conditional import

---
 pyproject.toml                             | 1 +
 src/optimagic/config.py                    | 8 ++++++++
 src/optimagic/optimizers/iminuit_migrad.py | 7 +++++--
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index f458cfd44..bfa2310c2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -379,5 +379,6 @@ module = [
     "optimagic._version",
     "annotated_types",
     "pdbp",
+    "iminuit",
 ]
 ignore_missing_imports = true
diff --git a/src/optimagic/config.py b/src/optimagic/config.py
index d63ef54ac..c41a3f6f1 100644
--- a/src/optimagic/config.py
+++ b/src/optimagic/config.py
@@ -92,6 +92,14 @@
 IS_NUMBA_INSTALLED = True


+try:
+    import iminuit  # noqa: F401
+except ImportError:
+    IS_IMINUIT_INSTALLED = False
+else:
+    IS_IMINUIT_INSTALLED = True
+
+
 # ======================================================================================
 # Check if pandas version is newer or equal to version 2.1.0
 # ======================================================================================
diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
index 9095b57a0..3a3c73893 100644
--- a/src/optimagic/optimizers/iminuit_migrad.py
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -2,10 +2,10 @@
 from typing import Optional

 import numpy as np
-from iminuit import Minuit  # type: ignore
 from numpy.typing import NDArray

 from optimagic import mark
+from optimagic.config import IS_IMINUIT_INSTALLED
 from optimagic.optimization.algo_options import (
     STOPPING_MAXFUN,
     STOPPING_MAXITER,
@@ -16,11 +16,14 @@
 )
 from optimagic.typing import AggregationLevel

+if IS_IMINUIT_INSTALLED:
+    from iminuit import Minuit
+

 @mark.minimizer(
     name="iminuit_migrad",
     solver_type=AggregationLevel.SCALAR,
-    is_available=True,
+    is_available=IS_IMINUIT_INSTALLED,
     is_global=False,
     needs_jac=True,
     needs_hess=False,

From 9040028bd3ae49a236d90cd636379f599a725b38 Mon Sep 17 00:00:00 2001
From: spline2hg <181270613+spline2hg@users.noreply.github.com>
Date: Tue, 1 Apr 2025 00:55:58 +0000
Subject: [PATCH 7/7] fix iterate and add docs

---
 docs/source/algorithms.md                  | 48 ++++++++++
 docs/source/refs.bib                       | 13 +++
 src/optimagic/optimization/algo_options.py | 13 +++
 src/optimagic/optimizers/iminuit_migrad.py | 38 ++------
 .../optimizers/test_iminuit_migrad.py      | 95 +++++++++++++++++++
 5 files changed, 176 insertions(+), 31 deletions(-)
 create mode 100644 tests/optimagic/optimizers/test_iminuit_migrad.py

diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md
index 4ec46b6b5..b34234ecf 100644
--- a/docs/source/algorithms.md
+++ b/docs/source/algorithms.md
@@ -3913,6 +3913,54 @@ addition to optimagic when using an NLOPT algorithm. To install nlopt run
     10 * (number of parameters + 1).
 ```

+## Optimizers from iminuit
+
+optimagic supports the [MIGRAD optimizer](https://iminuit.readthedocs.io/) from iminuit. To
+use MIGRAD, you need to have
+[the iminuit package](https://github.com/scikit-hep/iminuit) installed (pip install
+iminuit).
+
+```{eval-rst}
+.. dropdown:: iminuit_migrad
+
+    .. code-block::
+
+        "iminuit_migrad"
+
+    `MIGRAD `_ is
+    the workhorse algorithm of the MINUIT optimization suite, which has been widely used in the
+    high-energy physics community since 1975. The iminuit package is a Python interface to the
+    Minuit2 C++ library developed by CERN.
+
+    Migrad uses a quasi-Newton method, updating the Hessian matrix iteratively
+    to guide the optimization. The algorithm adapts dynamically to challenging landscapes
+    using several key techniques:
+
+    - **Quasi-Newton updates**: The Hessian is updated iteratively rather than recalculated at
+      each step, improving efficiency.
+    - **Steepest descent fallback**: When the Hessian update fails, Migrad falls back to steepest
+      descent with line search.
+    - **Box constraints handling**: Parameters with bounds are transformed internally to ensure
+      they remain within allowed limits.
+    - **Heuristics for numerical stability**: Special cases such as flat gradients or singular
+      Hessians are managed using pre-defined heuristics.
+    - **Stopping criteria based on Estimated Distance to Minimum (EDM)**: The optimization halts
+      when the predicted improvement becomes sufficiently small.
+
+    For details see :cite:`JAMES1975343`.
+
+    **Optimizer Parameters:**
+
+    - **stopping.maxfun** (int): Maximum number of function evaluations. If reached, the
+      optimization stops, but this is not counted as successful convergence. Function evaluations
+      used for numerical gradient calculations do not count toward this limit. Default is
+      1,000,000.
+
+    - **n_restarts** (int): Number of times to restart the optimizer if convergence is not
+      reached.
+
+      - A value of 1 (the default) indicates that the optimizer will only run once, disabling
+        the restart feature.
+      - Values greater than 1 specify the maximum number of restart attempts.
+```
+
 ## References

 ```{eval-rst}
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
index bdae3f4f2..45f183b84 100644
--- a/docs/source/refs.bib
+++ b/docs/source/refs.bib
@@ -893,4 +893,17 @@ @book{Conn2009
   URL = {https://epubs.siam.org/doi/abs/10.1137/1.9780898718768},
 }

+@article{JAMES1975343,
+title = {Minuit - a system for function minimization and analysis of the parameter errors and correlations},
+journal = {Computer Physics Communications},
+volume = {10},
+number = {6},
+pages = {343-367},
+year = {1975},
+issn = {0010-4655},
+doi = {https://doi.org/10.1016/0010-4655(75)90039-9},
+url = {https://www.sciencedirect.com/science/article/pii/0010465575900399},
+author = {F. James and M. Roos}
+}
+
 @Comment{jabref-meta: databaseType:bibtex;}
diff --git a/src/optimagic/optimization/algo_options.py b/src/optimagic/optimization/algo_options.py
index 1846ba081..2f7c6fca5 100644
--- a/src/optimagic/optimization/algo_options.py
+++ b/src/optimagic/optimization/algo_options.py
@@ -122,6 +122,19 @@
 """


+N_RESTARTS = 1
+"""int: Number of times to restart the optimizer if convergence is not reached.
+    This parameter controls how many times the optimization process is restarted
+    in an attempt to achieve convergence.
+
+    - A value of 1 (the default) indicates that the optimizer will only run once,
+      disabling the restart feature.
+    - Values greater than 1 specify the maximum number of restart attempts.
+
+    Note: This is distinct from `STOPPING_MAXITER`, which limits the number of
+    iterations within a single optimizer run, not the number of restarts.
+"""
+
+
 def get_population_size(population_size, x, lower_bound=10):
     """Default population size for genetic algorithms."""
diff --git a/src/optimagic/optimizers/iminuit_migrad.py b/src/optimagic/optimizers/iminuit_migrad.py
index 3a3c73893..c58e0f014 100644
--- a/src/optimagic/optimizers/iminuit_migrad.py
+++ b/src/optimagic/optimizers/iminuit_migrad.py
@@ -7,8 +7,8 @@
 from optimagic import mark
 from optimagic.config import IS_IMINUIT_INSTALLED
 from optimagic.optimization.algo_options import (
+    N_RESTARTS,
     STOPPING_MAXFUN,
-    STOPPING_MAXITER,
 )
 from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
 from optimagic.optimization.internal_optimization_problem import (
@@ -36,7 +36,7 @@
 @dataclass(frozen=True)
 class IminuitMigrad(Algorithm):
     stopping_maxfun: int = STOPPING_MAXFUN
-    stopping_maxiter: int = STOPPING_MAXITER
+    n_restarts: int = N_RESTARTS

     def _solve_internal_problem(
         self, problem: InternalOptimizationProblem, params: NDArray[np.float64]
@@ -49,11 +49,14 @@ def wrapped_objective(x: NDArray[np.float64]) -> float:
         bounds = _convert_bounds_to_minuit_limits(
             problem.bounds.lower, problem.bounds.upper
         )
-        _set_minuit_limits(m, bounds)
+
+        for i, (lower, upper) in enumerate(bounds):
+            if lower is not None or upper is not None:
+                m.limits[i] = (lower, upper)

         m.migrad(
             ncall=self.stopping_maxfun,
-            iterate=self.stopping_maxiter,  # review
+            iterate=self.n_restarts,
         )

         res = _process_minuit_result(m)
@@ -125,30 +128,3 @@ def _convert_bounds_to_minuit_limits(
         )
         for lower, upper in zip(lower_bounds, upper_bounds, strict=True)
     ]
-
-
-def _set_minuit_limits(
-    m: Minuit, bounds: list[tuple[Optional[float], Optional[float]]]
-) -> None:
-    """Set parameter limits on a Minuit minimizer instance.
-
-    Applies the converted bounds to an iminuit.Minuit object. Minuit expects
-    parameter limits as tuples of (lower, upper) for each parameter, where
-    None indicates an unbounded direction.
-
-    Parameters
-    ----------
-    m : Minuit
-        The iminuit minimizer instance to configure.
-    bounds : list[tuple[Optional[float], Optional[float]]]
-        List of parameter bounds as (lower, upper) tuples in Minuit format.
-        For each tuple:
-        - (None, None): Fully unbounded parameter
-        - (value, None): Lower bound only
-        - (None, value): Upper bound only
-        - (min, max): Two-sided constraint
-
-    """
-    for i, (lower, upper) in enumerate(bounds):
-        if lower is not None or upper is not None:
-            m.limits[i] = (lower, upper)
diff --git a/tests/optimagic/optimizers/test_iminuit_migrad.py b/tests/optimagic/optimizers/test_iminuit_migrad.py
new file mode 100644
index 000000000..48e435ef4
--- /dev/null
+++ b/tests/optimagic/optimizers/test_iminuit_migrad.py
@@ -0,0 +1,95 @@
+"""Test suite for the iminuit migrad optimizer."""
+
+import numpy as np
+import pytest
+from numpy.testing import assert_array_almost_equal as aaae
+
+from optimagic.config import IS_IMINUIT_INSTALLED
+from optimagic.optimization.optimize import minimize
+from optimagic.optimizers.iminuit_migrad import (
+    IminuitMigrad,
+    _convert_bounds_to_minuit_limits,
+)
+
+
+def sphere(x):
+    return (x**2).sum()
+
+
+def sphere_grad(x):
+    return 2 * x
+
+
+def test_convert_bounds_unbounded():
+    """Test converting unbounded bounds."""
+    lower = np.array([-np.inf, -np.inf])
+    upper = np.array([np.inf, np.inf])
+    limits = _convert_bounds_to_minuit_limits(lower, upper)
+
+    assert len(limits) == 2
+    assert limits[0] == (None, None)
+    assert limits[1] == (None, None)
+
+
+def test_convert_bounds_lower_only():
+    """Test converting lower bounds only."""
+    lower = np.array([1.0, 2.0])
+    upper = np.array([np.inf, np.inf])
+    limits = _convert_bounds_to_minuit_limits(lower, upper)
+
+    assert len(limits) == 2
+    assert limits[0] == (1.0, None)
+    assert limits[1] == (2.0, None)
+
+
+def test_convert_bounds_upper_only():
+    """Test converting upper bounds only."""
+    lower = np.array([-np.inf, -np.inf])
+    upper = np.array([1.0, 2.0])
+    limits = _convert_bounds_to_minuit_limits(lower, upper)
+
+    assert len(limits) == 2
+    assert limits[0] == (None, 1.0)
+    assert limits[1] == (None, 2.0)
+
+
+def test_convert_bounds_two_sided():
+    """Test converting two-sided bounds."""
+    lower = np.array([1.0, -2.0])
+    upper = np.array([2.0, -1.0])
+    limits = _convert_bounds_to_minuit_limits(lower, upper)
+
+    assert len(limits) == 2
+    assert limits[0] == (1.0, 2.0)
+    assert limits[1] == (-2.0, -1.0)
+
+
+def test_convert_bounds_mixed():
+    """Test converting mixed bounds (some infinite, some finite)."""
+    lower = np.array([-np.inf, 0.0, 1.0])
+    upper = np.array([1.0, np.inf, 2.0])
+    limits = _convert_bounds_to_minuit_limits(lower, upper)
+
+    assert len(limits) == 3
+    assert limits[0] == (None, 1.0)
+    assert limits[1] == (0.0, None)
+    assert limits[2] == (1.0, 2.0)
+
+
+@pytest.mark.skipif(not IS_IMINUIT_INSTALLED, reason="iminuit not installed.")
+def test_iminuit_migrad():
+    """Test basic optimization with sphere function."""
+    x0 = np.array([1.0, 2.0, 3.0])
+    algorithm = IminuitMigrad()
+
+    res = minimize(
+        fun=sphere,
+        jac=sphere_grad,
+        algorithm=algorithm,
+        x0=x0,
+    )
+
+    assert res.success
+    aaae(res.x, np.zeros(3), decimal=6)
+    assert res.n_fun_evals > 0
+    assert res.n_jac_evals > 0
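
For reference, the bound-to-limit mapping implemented by `_convert_bounds_to_minuit_limits` in patch 4 can be exercised directly. A minimal sketch mirroring `test_convert_bounds_mixed` above; the input arrays are illustrative only:

```python
# Sketch of the bound-to-limit mapping; mirrors test_convert_bounds_mixed.
# Assumes optimagic with this patch series applied, plus numpy.
import numpy as np

from optimagic.optimizers.iminuit_migrad import _convert_bounds_to_minuit_limits

limits = _convert_bounds_to_minuit_limits(
    lower_bounds=np.array([-np.inf, 0.0, 1.0]),
    upper_bounds=np.array([1.0, np.inf, 2.0]),
)
print(limits)  # [(None, 1.0), (0.0, None), (1.0, 2.0)]
# Infinite (and NaN) ends become None, which iminuit treats as "no limit"
# on that side; finite ends are passed through as plain floats.
```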
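End to end, the new algorithm is reachable through optimagic's `minimize`. A minimal sketch, assuming the public `Bounds` helper and the keyword aliases (`x0`, `jac`) used in the tests above; the bound values and option settings are illustrative, not recommendations:

```python
# Usage sketch for the optimizer added in this series. Assumes optimagic's
# public API (minimize, Bounds); concrete values are illustrative only.
import numpy as np

from optimagic import Bounds
from optimagic.optimization.optimize import minimize
from optimagic.optimizers.iminuit_migrad import IminuitMigrad


def sphere(x):
    return (x**2).sum()


def sphere_grad(x):
    return 2 * x


res = minimize(
    fun=sphere,
    jac=sphere_grad,
    x0=np.array([1.0, 2.0, 3.0]),
    # Finite bound entries become Minuit limits; -inf/inf entries map to
    # None (unbounded) via _convert_bounds_to_minuit_limits.
    bounds=Bounds(lower=np.array([-5.0, -5.0, 0.5]), upper=np.full(3, 5.0)),
    algorithm=IminuitMigrad(stopping_maxfun=10_000, n_restarts=2),
)
print(res.x)  # close to [0, 0, 0.5]; the third parameter sits on its lower bound
```

Passing an analytical `jac` mainly saves the function evaluations MIGRAD would otherwise spend on numerical gradients, and `n_restarts` maps to Minuit's `iterate` argument, which re-runs MIGRAD when the EDM convergence criterion has not yet been met.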