From 4a226b7e7176c3f467a5ed2812de6f98bf622200 Mon Sep 17 00:00:00 2001 From: Patrick Kofod Mogensen Date: Sun, 29 Jan 2017 19:20:53 +0100 Subject: [PATCH 1/2] NonDifferentiableFunction -> NonDifferentiable, DifferentiableFunction -> OnceDifferentiable, TwiceDifferentiableFunction -> TwiceDifferentiable. --- docs/src/algo/precondition.md | 2 +- docs/src/user/minimization.md | 6 +++--- docs/src/user/tipsandtricks.md | 2 +- src/Optim.jl | 4 ++-- src/fminbox.jl | 4 ++-- src/optimize.jl | 30 ++++++++++++++-------------- src/types.jl | 22 ++++++++++---------- test/accelerated_gradient_descent.jl | 2 +- test/api.jl | 6 +++--- test/callbacks.jl | 4 ++-- test/cg.jl | 4 ++-- test/constrained.jl | 2 +- test/extrapolate.jl | 2 +- test/gradient_descent.jl | 4 ++-- test/newton.jl | 14 ++++++------- test/newton_trust_region.jl | 6 +++--- test/optimize.jl | 2 +- test/precon.jl | 2 +- test/type_stability.jl | 4 ++-- 19 files changed, 61 insertions(+), 61 deletions(-) diff --git a/docs/src/algo/precondition.md b/docs/src/algo/precondition.md index f9c45faa1..5a792110d 100644 --- a/docs/src/algo/precondition.md +++ b/docs/src/algo/precondition.md @@ -40,7 +40,7 @@ initial_x = zeros(100) plap(U; n = length(U)) = (n-1)*sum((0.1 + diff(U).^2).^2 ) - sum(U) / (n-1) plap1 = ForwardDiff.gradient(plap) precond(n) = spdiagm((-ones(n-1), 2*ones(n), -ones(n-1)), (-1,0,1), n, n)*(n+1) -df = DifferentiableFunction(x -> plap([0; X; 0]), +df = OnceDifferentiable(x -> plap([0; X; 0]), (x, g) -> copy!(g, (plap1([0; X; 0]))[2:end-1])) result = Optim.optimize(df, initial_x, method = ConjugateGradient(P = nothing)) result = Optim.optimize(df, initial_x, method = ConjugateGradient(P = precond(100))) diff --git a/docs/src/user/minimization.md b/docs/src/user/minimization.md index d9f408472..45311a2a9 100644 --- a/docs/src/user/minimization.md +++ b/docs/src/user/minimization.md @@ -75,7 +75,7 @@ A primal interior-point algorithm for simple "box" constraints (lower and upper lower = [1.25, -2.1] upper = [Inf, Inf] initial_x = [2.0, 2.0] -results = optimize(DifferentiableFunction(f, g!), initial_x, lower, upper, Fminbox(), optimizer = GradientDescent) +results = optimize(OnceDifferentiable(f, g!), initial_x, lower, upper, Fminbox(), optimizer = GradientDescent) ``` This performs optimization with a barrier penalty, successively scaling down the barrier coefficient and using the chosen `optimizer` for convergence at each step. Notice that the `Optimizer` type, not an instance should be passed. This means that the keyword should be passed as `optimizer = GradientDescent` not `optimizer = GradientDescent()`, as you usually would. 
@@ -86,11 +86,11 @@ There are two iterations parameters: an outer iterations parameter used to contr For example, the following restricts the optimization to 2 major iterations ```julia -results = optimize(DifferentiableFunction(f, g!), initial_x, lower, upper, Fminbox(); optimizer = GradientDescent, iterations = 2) +results = optimize(OnceDifferentiable(f, g!), initial_x, lower, upper, Fminbox(); optimizer = GradientDescent, iterations = 2) ``` In contrast, the following sets the maximum number of iterations for each `ConjugateGradient` optimization to 2 ```julia -results = Optim.optimize(DifferentiableFunction(f, g!), initial_x, lower, upper, Fminbox(); optimizer = GradientDescent, optimizer_o = Optim.Options(iterations = 2)) +results = Optim.optimize(OnceDifferentiable(f, g!), initial_x, lower, upper, Fminbox(); optimizer = GradientDescent, optimizer_o = Optim.Options(iterations = 2)) ``` ## Minimizing a univariate function on a bounded interval diff --git a/docs/src/user/tipsandtricks.md b/docs/src/user/tipsandtricks.md index 6b60230cf..7bfade8f0 100644 --- a/docs/src/user/tipsandtricks.md +++ b/docs/src/user/tipsandtricks.md @@ -81,7 +81,7 @@ using Optim initial_x = ... buffer = Array{eltype(initial_x)}(...) # Preallocate an appropriate buffer last_x = similar(initial_x) -df = TwiceDifferentiableFunction(x -> f(x, buffer, initial_x), +df = TwiceDifferentiable(x -> f(x, buffer, initial_x), (x, stor) -> g!(x, stor, buffer, last_x)) optimize(df, initial_x) ``` diff --git a/src/Optim.jl b/src/Optim.jl index 652484f9a..9a51c4705 100644 --- a/src/Optim.jl +++ b/src/Optim.jl @@ -17,8 +17,8 @@ module Optim Base.setindex! export optimize, - DifferentiableFunction, - TwiceDifferentiableFunction, + OnceDifferentiable, + TwiceDifferentiable, OptimizationOptions, OptimizationState, OptimizationTrace, diff --git a/src/fminbox.jl b/src/fminbox.jl index 82cc314c9..7e2fb6dcc 100644 --- a/src/fminbox.jl +++ b/src/fminbox.jl @@ -103,7 +103,7 @@ end immutable Fminbox <: Optimizer end function optimize{T<:AbstractFloat}( - df::DifferentiableFunction, + df::OnceDifferentiable, initial_x::Array{T}, l::Array{T}, u::Array{T}, @@ -183,7 +183,7 @@ function optimize{T<:AbstractFloat}( # Optimize with current setting of mu funcc = (x, g) -> barrier_combined(x, g, gfunc, gbarrier, fb, mu) fval0 = funcc(x, nothing) - dfbox = DifferentiableFunction(x->funcc(x,nothing), (x,g)->(funcc(x,g); g), funcc) + dfbox = OnceDifferentiable(x->funcc(x,nothing), (x,g)->(funcc(x,g); g), funcc) if show_trace > 0 println("#### Calling optimizer with mu = ", mu, " ####") end diff --git a/src/optimize.jl b/src/optimize.jl index 65be145cd..bb477f946 100644 --- a/src/optimize.jl +++ b/src/optimize.jl @@ -28,7 +28,7 @@ function optimize{F<:Function, G<:Function}(f::F, g!::G, initial_x::Array; kwarg checked_kwargs, method = check_kwargs(kwargs, BFGS()) optimize(f, g!, initial_x, method, Options(;checked_kwargs...)) end -function optimize(d::DifferentiableFunction, initial_x::Array; kwargs...) +function optimize(d::OnceDifferentiable, initial_x::Array; kwargs...) checked_kwargs, method = check_kwargs(kwargs, BFGS()) optimize(d, initial_x, method, Options(checked_kwargs...)) end @@ -41,28 +41,28 @@ function optimize{F<:Function, G<:Function, H<:Function}(f::F, optimize(f, g!, h!, initial_x, method, Options(checked_kwargs...)) end -function optimize(d::TwiceDifferentiableFunction, initial_x::Array; kwargs...) +function optimize(d::TwiceDifferentiable, initial_x::Array; kwargs...) 
checked_kwargs, method = check_kwargs(kwargs, Newton()) optimize(d, initial_x, method, Options(;kwargs...)) end optimize(d::Function, initial_x, options::Options) = optimize(d, initial_x, NelderMead(), options) -optimize(d::DifferentiableFunction, initial_x, options::Options) = optimize(d, initial_x, BFGS(), options) -optimize(d::TwiceDifferentiableFunction, initial_x, options::Options) = optimize(d, initial_x, Newton(), options) +optimize(d::OnceDifferentiable, initial_x, options::Options) = optimize(d, initial_x, BFGS(), options) +optimize(d::TwiceDifferentiable, initial_x, options::Options) = optimize(d, initial_x, Newton(), options) function optimize{F<:Function, G<:Function}(f::F, g!::G, initial_x::Array, method::Optimizer, options::Options = Options()) - d = DifferentiableFunction(f, g!) + d = OnceDifferentiable(f, g!) optimize(d, initial_x, method, options) end function optimize{F<:Function, G<:Function}(f::F, g!::G, initial_x::Array, options::Options) - d = DifferentiableFunction(f, g!) + d = OnceDifferentiable(f, g!) optimize(d, initial_x, BFGS(), options) end @@ -72,7 +72,7 @@ function optimize{F<:Function, G<:Function, H<:Function}(f::F, initial_x::Array, method::Optimizer, options::Options = Options()) - d = TwiceDifferentiableFunction(f, g!, h!) + d = TwiceDifferentiable(f, g!, h!) optimize(d, initial_x, method, options) end function optimize{F<:Function, G<:Function, H<:Function}(f::F, @@ -80,7 +80,7 @@ function optimize{F<:Function, G<:Function, H<:Function}(f::F, h!::H, initial_x::Array, options) - d = TwiceDifferentiableFunction(f, g!, h!) + d = TwiceDifferentiable(f, g!, h!) optimize(d, initial_x, Newton(), options) end @@ -90,7 +90,7 @@ function optimize{F<:Function, T, M <: Union{FirstOrderSolver, SecondOrderSolver options::Options) if !options.autodiff if M <: FirstOrderSolver - d = DifferentiableFunction(f) + d = OnceDifferentiable(f) else error("No gradient or Hessian was provided. Either provide a gradient and Hessian, set autodiff = true in the Options if applicable, or choose a solver that doesn't require a Hessian.") end @@ -105,18 +105,18 @@ function optimize{F<:Function, T, M <: Union{FirstOrderSolver, SecondOrderSolver end if M <: FirstOrderSolver - d = DifferentiableFunction(f, g!, fg!) + d = OnceDifferentiable(f, g!, fg!) else hcfg = ForwardDiff.HessianConfig(initial_x) h! = (x, out) -> ForwardDiff.hessian!(out, f, x, hcfg) - d = TwiceDifferentiableFunction(f, g!, fg!, h!) + d = TwiceDifferentiable(f, g!, fg!, h!) end end optimize(d, initial_x, method, options) end -function optimize(d::DifferentiableFunction, +function optimize(d::OnceDifferentiable, initial_x::Array, method::Newton, options::Options) @@ -126,10 +126,10 @@ function optimize(d::DifferentiableFunction, hcfg = ForwardDiff.HessianConfig(initial_x) h! = (x, out) -> ForwardDiff.hessian!(out, d.f, x, hcfg) end - optimize(TwiceDifferentiableFunction(d.f, d.g!, d.fg!, h!), initial_x, method, options) + optimize(TwiceDifferentiable(d.f, d.g!, d.fg!, h!), initial_x, method, options) end -function optimize(d::DifferentiableFunction, +function optimize(d::OnceDifferentiable, initial_x::Array, method::NewtonTrustRegion, options::Options) @@ -139,7 +139,7 @@ function optimize(d::DifferentiableFunction, hcfg = ForwardDiff.HessianConfig(initial_x) h! 
= (x, out) -> ForwardDiff.hessian!(out, d.f, x, hcfg) end - optimize(TwiceDifferentiableFunction(d.f, d.g!, d.fg!, h!), initial_x, method, options) + optimize(TwiceDifferentiable(d.f, d.g!, d.fg!, h!), initial_x, method, options) end update_g!(d, state, method) = nothing diff --git a/src/types.jl b/src/types.jl index dcc82f1d6..224c3161d 100644 --- a/src/types.jl +++ b/src/types.jl @@ -93,17 +93,17 @@ type UnivariateOptimizationResults{T,M} <: OptimizationResults f_calls::Int end -immutable NonDifferentiableFunction +immutable NonDifferentiable f::Function end -immutable DifferentiableFunction +immutable OnceDifferentiable f::Function g!::Function fg!::Function end -immutable TwiceDifferentiableFunction +immutable TwiceDifferentiable f::Function g!::Function fg!::Function @@ -187,7 +187,7 @@ function Base.append!(a::MultivariateOptimizationResults, b::MultivariateOptimiz end # TODO: Expose ability to do forward and backward differencing -function DifferentiableFunction(f::Function) +function OnceDifferentiable(f::Function) function g!(x::Array, storage::Array) Calculus.finite_difference!(f, x, storage, :central) return @@ -196,18 +196,18 @@ function DifferentiableFunction(f::Function) g!(x, storage) return f(x) end - return DifferentiableFunction(f, g!, fg!) + return OnceDifferentiable(f, g!, fg!) end -function DifferentiableFunction(f::Function, g!::Function) +function OnceDifferentiable(f::Function, g!::Function) function fg!(x::Array, storage::Array) g!(x, storage) return f(x) end - return DifferentiableFunction(f, g!, fg!) + return OnceDifferentiable(f, g!, fg!) end -function TwiceDifferentiableFunction(f::Function) +function TwiceDifferentiable(f::Function) function g!(x::Vector, storage::Vector) Calculus.finite_difference!(f, x, storage, :central) return @@ -220,15 +220,15 @@ function TwiceDifferentiableFunction(f::Function) Calculus.finite_difference_hessian!(f, x, storage) return end - return TwiceDifferentiableFunction(f, g!, fg!, h!) + return TwiceDifferentiable(f, g!, fg!, h!) end -function TwiceDifferentiableFunction(f::Function, +function TwiceDifferentiable(f::Function, g!::Function, h!::Function) function fg!(x::Vector, storage::Vector) g!(x, storage) return f(x) end - return TwiceDifferentiableFunction(f, g!, fg!, h!) + return TwiceDifferentiable(f, g!, fg!, h!) end diff --git a/test/accelerated_gradient_descent.jl b/test/accelerated_gradient_descent.jl index dbc538d1e..48860583d 100644 --- a/test/accelerated_gradient_descent.jl +++ b/test/accelerated_gradient_descent.jl @@ -6,7 +6,7 @@ return end - d = DifferentiableFunction(f, g!) + d = OnceDifferentiable(f, g!) initial_x = [1.0] options = Optim.Options(show_trace = true, iterations = 10) diff --git a/test/api.jl b/test/api.jl index 4d72f820c..43a725d90 100644 --- a/test/api.jl +++ b/test/api.jl @@ -6,9 +6,9 @@ h! = rosenbrock.h! initial_x = rosenbrock.initial_x - d1 = DifferentiableFunction(f) - d2 = DifferentiableFunction(f, g!) - d3 = TwiceDifferentiableFunction(f, g!, h!) + d1 = OnceDifferentiable(f) + d2 = OnceDifferentiable(f, g!) + d3 = TwiceDifferentiable(f, g!, h!) Optim.optimize(f, initial_x, BFGS()) Optim.optimize(f, g!, initial_x, BFGS()) diff --git a/test/callbacks.jl b/test/callbacks.jl index d045e26ad..08ad8d3ec 100644 --- a/test/callbacks.jl +++ b/test/callbacks.jl @@ -5,8 +5,8 @@ g! = problem.g! h! = problem.h! initial_x = problem.initial_x - d2 = DifferentiableFunction(f, g!) - d3 = TwiceDifferentiableFunction(f, g!, h!) + d2 = OnceDifferentiable(f, g!) + d3 = TwiceDifferentiable(f, g!, h!) 
for method in (NelderMead(), SimulatedAnnealing()) ot_run = false diff --git a/test/cg.jl b/test/cg.jl index d7161c495..2ed3dbe1f 100644 --- a/test/cg.jl +++ b/test/cg.jl @@ -2,7 +2,7 @@ # Test Optim.cg for all differentiable functions in Optim.UnconstrainedProblems.examples for (name, prob) in Optim.UnconstrainedProblems.examples if prob.isdifferentiable - df = DifferentiableFunction(prob.f, prob.g!) + df = OnceDifferentiable(prob.f, prob.g!) res = Optim.optimize(df, prob.initial_x, ConjugateGradient()) @test norm(Optim.minimizer(res) - prob.solutions) < 1e-2 end @@ -19,7 +19,7 @@ srand(1) B = rand(2,2) - df = Optim.DifferentiableFunction(X -> objective(X, B), (X, G) -> objective_gradient!(X, G, B)) + df = Optim.OnceDifferentiable(X -> objective(X, B), (X, G) -> objective_gradient!(X, G, B)) results = Optim.optimize(df, rand(2,2), ConjugateGradient()) @test Optim.converged(results) @test Optim.minimum(results) < 1e-8 diff --git a/test/constrained.jl b/test/constrained.jl index 86c30073a..7e6a65e2d 100644 --- a/test/constrained.jl +++ b/test/constrained.jl @@ -26,7 +26,7 @@ initial_x = randn(N) tmp = similar(initial_x) func = (x, g) -> quadratic!(x, g, AtA, A'*b, tmp) - objective = Optim.DifferentiableFunction(x->func(x, nothing), (x,g)->func(x,g), func) + objective = Optim.OnceDifferentiable(x->func(x, nothing), (x,g)->func(x,g), func) results = Optim.optimize(objective, initial_x, ConjugateGradient()) results = Optim.optimize(objective, Optim.minimizer(results), ConjugateGradient()) # restart to ensure high-precision convergence @test Optim.converged(results) diff --git a/test/extrapolate.jl b/test/extrapolate.jl index 52b6c9eba..0f3fd558c 100644 --- a/test/extrapolate.jl +++ b/test/extrapolate.jl @@ -30,7 +30,7 @@ import LineSearches precond(x::Vector) = precond(length(x)) precond(n::Number) = spdiagm(( -ones(n-1), 2*ones(n), -ones(n-1) ), (-1,0,1), n, n) * (n+1) - df = DifferentiableFunction( X->plap([0;X;0]), + df = OnceDifferentiable( X->plap([0;X;0]), (X, G)->copy!(G, (plap1([0;X;0]))[2:end-1]) ) GRTOL = 1e-6 N = 100 diff --git a/test/gradient_descent.jl b/test/gradient_descent.jl index 6b927bde5..1e2511fa9 100644 --- a/test/gradient_descent.jl +++ b/test/gradient_descent.jl @@ -28,7 +28,7 @@ initial_x = [0.0] - d = DifferentiableFunction(f_gd_1, g_gd_1) + d = OnceDifferentiable(f_gd_1, g_gd_1) results = Optim.optimize(d, initial_x, GradientDescent()) @test_throws ErrorException Optim.x_trace(results) @@ -46,7 +46,7 @@ storage[2] = eta * x[2] end - d = DifferentiableFunction(f_gd_2, g_gd_2) + d = OnceDifferentiable(f_gd_2, g_gd_2) results = Optim.optimize(d, [1.0, 1.0], GradientDescent()) @test_throws ErrorException Optim.x_trace(results) diff --git a/test/newton.jl b/test/newton.jl index 84125b8f9..d40014e0b 100644 --- a/test/newton.jl +++ b/test/newton.jl @@ -11,11 +11,11 @@ storage[1, 1] = 12.0 * (x[1] - 5.0)^2 end - d = TwiceDifferentiableFunction(f_1, g!_1, h!_1) + d = TwiceDifferentiable(f_1, g!_1, h!_1) # Need to specify autodiff! 
- @test_throws ErrorException Optim.optimize(DifferentiableFunction(f_1, g!_1), [0.0], Newton()) - Optim.optimize(DifferentiableFunction(f_1, g!_1), [0.0], Newton(), Optim.Options(autodiff = true)) + @test_throws ErrorException Optim.optimize(OnceDifferentiable(f_1, g!_1), [0.0], Newton()) + Optim.optimize(OnceDifferentiable(f_1, g!_1), [0.0], Newton(), Optim.Options(autodiff = true)) results = Optim.optimize(d, [0.0], Newton()) @test_throws ErrorException Optim.x_trace(results) @@ -40,7 +40,7 @@ storage[2, 2] = eta end - d = TwiceDifferentiableFunction(f_2, g!_2, h!_2) + d = TwiceDifferentiable(f_2, g!_2, h!_2) results = Optim.optimize(d, [127.0, 921.0], Newton()) @test_throws ErrorException Optim.x_trace(results) @test Optim.g_converged(results) @@ -50,7 +50,7 @@ @testset "Optim problems" begin for (name, prob) in Optim.UnconstrainedProblems.examples if prob.istwicedifferentiable - ddf = TwiceDifferentiableFunction(prob.f, prob.g!,prob.h!) + ddf = TwiceDifferentiable(prob.f, prob.g!,prob.h!) res = Optim.optimize(ddf, prob.initial_x, Newton()) @test norm(Optim.minimizer(res) - prob.solutions) < 1e-2 end @@ -59,7 +59,7 @@ let prob=Optim.UnconstrainedProblems.examples["Himmelblau"] - ddf = TwiceDifferentiableFunction(prob.f, prob.g!, prob.h!) + ddf = TwiceDifferentiable(prob.f, prob.g!, prob.h!) res = optimize(ddf, [0., 0.], Newton()) @test norm(Optim.minimizer(res) - prob.solutions) < 1e-9 end @@ -67,7 +67,7 @@ @testset "Optim problems (ForwardDiff)" begin for (name, prob) in Optim.UnconstrainedProblems.examples if prob.istwicedifferentiable - ddf = DifferentiableFunction(prob.f, prob.g!) + ddf = OnceDifferentiable(prob.f, prob.g!) res = Optim.optimize(ddf, prob.initial_x, Newton(), Optim.Options(autodiff = true)) @test norm(Optim.minimizer(res) - prob.solutions) < 1e-2 res = Optim.optimize(ddf.f, prob.initial_x, Newton(), Optim.Options(autodiff = true)) diff --git a/test/newton_trust_region.jl b/test/newton_trust_region.jl index 28b3ca1b1..a01c2b598 100644 --- a/test/newton_trust_region.jl +++ b/test/newton_trust_region.jl @@ -148,7 +148,7 @@ end storage[1, 1] = 12.0 * (x[1] - 5.0)^2 end - d = TwiceDifferentiableFunction(f, g!, h!) + d = TwiceDifferentiable(f, g!, h!) results = Optim.optimize(d, [0.0], NewtonTrustRegion()) @test length(results.trace) == 0 @@ -173,7 +173,7 @@ end storage[2, 2] = eta end - d = TwiceDifferentiableFunction(f_2, g!_2, h!_2) + d = TwiceDifferentiable(f_2, g!_2, h!_2) results = Optim.optimize(d, Float64[127, 921], NewtonTrustRegion()) @test results.g_converged @@ -183,7 +183,7 @@ end # Optim.UnconstrainedProblems.examples for (name, prob) in Optim.UnconstrainedProblems.examples if prob.istwicedifferentiable - ddf = DifferentiableFunction(prob.f, prob.g!) + ddf = OnceDifferentiable(prob.f, prob.g!) 
            res = Optim.optimize(ddf, prob.initial_x, NewtonTrustRegion(), Optim.Options(autodiff = true))
            @test norm(Optim.minimizer(res) - prob.solutions) < 1e-2
            res = Optim.optimize(ddf.f, prob.initial_x, NewtonTrustRegion(), Optim.Options(autodiff = true))
diff --git a/test/optimize.jl b/test/optimize.jl
index a57f8367c..f4fc08aa8 100644
--- a/test/optimize.jl
+++ b/test/optimize.jl
@@ -37,7 +37,7 @@
     initial_invH = zeros(2,2)
     h1([127.0, 921.0],initial_invH)
     initial_invH = diagm(diag(initial_invH))
-    results = optimize(DifferentiableFunction(f1, g1), [127.0, 921.0], BFGS(initial_invH = x -> initial_invH), Optim.Options())
+    results = optimize(OnceDifferentiable(f1, g1), [127.0, 921.0], BFGS(initial_invH = x -> initial_invH), Optim.Options())
 
     @test Optim.g_converged(results)
     @test norm(Optim.minimizer(results) - [0.0, 0.0]) < 0.01
diff --git a/test/precon.jl b/test/precon.jl
index 1347e29be..d44fbbe6c 100644
--- a/test/precon.jl
+++ b/test/precon.jl
@@ -14,7 +14,7 @@
     precond(x::Vector) = precond(length(x))
     precond(n::Number) = spdiagm( ( -ones(n-1), 2*ones(n), -ones(n-1) ), (-1,0,1), n, n) * (n+1)
 
-    df = DifferentiableFunction( X->plap([0;X;0]),
+    df = OnceDifferentiable( X->plap([0;X;0]),
                                 (X, G)->copy!(G, (plap1([0;X;0]))[2:end-1]) )
     GRTOL = 1e-6
 
diff --git a/test/type_stability.jl b/test/type_stability.jl
index e11f5a696..cedbc604c 100644
--- a/test/type_stability.jl
+++ b/test/type_stability.jl
@@ -22,9 +22,9 @@
         storage[2, 2] = 2*c
     end
 
-    d2 = DifferentiableFunction(rosenbrock,
+    d2 = OnceDifferentiable(rosenbrock,
                                 rosenbrock_gradient!)
-    d3 = TwiceDifferentiableFunction(rosenbrock,
+    d3 = TwiceDifferentiable(rosenbrock,
                                      rosenbrock_gradient!,
                                      rosenbrock_hessian!)
 

From 451881d6b39795fc44a988242d68d46b0ec59157 Mon Sep 17 00:00:00 2001
From: Patrick Kofod Mogensen
Date: Wed, 1 Feb 2017 12:34:08 +0100
Subject: [PATCH 2/2] Deprecate old names.

---
 src/Optim.jl     | 4 ++++
 src/deprecate.jl | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/src/Optim.jl b/src/Optim.jl
index 9a51c4705..50ff4967f 100644
--- a/src/Optim.jl
+++ b/src/Optim.jl
@@ -17,6 +17,10 @@ module Optim
         Base.setindex!
 
     export optimize,
+        NonDifferentiableFunction,
+        DifferentiableFunction,
+        TwiceDifferentiableFunction,
+        NonDifferentiable,
         OnceDifferentiable,
         TwiceDifferentiable,
         OptimizationOptions,
diff --git a/src/deprecate.jl b/src/deprecate.jl
index 4b1da21e8..e79077521 100644
--- a/src/deprecate.jl
+++ b/src/deprecate.jl
@@ -73,3 +73,7 @@ function get_neighbor(neighbor!, neighbor)
     end
     neighbor
 end
+
+@deprecate NonDifferentiableFunction(args...) NonDifferentiable(args...)
+@deprecate DifferentiableFunction(args...) OnceDifferentiable(args...)
+@deprecate TwiceDifferentiableFunction(args...) TwiceDifferentiable(args...)
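A minimal usage sketch of the renamed wrappers, assuming the standard 2-D Rosenbrock objective defined inline and the `g!(x, storage)` / `h!(x, storage)` argument order used elsewhere in this patch:

```julia
using Optim

# Standard 2-D Rosenbrock objective with in-place gradient and Hessian.
f(x) = (1.0 - x[1])^2 + 100.0 * (x[2] - x[1]^2)^2

function g!(x, storage)
    storage[1] = -2.0 * (1.0 - x[1]) - 400.0 * (x[2] - x[1]^2) * x[1]
    storage[2] = 200.0 * (x[2] - x[1]^2)
end

function h!(x, storage)
    storage[1, 1] = 2.0 - 400.0 * x[2] + 1200.0 * x[1]^2
    storage[1, 2] = -400.0 * x[1]
    storage[2, 1] = -400.0 * x[1]
    storage[2, 2] = 200.0
end

d1 = NonDifferentiable(f)            # was NonDifferentiableFunction(f); zeroth-order optimize still takes a bare Function
d2 = OnceDifferentiable(f, g!)       # was DifferentiableFunction(f, g!)
d3 = TwiceDifferentiable(f, g!, h!)  # was TwiceDifferentiableFunction(f, g!, h!)

optimize(d2, zeros(2), BFGS())
optimize(d3, zeros(2), Newton())
```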