From 01f4ec4306cb05952092733bfcadd6b10089a913 Mon Sep 17 00:00:00 2001 From: john verzani Date: Fri, 8 Apr 2022 10:05:58 -0400 Subject: [PATCH] bump major version for breaking changes (#287) * bump major version for breaking changes * Bracketing cleanup (#288) * update bisection: remove BisectionExact, Accelerated; rework assess_convergence, ... * clean up alefeld potra shi; tolerances for FalsePosition(12) * fix errors (#290) * bracketing edits; change halley <: newton (#291) * code cleanup (#292) * Cleanup find zero (#293) * change to iterator interface, could deprecate * more consistent naming * remove deprecated run_bisection * change default solves for `solve(::ZeroProblem)`, update documentation * remove maxfnevals from tolerances which is no longer used * Cleanup convergence (#295) * remove BisectionExact * check for exact zero (#296) * check f==0 and f~ 0 separately * use different format string for printing values (#299) * break out hybrid from Order0; fix logging bug (#298) * Cleanup trace unicode (#300) * use unicode subscripts when displaying trace * clean up tracks, add generics, improve docs * minor documentation edits (#301) * special case bisection (#302) * cleanup (#304) * fix error seen in lith docs * add test for v1.6; allow v1.0.X to fail (#306) * testing fix * Cleanup ci (#307) * avoid inferred for < v1.6.0 * maxevals -> maxiters; allow old use * add nan(T) method * copy edit * Mark deprecations (#289) (#310) --- .github/workflows/ci.yml | 3 +- .gitignore | 2 +- Project.toml | 2 +- docs/src/index.md | 31 +- docs/src/reference.md | 6 +- docs/src/roots.md | 38 +- src/Bracketing/accelerated_bisection.jl | 23 - src/Bracketing/alefeld_potra_shi.jl | 396 ++++++++---------- src/Bracketing/bisection.jl | 211 ++++++---- src/Bracketing/bracketing.jl | 150 ++----- src/Bracketing/bracketing_deprecate.jl | 43 -- src/Bracketing/brent.jl | 23 +- src/Bracketing/chandrapatlu.jl | 7 +- src/Bracketing/false_position.jl | 37 +- src/Bracketing/itp.jl | 12 +- 
src/Bracketing/ridders.jl | 7 +- src/Derivative/halley_like.jl | 62 +-- src/Derivative/lith.jl | 44 +- src/Derivative/newton.jl | 48 +-- src/Derivative/thukralb.jl | 12 +- src/DerivativeFree/derivative_free.jl | 15 +- .../derivative_free_refactor.jl | 10 - src/DerivativeFree/esser.jl | 18 +- src/DerivativeFree/king.jl | 9 +- src/DerivativeFree/order0.jl | 38 ++ src/DerivativeFree/order16.jl | 43 +- src/DerivativeFree/order5.jl | 43 +- src/DerivativeFree/order8.jl | 28 +- src/DerivativeFree/secant.jl | 7 +- src/DerivativeFree/steffensen.jl | 15 +- src/Roots.jl | 7 +- src/abstract_types.jl | 20 +- src/alternative_interfaces.jl | 2 +- src/convergence.jl | 280 ++++++++----- src/find_zero.jl | 274 ++++++------ src/{order0.jl => hybrid.jl} | 85 +--- src/simple.jl | 6 +- src/state.jl | 50 ++- src/trace.jl | 135 +++--- src/utils.jl | 8 +- test/test_allocations.jl | 2 +- test/test_bracketing.jl | 27 +- test/test_find_zero.jl | 103 ++--- test/test_find_zeros.jl | 10 +- test/test_fzero.jl | 1 - 45 files changed, 1123 insertions(+), 1270 deletions(-) delete mode 100644 src/Bracketing/accelerated_bisection.jl delete mode 100644 src/Bracketing/bracketing_deprecate.jl delete mode 100644 src/DerivativeFree/derivative_free_refactor.jl create mode 100644 src/DerivativeFree/order0.jl rename src/{order0.jl => hybrid.jl} (67%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 68353f8a..b7c93f26 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,8 @@ jobs: fail-fast: false matrix: version: - - '1.0' # lowest supported version + - '1.0' # lowest supported version; though possibly with errors + - '1.6' # LTS support - '1' # last released version os: - ubuntu-latest diff --git a/.gitignore b/.gitignore index fcf83d60..998c90ca 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,4 @@ docs/build docs/site test/benchmarks.json Manifest.toml -TODO.md \ No newline at end of file +TODO.md diff --git a/Project.toml b/Project.toml index 
30968b61..96cf759f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "Roots" uuid = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" -version = "1.4.1" +version = "2.0.0" [deps] CommonSolve = "38540f10-b2f7-11e9-35d8-d573e4eb0ff2" diff --git a/docs/src/index.md b/docs/src/index.md index e650a57a..5529ed67 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -12,13 +12,17 @@ primary interface. It supports various algorithms through the specification of a method. These include: * Bisection-like algorithms. For functions where a bracketing interval - is known (one where ``f(a)`` and ``f(b)`` have alternate signs), the - `Bisection` method can be specified. For most floating point number - types, bisection occurs in a manner exploiting floating point - storage conventions. For others, an algorithm of Alefeld, Potra, and - Shi is used. These methods are guaranteed to converge. Methods - include `Bisection`, `Roots.A42`, `Roots.AlefeldPotraShi`, - `Roots.Brent`, and ``12``-flavors of `FalsePosition`. + is known (one where ``f(a)`` and ``f(b)`` have alternate signs), + there are several bracketing methods, including `Bisection`. For + most floating point number types, bisection occurs in a manner + exploiting floating point storage conventions leading to an exact + zero or a bracketing interval as small as floating point + computations allows. Other methods include `Roots.A42`, + `Roots.AlefeldPotraShi`, `Roots.Brent`, `Roots.Chandrapatlu`, + `Roots.ITP`, `Roots.Ridders`, and ``12``-flavors of + `FalsePosition`. The default bracketing method is `Bisection`, as it + is more robust to some inputs, but `A42` and `AlefeldPotraShi` + typically converge in a few iterations and are more performant. * Several derivative-free methods are implemented. These are specified @@ -34,10 +38,15 @@ specification of a method. These include: multiplicity of the zero. -* There are historic methods that require a derivative or two: - `Roots.Newton` and `Roots.Halley`. 
`Roots.Schroder` provides a - quadratic method, like Newton's method, which is independent of the - multiplicity of the zero. +* There are methods that require a derivative or two: `Roots.Newton`, + `Roots.Halley` are classical ones, `Roots.QuadraticInverse`, + `Roots.ChebyshevLike`, `Roots.SuperHaller` are others. + `Roots.Schroder` provides a quadratic method, like Newton's method, + which is independent of the multiplicity of the zero. The + `Roots.ThukralXB`, `X=2`, `3`, `4`, or `5` are also multiplicity + three. The `X` denotes the number of derivatives that need + specifying. The `Roots.LithBoonkkampIJzerman{S,D}` methods remember + `S` steps and use `D` derivatives. diff --git a/docs/src/reference.md b/docs/src/reference.md index 8c47ee00..cb9757bc 100644 --- a/docs/src/reference.md +++ b/docs/src/reference.md @@ -31,7 +31,7 @@ find_zero find_zeros ``` -### CommonSolve interface +## CommonSolve interface The problem-algorithm-solve interface is a pattern popularized in `Julia` by the `DifferentialEquations.jl` suite of packages. This can be used as an alternative to `find_zero`. Unlike `find_zero`, `solve` will return `NaN` on non-convergence. @@ -141,7 +141,7 @@ Roots.LithBoonkkampIJzermanBracket The order of convergence for most methods is for *simple* zeros, values ``\alpha`` where ``f(x) = (x-\alpha) \cdot g(x)``, with ``g(\alpha)`` being non-zero. For methods which are of order ``k`` for non-simple zeros, usually an additional function call is needed per step. For example, this is the case for `Roots.Newton` as compared to `Roots.Schroder`. -Derivative-free methods for non-simple zeros have the following implemented +Derivative-free methods for non-simple zeros have the following implemented: ```@docs Roots.King @@ -258,7 +258,7 @@ When an algorithm returns a `NaN` value, it terminates. 
This can happen nea The use of relative tolerances to check if ``f(x) \approx 0`` can lead to spurious answers where ``x`` is very large (and hence the relative tolerance is large). The return of very large solutions should be checked against expectations of the answer. -Deciding if an algorithm won't terminate is done through counting the number or iterations performed; the default adjusted through `maxevals`. As most algorithms are superlinear, convergence happens rapidly near the answer, but all the algorithms can take a while to get near an answer, even when progress is made. As such, the maximum must be large enough to consider linear cases, yet small enough to avoid too many steps when an algorithm is non-convergent. +Deciding if an algorithm won't terminate is done through counting the number or iterations performed; the default adjusted through `maxiters`. As most algorithms are superlinear, convergence happens rapidly near the answer, but all the algorithms can take a while to get near an answer, even when progress is made. As such, the maximum must be large enough to consider linear cases, yet small enough to avoid too many steps when an algorithm is non-convergent. Convergence criteria are method dependent and are determined by the `Roots.assess_convergence` methods. 
diff --git a/docs/src/roots.md b/docs/src/roots.md index 0e652657..79467088 100644 --- a/docs/src/roots.md +++ b/docs/src/roots.md @@ -62,7 +62,7 @@ julia> x0, M = (0, pi/2), Bisection() julia> find_zero(g, x0, M) # as before, solve cos(x) - x = 0 using default p=1 0.7390851332151607 -julia> find_zero(g, x0, M, p=2) # solves cos(x) - x/2 = 0 +julia> find_zero(g, x0, M; p=2) # solves cos(x) - x/2 = 0 1.0298665293222589 ``` @@ -121,25 +121,25 @@ julia> find_zero(x -> Inf*sign(x), (-Inf, Inf)) # Float64 only The basic algorithm used for bracketing when the values are simple floating point values is a modification of the bisection method, where -the midpoint is taken over the bit representation of `a` and `b`. For -big float values, an algorithm due to Alefeld, Potra, and Shi is used. +the midpoint is taken over the bit representation of `a` and `b`. -```jldoctest roots -julia> find_zero(sin, (big(3), big(4))) # uses a different algorithm than for (3,4) -3.141592653589793238462643383279502884197169399375105820974944592307816406286198 -``` +For big float values, bisection is the default (with non-zero +tolerances), but its use is definitely not suggested. Simple +bisection over `BigFloat` values can take *many* more +iterations. For the problem of finding a zero of `sin` in the interval +`(big(3), big(4))`, the default bisection takes ``252`` iterations, +whereas the `A42` method takes ``4``. -(Simple bisection over `BigFloat` values can take *many* steps.) - -The algorithms of Alefeld, Potra, and Shi and the well known algorithm of Brent, also start with a bracketing algorithm. For many problems these will take far fewer steps than the bisection algorithm to reach convergence. These may be called directly. For example, +The algorithms of Alefeld, Potra, and Shi and the well known algorithm +of Brent, also start with a bracketing algorithm. For many problems +these will take far fewer steps than the bisection algorithm to reach +convergence. 
These may be called directly. For example, ```jldoctest roots julia> find_zero(sin, (3,4), A42()) 3.141592653589793 ``` -The above call takes ``9`` function evaluations, the default method takes ``53``. - By default, bisection will converge to machine tolerance. This may provide more accuracy than desired. A tolerance may be specified to @@ -274,14 +274,14 @@ To investigate an algorithm and its convergence, the argument For some functions, adjusting the default tolerances may be necessary to achieve convergence. The tolerances include `atol` and `rtol`, which are used to check if $f(x_n) \approx 0$; -`xatol` and `xrtol`, to check if $x_n \approx x_{n-1}$; and `maxevals` to limit the -number of steps in the algorithm. +`xatol` and `xrtol`, to check if $x_n \approx x_{n-1}$; and `maxiters` to limit the +number of iterations in the algorithm. ## Classical methods -The package provides some classical methods for root finding: +The package provides some classical methods for root finding, such as `Roots.Newton`, `Roots.Halley`, and `Roots.Schroder`. (Currently these are not exported, so must be prefixed with the package name to be used.) We can see how each works on a problem studied by Newton. 
@@ -401,10 +401,10 @@ julia> g(x, p=1) = cos(x) - x/p; julia> Z = ZeroProblem(g, (0.0, pi/2)) ZeroProblem{typeof(g), Tuple{Float64, Float64}}(g, (0.0, 1.5707963267948966)) -julia> solve(Z, Secant(), 2) # uses p=2 +julia> solve(Z, Secant(); p=2) # uses p=2 1.0298665293222589 -julia> solve(Z, Bisection(), 3, xatol=1/16) # p=3; uses keywords for tolerances +julia> solve(Z, Bisection(); p=3, xatol=1/16) # p=3; uses keywords for tolerances 1.1959535058744393 ``` @@ -1007,7 +1007,7 @@ julia> function Roots.update_state(::Chandrapatla, F, o, options, l=NullTracks() b, a, c = o.xn1, o.xn0, o.c fb, fa, fc = o.fxn1, o.fxn0, o.fc - # encoding: a = xₙ, b=xₙ₋₁, c= xₙ₋₂ + # encoding: a = xₙ, b = xₙ₋₁, c = xₙ₋₂ ξ = (a - b) / (c - b) ϕ = (fa - fb) / (fc - fb) ϕ² = ϕ^2 @@ -1038,7 +1038,7 @@ end ``` -This algorithm chooses between an inverse quadratic step or a bisection step depending on the relationship between the computed `ξ` and `Φ`. The tolerances are from the default for `AbstractAcceleratedBisection`, which choose `eps(T)^2` for the absolute ``x``-tolerance and `eps(t)` for the relative ``x``-tolerance. +This algorithm chooses between an inverse quadratic step or a bisection step depending on the relationship between the computed `ξ` and `Φ`. The tolerances are from the default for `AbstractBracketingMethod`. 
To see that the algorithm works, we have: diff --git a/src/Bracketing/accelerated_bisection.jl b/src/Bracketing/accelerated_bisection.jl deleted file mode 100644 index 88eca0b2..00000000 --- a/src/Bracketing/accelerated_bisection.jl +++ /dev/null @@ -1,23 +0,0 @@ -## -------------------------------------------------- -## AbstractAcceleratedBisection - -# use xatol, xrtol only, but give some breathing room over the strict ones and cap number of steps -function default_tolerances(::AbstractAcceleratedBisection, ::Type{T}, ::Type{S}) where {T,S} - xatol = eps(real(T))^3 * oneunit(real(T)) - xrtol = 2eps(real(T)) # unitless - atol = zero(real(S)) * oneunit(real(S)) - rtol = zero(real(S)) - maxevals = 60 - maxfnevals = typemax(Int) - strict = false - (xatol, xrtol, atol, rtol, maxevals, maxfnevals, strict) -end - -function init_state(M::AbstractAcceleratedBisection, F, x₀, x₁, fx₀, fx₁) - (iszero(fx₀) || iszero(fx₁)) && return UnivariateZeroState(x₁, x₀, fx₁, fx₀) - assert_bracket(fx₀, fx₁) - a, b, fa, fb = (x₀ < x₁) ? 
(x₀, x₁, fx₀, fx₁) : (x₁, x₀, fx₁, fx₀) - UnivariateZeroState(b, a, fb, fa) -end - -initial_fncalls(::AbstractAcceleratedBisection) = 2 diff --git a/src/Bracketing/alefeld_potra_shi.jl b/src/Bracketing/alefeld_potra_shi.jl index 8a5942ac..09abf4a8 100644 --- a/src/Bracketing/alefeld_potra_shi.jl +++ b/src/Bracketing/alefeld_potra_shi.jl @@ -1,12 +1,9 @@ ## -------------------------------------------------- -# f[a, b] -@inline f_ab(a, b, fa, fb) = (fb - fa) / (b - a) -# f[a,b,d] -@inline function f_abd(a, b, d, fa, fb, fd) - fab, fbd = f_ab(a, b, fa, fb), f_ab(b, d, fb, fd) - (fbd - fab) / (d - a) -end + +## Two algorithms of Alefeld, Potra, and Shi + +## -------------------------------------------------- # assume fc != 0 ## return a1,b1,d with a < a1 < < b1 < b, d not there @@ -20,28 +17,14 @@ end end end -# inverse cubic interporation -function ipzero(a, b, c, d, fa, fb, fc, fd, delta=zero(a)) - Q11 = (c - d) * fc / (fd - fc) - Q21 = (b - c) * fb / (fc - fb) - Q31 = (a - b) * fa / (fb - fa) - D21 = (b - c) * fc / (fc - fb) - D31 = (a - b) * fb / (fb - fa) - Q22 = (D21 - Q11) * fb / (fd - fb) - Q32 = (D31 - Q21) * fa / (fc - fa) - D32 = (D31 - Q21) * fc / (fc - fa) - Q33 = (D32 - Q22) * fa / (fd - fa) - c = a + (Q31 + Q32 + Q33) - - (a + 2delta < c < b - 2delta) && return c - - newton_quadratic(a, b, d, fa, fb, fd, 3, delta) -end - # return c in (a+delta, b-delta) # adds part of `bracket` from paper with `delta` function newton_quadratic(a, b, d, fa, fb, fd, k::Int, delta=zero(a)) + a′, b′ = a + 2delta, b - 2delta + A = f_abd(a, b, d, fa, fb, fd) + + r = isbracket(A, fa) ? 
b : a # use quadratic step; if that fails, use secant step; if that fails, bisection @@ -53,27 +36,28 @@ function newton_quadratic(a, b, d, fa, fb, fd, k::Int, delta=zero(a)) Prp = (B + A * (2r - a - b)) r -= Pr / Prp end - if a + 2delta < r < b - 2delta + + if a′ < r < b′ return r end end # try secant step r = secant_step(a, b, fa, fb) - if a + 2delta < r < b - 2delta + if a′ < r < b′ return r end return _middle(a, b) end -# Cubic if possible, if not, quadratic(3) -function take_a42_step(a, b, d, ee, fa, fb, fd, fe, k, delta=zero(a)) - # if r is NaN or Inf we move on by condition. Faster than checking ahead of time for - # distinctness - r = ipzero(a, b, d, ee, fa, fb, fd, fe, delta) # let error and see difference in allcoation? - (a + 2delta < r < b - 2delta) && return r - r = newton_quadratic(a, b, d, fa, fb, fd, 3, delta) +# f[a, b] divided differences +@inline f_ab(a, b, fa, fb) = (fb - fa) / (b - a) + +# f[a,b,d] +@inline function f_abd(a, b, d, fa, fb, fd) + fab, fbd = f_ab(a, b, fa, fb), f_ab(b, d, fb, fd) + (fbd - fab) / (d - a) end @@ -82,9 +66,153 @@ end ## Alefeld, Potra, Shi have two algorithms belosw, one is most efficient, but ## slightly slower than other. -abstract type AbstractAlefeldPotraShi <: AbstractAcceleratedBisection end +abstract type AbstractAlefeldPotraShi <: AbstractBracketingMethod end initial_fncalls(::AbstractAlefeldPotraShi) = 3 # worst case assuming fx₀, fx₁,fc must be computed +## ---- + +""" + Roots.AlefeldPotraShi() + +Follows algorithm in "ON ENCLOSING SIMPLE ROOTS OF NONLINEAR +EQUATIONS", by Alefeld, Potra, Shi; DOI: +[10.1090/S0025-5718-1993-1192965-2](https://doi.org/10.1090/S0025-5718-1993-1192965-2). + +The order of convergence is `2 + √5`; asymptotically there are 3 function evaluations per step. +Asymptotic efficiency index is ``(2+√5)^(1/3) ≈ 1.618...``. Less efficient, but can run faster than the [`A42`](@ref) method. + +Originally by John Travers. 
+""" +struct AlefeldPotraShi <: AbstractAlefeldPotraShi end + +struct AlefeldPotraShiState{T,S} <: AbstractUnivariateZeroState{T,S} + xn1::T + xn0::T + d::T + fxn1::S + fxn0::S + fd::S +end + +function init_state(::AlefeldPotraShi, F, x₀, x₁, fx₀, fx₁; c=_middle(x₀, x₁), fc=F(c)) + a, b, fa, fb = x₀, x₁, fx₀, fx₁ + isinf(a) && (a = nextfloat(a)) + isinf(b) && (b = prevfloat(b)) + + if a > b + a, b, fa, fb = b, a, fb, fa + end + + # check if fa*fb ≥ 0 + (iszero(fa) || iszero(fb)) && return AlefeldPotraShiState(b, a, a, fb, fa, fa) + assert_bracket(fa, fb) + + a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc) + sign(a) * sign(b) < 0 && throw(ArgumentError("_middle error")) + + return AlefeldPotraShiState(b, a, d, fb, fa, fd) +end + + +# ## 3, maybe 4, functions calls per step +function update_state(M::AlefeldPotraShi, f, state::AlefeldPotraShiState{T,S}, + options, l=NullTracks()) where {T,S} + a::T, b::T, d::T = state.xn0, state.xn1, state.d + + fa::S, fb::S, fd::S = state.fxn0, state.fxn1, state.fd + μ, λ = 0.5, 0.7 + + tole = max(options.xabstol, min(abs(a), abs(b)) * options.xreltol) # paper uses 2|u|*rtol + atol + delta = λ * tole + + c::T = newton_quadratic(a, b, d, fa, fb, fd, 2, delta) + fc::S = f(c) + incfn(l) + + (iszero(fc) || isnan(fc)) && return (_set(state, (c,fc)), true) + (isnan(c) || isinf(c)) && return (state, true) + + a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc) + + c = newton_quadratic(a, b, d, fa, fb, fd, 3, delta) + fc = f(c) + incfn(l) + + (iszero(fc) || isnan(fc)) && return (_set(state, (c,fc)), true) + if isnan(c) || isinf(c) + # tighten up bracket + state = _set(state, (b, fb), (a, fa)) + @set! state.d = d + @set! 
state.fd = fd + + return (state, false) + end + + a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc) + + u::T, fu::S = choose_smallest(a, b, fa, fb) + c = u - 2 * fu * (b - a) / (fb - fa) + if abs(c - u) > 0.5 * (b - a) + c = __middle(a, b) + end + fc = f(c) + incfn(l) + + (iszero(fc) || isnan(fc)) && return (_set(state, (c,fc)), true) + if isnan(c) || isinf(c) + # tighten up bracket + state = _set(state, (b, fb), (a, fa)) + @set! state.d = d + @set! state.fd = fd + + return (state, false) + end + + ahat::T, bhat::T, dhat::T, fahat::S, fbhat::S, fdhat::S = bracket(a, b, c, fa, fb, fc) + if bhat - ahat < μ * (b - a) + #a, b, d, fa, fb, fd = ahat, b, dhat, fahat, fb, fdhat # typo in paper + a, b, d, fa, fb, fd = ahat, bhat, dhat, fahat, fbhat, fdhat + else + m::T = __middle(ahat, bhat) + fm::S = f(m) + incfn(l) + a, b, d, fa, fb, fd = bracket(ahat, bhat, m, fahat, fbhat, fm) + end + + state = _set(state, (b, fb), (a, fa)) + @set! state.d = d + @set! state.fd = fd + + return (state, false) +end + +## -------------------------------------------------- + +# inverse cubic interporation +function ipzero(a, b, c, d, fa, fb, fc, fd) + Q11 = (c - d) * fc / (fd - fc) + Q21 = (b - c) * fb / (fc - fb) + Q31 = (a - b) * fa / (fb - fa) + D21 = (b - c) * fc / (fc - fb) + D31 = (a - b) * fb / (fb - fa) + Q22 = (D21 - Q11) * fb / (fd - fb) + Q32 = (D31 - Q21) * fa / (fc - fa) + D32 = (D31 - Q21) * fc / (fc - fa) + Q33 = (D32 - Q22) * fa / (fd - fa) + a + (Q31 + Q32 + Q33) +end + +# Cubic if possible, if not newton_quadratic until a value is found +function inverse_cubic_interpolation(a, b, d, ee, fa, fb, fd, fe, k, delta=zero(a)) + # if r is NaN or Inf we move on by condition. Faster than checking ahead of time for + # distinctness + r = ipzero(a, b, d, ee, fa, fb, fd, fe) + (a + 2delta < r < b - 2delta) && return r + r = newton_quadratic(a, b, d, fa, fb, fd, 3, delta) +end + + + """ Roots.A42() @@ -95,34 +223,12 @@ on algorithm 4.2 described in: G. E. Alefeld, F. A. Potra, and Y. 
Shi, "Algorithm 748: enclosing zeros of continuous functions," ACM Trans. Math. Softw. 21, 327–344 (1995), DOI: [10.1145/210089.210111](https://doi.org/10.1145/210089.210111). The asymptotic efficiency index, ``q^{1/k}``, is ``(2 + 7^{1/2})^{1/3} = 1.6686...``. -Originally by John Travers. - -""" -struct A42 <: AbstractAlefeldPotraShi end - -# for A42, the defaults are reltol=eps(), atol=0; 45 evals and strict=true -# this *basically* follows the tol in the paper (2|u|*rtol + atol) -""" - default_tolerances(::AbstractAlefeldPotraShi, T, S) - -The default tolerances for Alefeld, Potra, and Shi methods are -`xatol=zero(T)`, `xrtol=eps(T)/2`, `atol= zero(S), and rtol=zero(S)`, with -appropriate units; `maxevals=45`, `maxfnevals = Inf`; and `strict=true`. +Originally by John Travers. """ -default_tolerances(M::AbstractAlefeldPotraShi) = default_tolerances(M, Float64, Float64) -function default_tolerances(::AbstractAlefeldPotraShi, ::Type{T}, ::Type{S}) where {T,S} - xatol = zero(T) - xrtol = eps(one(T)) / 2 - atol = zero(float(one(S))) * oneunit(S) - rtol = zero(float(one(S))) * one(S) - maxevals = 45 - maxfnevals = typemax(Int) - strict = true - (xatol, xrtol, atol, rtol, maxevals, maxfnevals, strict) -end +struct A42 <: AbstractAlefeldPotraShi end ## initial step, needs to log a,b,d function log_step(l::Tracks, M::AbstractAlefeldPotraShi, state; init::Bool=false) @@ -168,15 +274,6 @@ function init_state(::A42, F, x₀, x₁, fx₀, fx₁; c=_middle(x₀, x₁), f A42State(b, a, d, ee, fb, fa, fd, fe) end - - -# helper: set state xn1, fxn1 -function _set_state(state, x₁, fx₁) - @set! state.xn1 = x₁ - @set! 
state.fxn1 = fx₁ - return (state, true) -end - # Main algorithm for A42 method function update_state(M::A42, F, state::A42State{T,S}, options, l=NullTracks()) where {T,S} a::T, b::T, d::T, ee::T = state.xn0, state.xn1, state.d, state.ee @@ -184,35 +281,35 @@ function update_state(M::A42, F, state::A42State{T,S}, options, l=NullTracks()) an, bn = a, b μ, λ = 0.5, 0.7 - tole = max(options.xabstol, max(abs(a), abs(b)) * options.xreltol) # paper uses 2|u|*rtol + atol + + tole = max(options.xabstol, min(abs(a), abs(b)) * options.xreltol) # paper uses 2|u|*rtol + atol, not max delta = λ * tole if isnan(ee) - c = newton_quadratic(a, b, d, fa, fb, fd, 2) + c = newton_quadratic(a, b, d, fa, fb, fd, 2, delta) else - c = ipzero(a, b, d, ee, fa, fb, fd, fe) + c = inverse_cubic_interpolation(a, b, d, ee, fa, fb, fd, fe, delta) end fc::S = F(c) incfn(l) - (iszero(fc) || isnan(fc)) && return _set_state(state, c, fc) + + (iszero(fc) || isnan(fc)) && return (_set(state, (c,fc)), true) + (isnan(c) || isinf(c)) && return (state, true) ab::T, bb::T, db::T, fab::S, fbb::S, fdb::S = bracket(a, b, c, fa, fb, fc) eb::T, feb::S = d, fd - cb::T = take_a42_step(ab, bb, db, eb, fab, fbb, fdb, feb, delta) + cb::T = inverse_cubic_interpolation(ab, bb, db, eb, fab, fbb, fdb, feb, delta) fcb::S = F(cb) incfn(l) - (iszero(fc) || isnan(fc)) && return _set_state(state, c, fc) + (iszero(fc) || isnan(fc)) && return (_set(state, (c,fc)), true) if isnan(c) || isinf(c) # tighten up bracket - @set! state.xn0 = ab - @set! state.xn1 = bb + state = _set(state, (bb, fbb), (ab, fab)) @set! state.d = db - @set! state.fxn0 = fab - @set! state.fxn1 = fbb @set! state.fd = fdb return state, false end @@ -228,14 +325,11 @@ function update_state(M::A42, F, state::A42State{T,S}, options, l=NullTracks()) fch::S = F(ch) incfn(l) - (iszero(fch) || isnan(fch)) && return _set_state(state, ch, fch) + (iszero(fch) || isnan(fch)) && return (_set(state, (ch,fch)), true) if isnan(ch) || isinf(ch) # tighten up bracket - @set! 
state.xn0 = ab - @set! state.xn1 = bb + state = _set(state, (bb, fbb), (ab, fab)) @set! state.d = db - @set! state.fxn0 = fab - @set! state.fxn1 = fbb @set! state.fd = fdb return state, false end @@ -254,145 +348,11 @@ function update_state(M::A42, F, state::A42State{T,S}, options, l=NullTracks()) a, b, d, fa, fb, fd = bracket(ah, bh, m, fah, fbh, fm) end - @set! state.xn0 = a - @set! state.xn1 = b + state = _set(state, (b, fb), (a, fa)) @set! state.d = d @set! state.ee = ee - @set! state.fxn0 = fa - @set! state.fxn1 = fb @set! state.fd = fd @set! state.fee = fe return state, false end - -## ---- - -""" - Roots.AlefeldPotraShi() - -Follows algorithm in "ON ENCLOSING SIMPLE ROOTS OF NONLINEAR -EQUATIONS", by Alefeld, Potra, Shi; DOI: -[10.1090/S0025-5718-1993-1192965-2](https://doi.org/10.1090/S0025-5718-1993-1192965-2). -Asymptotic efficiency index is ``1.618``. Less efficient, but can run faster than the [`A42`](@ref) method. -Originally by John Travers. -""" -struct AlefeldPotraShi <: AbstractAlefeldPotraShi end - -struct AlefeldPotraShiState{T,S} <: AbstractUnivariateZeroState{T,S} - xn1::T - xn0::T - d::T - fxn1::S - fxn0::S - fd::S -end - -function init_state(::AlefeldPotraShi, F, x₀, x₁, fx₀, fx₁; c=_middle(x₀, x₁), fc=F(c)) - a, b, fa, fb = x₀, x₁, fx₀, fx₁ - isinf(a) && (a = nextfloat(a)) - isinf(b) && (b = prevfloat(b)) - - if a > b - a, b, fa, fb = b, a, fb, fa - end - - # check if fa*fb ≥ 0 - (iszero(fa) || iszero(fb)) && return AlefeldPotraShiState(b, a, a, fb, fa, fa) - assert_bracket(fa, fb) - - a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc) - sign(a) * sign(b) < 0 && throw(ArgumentError("_middle error")) - - return AlefeldPotraShiState(b, a, d, fb, fa, fd) -end - - -# ## 3, maybe 4, functions calls per step -function update_state( - M::AlefeldPotraShi, - f, - state::AbstractUnivariateZeroState{T,S}, - options::UnivariateZeroOptions, - l=NullTracks(), -) where {T,S} - a::T, b::T, d::T = state.xn0, state.xn1, state.d - - fa::S, fb::S, fd::S = 
state.fxn0, state.fxn1, state.fd - μ, λ = 0.5, 0.7 - tole = max(options.xabstol, max(abs(a), abs(b)) * options.xreltol) # paper uses 2|u|*rtol + atol - delta = λ * tole - - c::T = newton_quadratic(a, b, d, fa, fb, fd, 2, delta) - fc::S = f(c) - incfn(l) - if iszero(fc) || isnan(fc) - @set! state.xn1 = c - @set! state.fxn1 = fc - return (state, true) - elseif isnan(c) || isinf(c) - return (state, true) - end - - a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc) - - c = newton_quadratic(a, b, d, fa, fb, fd, 3, delta) - fc = f(c) - incfn(l) - - (iszero(fc) || isnan(fc)) && return _set_state(state, c, fc) - if isnan(c) || isinf(c) - # tighten up bracket - @set! state.xn0 = a - @set! state.xn1 = b - @set! state.d = d - @set! state.fxn0 = fa - @set! state.fxn1 = fb - @set! state.fd = fd - - return (state, false) - end - - a, b, d, fa, fb, fd = bracket(a, b, c, fa, fb, fc) - - u::T, fu::S = choose_smallest(a, b, fa, fb) - c = u - 2 * fu * (b - a) / (fb - fa) - if abs(c - u) > 0.5 * (b - a) - c = __middle(a, b) - end - fc = f(c) - incfn(l) - - (iszero(fc) || isnan(fc)) && return _set_state(state, c, fc) - if isnan(c) || isinf(c) - # tighten up bracket - @set! state.xn0 = a - @set! state.xn1 = b - @set! state.d = d - @set! state.fxn0 = fa - @set! state.fxn1 = fb - @set! state.fd = fd - - return (state, false) - end - - ahat::T, bhat::T, dhat::T, fahat::S, fbhat::S, fdhat::S = bracket(a, b, c, fa, fb, fc) - if bhat - ahat < μ * (b - a) - #a, b, d, fa, fb, fd = ahat, b, dhat, fahat, fb, fdhat # typo in paper - a, b, d, fa, fb, fd = ahat, bhat, dhat, fahat, fbhat, fdhat - else - m::T = __middle(ahat, bhat) - fm::S = f(m) - incfn(l) - a, b, d, fa, fb, fd = bracket(ahat, bhat, m, fahat, fbhat, fm) - end - - @set! state.xn0 = a - @set! state.xn1 = b - @set! state.d = d - @set! state.fxn0 = fa - @set! state.fxn1 = fb - @set! 
state.fd = fd - - return (state, false) -end diff --git a/src/Bracketing/bisection.jl b/src/Bracketing/bisection.jl index 444fcdc1..18321291 100644 --- a/src/Bracketing/bisection.jl +++ b/src/Bracketing/bisection.jl @@ -1,7 +1,6 @@ """ Bisection() - Roots.BisectionExact() (deprecated) If possible, will use the bisection method over `Float64` values. The bisection method starts with a bracketing interval `[a,b]` and splits @@ -16,59 +15,82 @@ tolerances are set to zero (the default) guarantees a "best" solution `[a, nextfloat(a)]`). When tolerances are given, this algorithm terminates when the interval -length is less than or equal to the tolerance -`max(xtol, max(abs(a), abs(b)) * .xrtol)`. +length is less than or equal to the tolerance `max(δₐ, 2abs(u)δᵣ)` with `u` in +`{a,b}` chosen by the smaller of `|f(a)|` and `|f(b)|`, or + or the function value is less than +`max(tol, min(abs(a), abs(b)) * rtol)`. The latter is used only if the default +tolerances (`atol` or `rtol`) are adjusted. -When a zero tolerance is given and the values are not `Float64` -values, this will call the [`A42`](@ref) method. (To be deprecated.) 
+""" +struct Bisection <: AbstractBisectionMethod end -""" -struct Bisection <: AbstractBisection end # either solvable or A42 -struct BisectionExact <: AbstractBisection - function BisectionExact() - Base.depwarn("BisectionExact is deprecated; use Bisection", :BisectionExact) - new() +initial_fncalls(::AbstractBisectionMethod) = 3 # middle + +# Bisection using __middle should have a,b on same side of 0.0 (though +# possibly, may be -0.0, 1.0 so not guaranteed to be of same sign) +function init_state(::AbstractBisectionMethod, F, x₀, x₁, fx₀, fx₁; m=_middle(x₀, x₁), fm=F(m)) + + if x₀ > x₁ + x₀, x₁, fx₀, fx₁ = x₁, x₀, fx₁, fx₀ end + + # handle interval if fa*fb ≥ 0 (explicit, but also not needed) + (iszero(fx₀) || iszero(fx₁)) && return UnivariateZeroState(x₁, x₀, fx₁, fx₀) + assert_bracket(fx₀, fx₁) + if sign(fm) * fx₀ < 0 * oneunit(fx₀) + a, b, fa, fb = x₀, m, fx₀, fm + else + a, b, fa, fb = m, x₁, fm, fx₁ + end + + # handles case where a=-0.0, b=1.0 without error + sign(a) * sign(b) < 0 && throw(ArgumentError("_middle error")) + + UnivariateZeroState(b, a, fb, fa) end -initial_fncalls(::Roots.AbstractBisection) = 3 +const FloatNN = Union{Float64,Float32,Float16} # for Bisection, the defaults are zero tolerances and strict=true """ - default_tolerances(M::AbstractBisection, [T], [S]) - + default_tolerances(M::AbstractBisectionMethod, [T], [S]) -For `Bisection` (or `BisectionExact`), when the `x` values are of type `Float64`, `Float32`, +For `Bisection` when the `x` values are of type `Float64`, `Float32`, or `Float16`, the default tolerances are zero and there is no limit on the number of iterations. In this case, the algorithm is guaranteed to converge to an exact zero, or a point where the function changes sign at one of the answer's adjacent floating point values. -For other types, the [`Roots.A42`](@ref) method (with its tolerances) is used. (To be deprecated.) +For other types, default non-zero tolerances for `xatol` and `xrtol` are given. 
""" -function default_tolerances(::AbstractBisection, ::Type{T}, ::Type{S}) where {T,S} - xatol = zero(T) - xrtol = zero(one(T)) - atol = zero(float(one(S))) * oneunit(S) - rtol = zero(float(one(S))) * one(S) - maxevals = typemax(Int) - maxfnevals = typemax(Int) +function default_tolerances(::AbstractBisectionMethod, ::Type{T}, ::Type{S′}) where {T<:FloatNN,S′} + S = real(float(S′)) + xatol = 0 * oneunit(S) + xrtol = 0 * one(T) + atol = 0 * oneunit(S) + rtol = 0 * one(S) + maxiters = typemax(Int) strict = true - (xatol, xrtol, atol, rtol, maxevals, maxfnevals, strict) + (xatol, xrtol, atol, rtol, maxiters, strict) end -function log_step(l::Tracks, M::Bisection, state; init::Bool=false) - a, b = state.xn0, state.xn1 - push!(l.abₛ, (a,b)) - init && log_iteration(l, 1) # c is computed - !init && log_iteration(l, 1) - nothing +# not float uses some non-zero tolerances for `x` +function default_tolerances(::AbstractBisectionMethod, ::Type{T′}, ::Type{S′}) where {T′,S′} + T,S = real(float(T′)), real(float(S′)) + xatol = eps(T)^3 * oneunit(T) + xrtol = eps(T) * one(T) # unitless + atol = 0 * oneunit(S) + rtol = 0 * one(S) + maxiters = typemax(Int) + strict = true + (xatol, xrtol, atol, rtol, maxiters, strict) end + # find middle of (a,b) with convention that # * if a, b finite, they are made non-finite # if a,b of different signs, middle is 0 @@ -85,8 +107,6 @@ function _middle(x, y) end end -const FloatNN = Union{Float64,Float32,Float16} - ## find middle assuming a,b same sign, finite ## Alternative "mean" definition that operates on the binary representation ## of a float. 
Using this definition, bisection will never take more than @@ -108,40 +128,10 @@ function __middle(T, S, x, y) sign(x + y) * reinterpret(T, mid) end -## the method converges, -## as we bound between x0, nextfloat(x0) is not measured by eps(), but eps(x0) -function assess_convergence(::Bisection, state::AbstractUnivariateZeroState, options) - flag, converged = assess_convergence(BisectionExact(), state, options) - converged && return (flag, converged) - - a, b = state.xn0, state.xn1 - fa, fb = state.fxn0, state.fxn1 - - if isapprox(a, b, atol=options.xabstol, rtol=options.xreltol) - return (:x_converged, true) - end - - ftol = max(options.abstol, max(abs(a), abs(b)) * options.reltol) - if min(abs(fa), abs(fb)) ≤ ftol - return (:f_converged, true) - end - - return :not_converged, false -end - -# for exact convergence, we can skip some steps -function assess_convergence(::BisectionExact, state::UnivariateZeroState, options) - a, b = state.xn0, state.xn1 - fa, fb = state.fxn0, state.fxn1 - - (iszero(fa) || isnan(fa) || iszero(fb) || isnan(fb)) && return (:f_converged, true) - nextfloat(a) == b && return (:x_converged, true) +## -------------------------------------------------- - return (:not_converged, false) -end -################################################## -function update_state(M::AbstractBisection, F, o, options, l=NullTracks()) +function update_state(M::AbstractBisectionMethod, F, o, options, l=NullTracks()) a, b = o.xn0, o.xn1 fa, fb = o.fxn0, o.fxn1 @@ -163,35 +153,70 @@ function update_state(M::AbstractBisection, F, o, options, l=NullTracks()) return o, false end -################################################## - -## Bisection has special cases -## for zero tolerance, we have either BisectionExact or A42 methods -## for non-zero tolerances, we have use thegeneral Bisection method -function find_zero( - fs, - x0, - method::Bisection; - p=nothing, - tracks=NullTracks(), - verbose=false, - kwargs..., -) - - Base.depwarn("The special case of bisection 
over BigFloat with zero tolerance using `A42` is deprecated. Now bisection is used with non-zero tolerances.", :find_zero) - - _options = init_options(Bisection(), Float64, Float64; kwargs...) - iszero_tol = - iszero(_options.xabstol) && - iszero(_options.xreltol) && - iszero(_options.abstol) && - iszero(_options.reltol) - - _method = if iszero_tol - float(first(_extrema(x0))) isa FloatNN ? BisectionExact() : A42() - else - method + +## Special case default method for `find_zero(f, (a,b))`; gives ~10% speedup by avoiding assess_convergence/update state dispatch +function solve!(P::ZeroProblemIterator{𝑴, 𝑵, 𝑭, 𝑺, 𝑶, 𝑳}; verbose=false) where { + 𝑴 <: Bisection, 𝑵, 𝑭, 𝑺, 𝑶 <: ExactOptions, 𝑳} + + M, F, state, options, l = P.M, P.F, P.state, P.options, P.logger + + val, stopped = :not_converged, false + ctr = 1 + log_step(l, M, state; init=true) + + while !stopped + + a, b = state.xn0, state.xn1 + fa, fb = state.fxn0, state.fxn1 + + ## assess_convergence + if nextfloat(a) ≥ b + val = :x_converged + break + end + + if (isnan(fa) || isnan(fb)) + val = :nan + break + end + + if (iszero(fa) || iszero(fb)) + val = :exact_zero + break + end + ctr > options.maxiters && break + + # ---- + ## update step + c = __middle(a, b) + fc = F(c) + incfn(l) + + if sign(fa) * sign(fc) < 0 + b, fb = c, fc + else + a, fa = c, fc + end + + ## ---- + @set! state.xn0 = a + @set! state.xn1 = b + @set! state.fxn0 = fa + @set! state.fxn1 = fb + + + log_step(l, M, state) + ctr += 1 end - return solve(ZeroProblem(fs, x0), _method, p; verbose=verbose, tracks=tracks, kwargs...) 
+# val, stopped = assess_convergence(M, state, options) # update val flag + α = decide_convergence(M, F, state, options, val) + + log_convergence(l, val) + log_method(l, M) + log_last(l, α) + verbose && display(l) + + α + end diff --git a/src/Bracketing/bracketing.jl b/src/Bracketing/bracketing.jl index 17a5d0d8..2d0e8d83 100644 --- a/src/Bracketing/bracketing.jl +++ b/src/Bracketing/bracketing.jl @@ -1,132 +1,66 @@ -### +### Bracketing method defaults -const bracketing_error = """The interval [a,b] is not a bracketing interval. -You need f(a) and f(b) to have different signs (f(a) * f(b) < 0). -Consider a different bracket or try fzero(f, c) with an initial guess c. - -""" - -## utils -@inline isbracket(fa, fb) = sign(fa) * sign(fb) < 0 -assert_bracket(fx0, fx1) = isbracket(fx0, fx1) || throw(ArgumentError(bracketing_error)) - - -## tracks for bisection, different, we show bracketing interval -## No init here; for Bisection() [a₀, b₀] is just lost. -function log_step(l::Tracks, M::AbstractBracketing, state; init::Bool=false) - a, b = state.xn0, state.xn1 - push!(l.abₛ, a < b ? 
(a,b) : (b,a)) - !init && log_iteration(l, 1) - nothing -end - - -## helper function: floating point, sorted, finite -function adjust_bracket(x0) - u, v = map(float, _extrema(x0)) - if u > v - u, v = v, u - end - isinf(u) && (u = nextfloat(u)) - isinf(v) && (v = prevfloat(v)) - u, v -end - -function init_state(M::AbstractBracketing, F::Callable_Function, x) +function init_state(M::AbstractBracketingMethod, F::Callable_Function, x) x₀, x₁ = adjust_bracket(x) fx₀, fx₁ = F(x₀), F(x₁) state = init_state(M, F, x₀, x₁, fx₀, fx₁) end -function init_state(::AbstractBracketing, F, x₀, x₁, fx₀, fx₁; m=_middle(x₀, x₁), fm=F(m)) - - if x₀ > x₁ - x₀, x₁, fx₀, fx₁ = x₁, x₀, fx₁, fx₀ - end - - # handle interval if fa*fb ≥ 0 (explicit, but also not needed) +function init_state(M::AbstractBracketingMethod, F, x₀, x₁, fx₀, fx₁) (iszero(fx₀) || iszero(fx₁)) && return UnivariateZeroState(x₁, x₀, fx₁, fx₀) assert_bracket(fx₀, fx₁) - - if sign(fm) * fx₀ < 0 - a, b, fa, fb = x₀, m, fx₀, fm - else - a, b, fa, fb = m, x₁, fm, fx₁ - end - - sign(a) * sign(b) < 0 && throw(ArgumentError("_middle error")) - + a, b, fa, fb = (x₀ < x₁) ? (x₀, x₁, fx₀, fx₁) : (x₁, x₀, fx₁, fx₀) UnivariateZeroState(b, a, fb, fa) end +Base.last(state::AbstractUnivariateZeroState, M::AbstractBracketingMethod) = + state.xn0 < state.xn1 ? (state.xn0, state.xn1) : (state.xn1, state.xn0) -initial_fncalls(::Roots.AbstractBracketing) = 2 -fn_argout(::AbstractBracketing) = 1 - - +fn_argout(::AbstractBracketingMethod) = 1 +initial_fncalls(::AbstractBracketingMethod) = 2 -function assess_convergence( - method::AbstractBracketing, - state::AbstractUnivariateZeroState, - options, -) - a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1 - - if isnan(a) || isnan(b) - return (:nan, true) - end +## tracks for bisection, different from secant, we show bracketing interval +## No init here; for Bisection() [a₀, b₀] is just lost. 
+function log_step(l::Tracks, M::AbstractBracketingMethod, state; init::Bool=false) + a, b = state.xn0, state.xn1 + push!(l.abₛ, a < b ? (a,b) : (b,a)) + !init && log_iteration(l, 1) + nothing +end - if isnan(fa) || isnan(fb) - return (:nan, true) - end +# use xatol, xrtol only, but give some breathing room over the strict ones and cap number of steps +function default_tolerances(::AbstractBracketingMethod, ::Type{T}, ::Type{S}) where {T,S} + xatol = eps(real(T))^3 * oneunit(real(T)) + xrtol = eps(real(T)) # unitless + atol = 0 * oneunit(real(S)) + rtol = 0 * one(real(S)) + maxevals = 60 + strict = true + (xatol, xrtol, atol, rtol, maxevals, strict) +end - M = maximum(abs, (a, b)) - δₓ = maximum( - promote(options.xabstol, M * options.xreltol, sign(options.xreltol) * eps(M)), - ) - if abs(b - a) <= 2δₓ - return (:x_converged, true) - end - # check f - u, fu = choose_smallest(a, b, fa, fb) - δ = maximum(promote(options.abstol, M * options.reltol * (oneunit(fu) / oneunit(u)))) - if abs(fu) <= δ - iszero(fu) && return (:exact_zero, true) - return (:f_converged, true) - end - return (:not_converged, false) -end +## -------------------------------------------------- +const bracketing_error = """The interval [a,b] is not a bracketing interval. +You need f(a) and f(b) to have different signs (f(a) * f(b) < 0). +Consider a different bracket or try fzero(f, c) with an initial guess c. 
-# assumes stopped = :x_converged -function decide_convergence( - ::AbstractBracketing, - F, - state::AbstractUnivariateZeroState, - options, - val, -) - a, b = state.xn0, state.xn1 - fa, fb = state.fxn0, state.fxn1 +""" - isnan(fa) && return a - isnan(fb) && return b +## utils +@inline isbracket(fa, fb) = sign(fa) * sign(fb) < 0 +assert_bracket(fx0, fx1) = isbracket(fx0, fx1) || throw(ArgumentError(bracketing_error)) - if abs(fa) < abs(fb) - return a - else - return b +## helper function: floating point, sorted, finite +function adjust_bracket(x0) + u, v = map(float, _extrema(x0)) + if u > v + u, v = v, u end -end - - -## convergence is much different here -function check_zero(::AbstractBracketing, state, c, fc) - isnan(c) && return true - isinf(c) && return true - iszero(fc) && return true - return false + isinf(u) && (u = nextfloat(u)) + isinf(v) && (v = prevfloat(v)) + u, v end diff --git a/src/Bracketing/bracketing_deprecate.jl b/src/Bracketing/bracketing_deprecate.jl deleted file mode 100644 index 7ff4d1c1..00000000 --- a/src/Bracketing/bracketing_deprecate.jl +++ /dev/null @@ -1,43 +0,0 @@ -## -------------------------------------------------- -""" - find_bracket(f, x0, method=A42(); kwargs...) - -For bracketing methods returns an approximate root, the last bracketing interval used, and a flag indicating if an exact zero was found as a named tuple. - -With the default tolerances, one of these should be the case: `exact` is `true` (indicating termination of the algorithm due to an exact zero being identified) or the length of `bracket` is less or equal than `2eps(maximum(abs.(bracket)))`. In the `BisectionExact` case, the 2 could be replaced by 1, as the bracket, `(a,b)` will satisfy `nextfloat(a) == b `; the Alefeld, Potra, and Shi algorithms don't quite have that promise. 
- -""" -function find_bracket( - fs, - x0, - method::M=A42(); - kwargs..., -) where {M<:Union{AbstractAlefeldPotraShi,BisectionExact}} - - Base.depwarn("This interface is deprecated", :find_bracket) - - x = adjust_bracket(x0) - F = Callable_Function(method, fs) #callable_function(fs) - state = init_state(method, F, x) - options = init_options(method, state; kwargs...) - l = NullTracks() - - # check if tolerances are exactly 0 - iszero_tol = - iszero(options.xabstol) && - iszero(options.xreltol) && - iszero(options.abstol) && - iszero(options.reltol) - - val, stopped = :not_converged, false - while !stopped - val, stopped = assess_convergence(method, state, options) - stopped && break - state, stopped = update_state(method, F, state, options, l) - end - - a, b = state.xn0, state.xn1 - fa, fb = state.fxn0, state.fxn1 - xstar, fxstar = abs(fa) < abs(fb) ? (a, fa) : (b, fb) - (xstar=xstar, bracket=(a, b), exact=iszero(fxstar)) -end diff --git a/src/Bracketing/brent.jl b/src/Bracketing/brent.jl index 53b6150f..a0900b65 100644 --- a/src/Bracketing/brent.jl +++ b/src/Bracketing/brent.jl @@ -7,7 +7,7 @@ This method uses a choice of inverse quadratic interpolation or a secant step, falling back on bisection if necessary. 
""" -struct Brent <: AbstractAcceleratedBisection end +struct Brent <: AbstractBracketingMethod end struct BrentState{T,S} <: AbstractUnivariateZeroState{T,S} xn1::T @@ -20,7 +20,6 @@ struct BrentState{T,S} <: AbstractUnivariateZeroState{T,S} mflag::Bool end - # # we store mflag as -1, or +1 in state.mflag function init_state(::Brent, F, x₀, x₁, fx₀, fx₁) u, v, fu, fv = x₀, x₁, fx₀, fx₁ @@ -36,13 +35,12 @@ function init_state(::Brent, F, x₀, x₁, fx₀, fx₁) BrentState(u, v, v, v, fu, fv, fv, true) end -default_tolerances(::Brent, ::Type{T}, ::Type{S}) where {T,S} = default_tolerances(Secant(), T, S) # need relaxing function update_state( ::Brent, f, - state::BrentState, - options::UnivariateZeroOptions{T,S}, + state::BrentState{T,S}, + options, l=NullTracks(), ) where {T,S} mflag = state.mflag @@ -75,13 +73,9 @@ function update_state( fs = f(s) incfn(l) - if iszero(fs) - @set! state.xn1 = s - @set! state.fxn1 = fs - return (state, true) - elseif isnan(fs) || isinf(fs) - return (state, true) - end + + iszero(fs) && return (_set(state, (s, fs)), true) + (isnan(fs) || isinf(fs)) && return (state, true) d = c c, fc = b, fb @@ -96,12 +90,9 @@ function update_state( a, b, fa, fb = b, a, fb, fa end - @set! state.xn0 = a - @set! state.xn1 = b + state = _set(state, (b,fb), (a, fa)) @set! state.c = c @set! state.d = d - @set! state.fxn0 = fa - @set! state.fxn1 = fb @set! state.fc = fc @set! 
state.mflag = mflag diff --git a/src/Bracketing/chandrapatlu.jl b/src/Bracketing/chandrapatlu.jl index 7f505f10..fa38656d 100644 --- a/src/Bracketing/chandrapatlu.jl +++ b/src/Bracketing/chandrapatlu.jl @@ -10,7 +10,7 @@ Chandrapatla's algorithm chooses between an inverse quadratic step or a bisectio """ -struct Chandrapatla <: AbstractAcceleratedBisection end +struct Chandrapatla <: AbstractBracketingMethod end struct ChandrapatlaState{T,S} <: AbstractUnivariateZeroState{T,S} xn1::T @@ -53,11 +53,8 @@ function update_state(::Chandrapatla, F, o, options, l=NullTracks()) fa, fc = fₜ, fa end - @set! o.xn1 = a - @set! o.xn0 = b + o = _set(o, (a, fa), (b, fb)) # a is xₙ, b is xₙ₋₁ @set! o.c = c - @set! o.fxn1 = fa - @set! o.fxn0 = fb @set! o.fc = fc return (o, false) diff --git a/src/Bracketing/false_position.jl b/src/Bracketing/false_position.jl index 92af0639..003e555b 100644 --- a/src/Bracketing/false_position.jl +++ b/src/Bracketing/false_position.jl @@ -1,4 +1,4 @@ -struct FalsePosition{R} <: AbstractSecant end +struct FalsePosition{R} <: AbstractSecantMethod end """ @@ -23,7 +23,7 @@ default). The default choice has generally better performance than the others, though there are exceptions. For some problems, the number of function calls can be greater than -for the `BisectionExact` method, but generally this algorithm will make +for the `Bisection` method, but generally this algorithm will make fewer function calls. 
Examples @@ -34,33 +34,39 @@ find_zero(x -> x^5 - x - 1, (-2, 2), FalsePosition()) FalsePosition FalsePosition(x=:anderson_bjork) = FalsePosition{x}() -init_state(M::FalsePosition, F, x₀, x₁, fx₀, fx₁) = init_state(BisectionExact(), F, x₀, x₁, fx₀, fx₁) +# 12 is tough; needs more evaluations +function default_tolerances(::FalsePosition{12}, ::Type{T}, ::Type{S}) where {T,S} + xatol = eps(real(T)) * oneunit(real(T)) + xrtol = eps(real(T)) # unitless + atol = 4 * eps(real(float(S))) * oneunit(real(S)) + rtol = 4 * eps(real(float(S))) * one(real(S)) + maxiters = 250 + strict = false + (xatol, xrtol, atol, rtol, maxiters, strict) +end + + +init_state(M::FalsePosition, F, x₀, x₁, fx₀, fx₁) = init_state(Bisection(), F, x₀, x₁, fx₀, fx₁) function update_state( method::FalsePosition, fs, o::AbstractUnivariateZeroState{T,S}, - options::UnivariateZeroOptions, + options, l=NullTracks(), ) where {T,S} a, b = o.xn0, o.xn1 fa, fb = o.fxn0, o.fxn1 lambda = fb / (fb - fa) - tau = 1e-10 # some engineering to avoid short moves; still fails on some - if !(tau < abs(lambda) < 1 - tau) - lambda = 1 / 2 - end + ϵ = √eps(T)/100 # some engineering to avoid short moves; still fails on some + ϵ ≤ lambda ≤ 1-ϵ || (lambda = 1/2) x::T = b - lambda * (b - a) fx = fs(x) incfn(l) - if iszero(fx) - @set! o.xn1 = x - @set! o.fxn1 = fx - return (o, true) - end + iszero(fx) && return (_set(o, (x, fx)), true) if sign(fx) * sign(fb) < 0 a, fa = b, fb @@ -69,10 +75,7 @@ function update_state( end b, fb = x, fx - @set! o.xn0 = a - @set! o.xn1 = b - @set! o.fxn0 = fa - @set! o.fxn1 = fb + o = _set(o, (b, fb), (a, fa)) return (o, false) end diff --git a/src/Bracketing/itp.jl b/src/Bracketing/itp.jl index 42af7074..d645ba99 100644 --- a/src/Bracketing/itp.jl +++ b/src/Bracketing/itp.jl @@ -22,7 +22,7 @@ by `@TheLateKronos`, who supplied the original version of the code below. 
""" -struct ITP{T,S} <: AbstractAcceleratedBisection +struct ITP{T,S} <: AbstractBracketingMethod κ₁::T κ₂::S n₀::Int @@ -86,12 +86,13 @@ function update_state(M::ITP, F, o, options, l=NullTracks()) Δ = b-a x₁₂ = a + Δ/2 # middle must be (a+b)/2 r = ϵ2n₁₂ * exp2(-j) - Δ/2 - δ = κ₁ * Δ^κ₂ # a numeric literal for κ₂ is faster + δ′ = κ₁ * Δ^κ₂ # a numeric literal for κ₂ is faster + δ = δ′/oneunit(δ′) # δ = κ₁ * Δ^2 xᵣ = (b*fa - a*fb) / (fa - fb) σ = sign(x₁₂ - xᵣ) - xₜ = δ ≤ abs(x₁₂ - xᵣ) ? xᵣ + σ*δ : x₁₂ + xₜ = δ ≤ abs(x₁₂ - xᵣ)/oneunit(xᵣ) ? xᵣ + σ*δ*oneunit(xᵣ) : x₁₂ c = xᵢₜₚ = abs(xₜ - x₁₂) ≤ r ? xₜ : x₁₂ - σ * r @@ -109,10 +110,7 @@ function update_state(M::ITP, F, o, options, l=NullTracks()) a, fa = c, fc end - @set! o.xn0 = a - @set! o.xn1 = b - @set! o.fxn0 = fa - @set! o.fxn1 = fb + o = _set(o, (b,fb), (a,fa)) @set! o.j = j + 1 return o, false diff --git a/src/Bracketing/ridders.jl b/src/Bracketing/ridders.jl index b87b8217..618d921f 100644 --- a/src/Bracketing/ridders.jl +++ b/src/Bracketing/ridders.jl @@ -23,7 +23,7 @@ true `f=F', g=F''/2, h=F'''/6`, suggesting converence at rate `≈ 1.839...`. It uses two function evaluations per step, so its order of convergence is `≈ 1.225...`. """ -struct Ridders <: AbstractAcceleratedBisection end +struct Ridders <: AbstractBracketingMethod end function update_state(M::Ridders, F, o, options, l=NullTracks()) @@ -53,10 +53,7 @@ function update_state(M::Ridders, F, o, options, l=NullTracks()) a, fa = c, fc end - @set! o.xn0 = a - @set! o.xn1 = b - @set! o.fxn0 = fa - @set! 
o.fxn1 = fb + o = _set(o, (b, fb), (a, fa)) return o, false end diff --git a/src/Derivative/halley_like.jl b/src/Derivative/halley_like.jl index 8128cbf0..13f26ba8 100644 --- a/src/Derivative/halley_like.jl +++ b/src/Derivative/halley_like.jl @@ -26,25 +26,19 @@ initial_fncalls(M::AbstractHalleyLikeMethod) = 2 * 3 fn_argout(::AbstractHalleyLikeMethod) = 3 -function update_state( - method::AbstractΔMethod, - F, - o::HalleyState{T,S}, - options::UnivariateZeroOptions, - l=NullTracks(), -) where {T,S} +function update_state(M::AbstractΔMethod, F, o::HalleyState{T,S}, options, l=NullTracks()) where {T,S} xn = o.xn1 fxn = o.fxn1 - r1, r2 = o.Δ, o.ΔΔ + r1::T, r2::T = o.Δ, o.ΔΔ - Δ::T = calculateΔ(method, r1, r2) + Δ::T = calculateΔ(M, r1, r2) if isissue(Δ) log_message(l, "Issue with computing `Δ`") return (o, true) end xn1::T = xn - Δ - fxn1::S, (r1::T, r2::T) = F(xn1) + fxn1::S, (r1, r2) = F(xn1) incfn(l, 3) @set! o.xn0 = xn @@ -62,29 +56,33 @@ end Implements Halley's [method](http://tinyurl.com/yd83eytb), `xᵢ₊₁ = xᵢ - (f/f')(xᵢ) * (1 - (f/f')(xᵢ) * (f''/f')(xᵢ) * 1/2)^(-1)` -This method is cubically converging, but requires more function calls per step (3) than -other methods. +This method is cubically converging, it requires ``3`` function calls per step. -Example +The function, its derivative and second derivative can be passed either as a tuple of ``3`` functions *or* +as a function returning values for ``(f, f/f', f'/f'')``, which could be useful when function evaluations are expensive. 
+ +## Examples ```jldoctest with_derivative julia> using Roots julia> find_zero((sin, cos, x->-sin(x)), 3.0, Roots.Halley()) ≈ π true -``` -If function evaluations are expensive one can pass in a function which -returns `(f, f/f',f'/f'')` as follows +julia> function f(x) + s,c = sincos(x) + (s, s/c, -c/s) + end; -```jldoctest with_derivative -julia> find_zero(x -> (sin(x), sin(x)/cos(x), -cos(x)/sin(x)), 3.0, Roots.Halley()) ≈ π +julia> find_zero(f, 3.0, Roots.Halley()) ≈ π true ``` This can be advantageous if the derivatives are easily computed from the computation for f, but otherwise would be expensive to compute separately. +---- + The error, `eᵢ = xᵢ - α`, satisfies `eᵢ₊₁ ≈ -(2f'⋅f''' -3⋅(f'')²)/(12⋅(f'')²) ⋅ eᵢ³` (all evaluated at `α`). @@ -95,10 +93,10 @@ struct Halley <: AbstractΔMethod end """ Roots.QuadraticInverse() -Implements the [quadratic inverse method](https://doi.org/10.2307/2322644) also known as [Chebyshev's method]((https://dl.acm.org/doi/10.1080/00207160802208358)), +Implements the [quadratic inverse method](https://doi.org/10.2307/2322644) also known as +[Chebyshev's method]((https://dl.acm.org/doi/10.1080/00207160802208358)), `xᵢ₊₁ = xᵢ - (f/f')(xᵢ) * (1 + (f/f')(xᵢ) * (f''/f')(xᵢ) * 1/2)`. -This method is cubically converging, but requires more function calls per step (3) than -other methods. +This method is cubically converging, it requires ``3`` function calls per step. Example @@ -146,18 +144,20 @@ Schröder's method, like Halley's method, utilizes `f`, `f'`, and independent of the multiplicity of the zero (cf. Schröder, E. "Über unendlich viele Algorithmen zur Auflösung der Gleichungen." Math. Ann. 2, 317-365, 1870; -[mathworld](http://mathworld.wolfram.com/SchroedersMethod.html)). Schröder's -method applies Newton's method to `f/f'`, a function with all -simple zeros. +[mathworld](http://mathworld.wolfram.com/SchroedersMethod.html)). +Schröder's method applies Newton's method to `f/f'`, a function with +all simple zeros. 
+ + +## Example -Example ``` m = 2 f(x) = (cos(x)-x)^m fp(x) = (-x + cos(x))*(-2*sin(x) - 2) fpp(x) = 2*((x - cos(x))*cos(x) + (sin(x) + 1)^2) -find_zero((f, fp, fpp), pi/4, Roots.Halley()) # 14 steps +find_zero((f, fp, fpp), pi/4, Roots.Halley()) # 14 steps find_zero((f, fp, fpp), 1.0, Roots.Schroder()) # 3 steps ``` @@ -181,8 +181,8 @@ const Schroeder = Schroder # either spelling const Schröder = Schroder ## r1, r2 are o.Δ, o.ΔΔ -calculateΔ(method::Halley, r1, r2) = 2r2 / (2r2 - r1) * r1 -calculateΔ(method::QuadraticInverse, r1, r2) = (1 + r1 / (2r2)) * r1 -calculateΔ(method::ChebyshevLike, r1, r2) = (1 + r1 / (2r2) * (1 + r1 / r2)) * r1 -calculateΔ(method::SuperHalley, r1, r2) = (1 + r1 / (2r2 - 2r1)) * r1 -calculateΔ(method::Schroder, r1, r2) = r2 / (r2 - r1) * r1 +calculateΔ(::Halley, r1, r2) = 2r2 / (2r2 - r1) * r1 +calculateΔ(::QuadraticInverse, r1, r2) = (1 + r1 / (2r2)) * r1 +calculateΔ(::ChebyshevLike, r1, r2) = (1 + r1 / (2r2) * (1 + r1 / r2)) * r1 +calculateΔ(::SuperHalley, r1, r2) = (1 + r1 / (2r2 - 2r1)) * r1 +calculateΔ(::Schroder, r1, r2) = r2 / (r2 - r1) * r1 diff --git a/src/Derivative/lith.jl b/src/Derivative/lith.jl index 885bacd1..4cffc69f 100644 --- a/src/Derivative/lith.jl +++ b/src/Derivative/lith.jl @@ -1,24 +1,10 @@ -# [Lith, Boonkkamp, and IJzerman](https://doi.org/10.1016/j.amc.2017.09.003) -# A family of different methods that includes the secant method and Newton's method - -# return f^(i-1)(x); not the same as default eval call -function evalf(F::Callable_Function{S,T,𝑭,P}, x, i) where {N,S<:Val{N},T<:Val{true},𝑭,P} - F.f[i](x) -end -function evalf(F::Callable_Function{S,T,𝑭,P}, x, i) where {S<:Val{1},T<:Val{false},𝑭,P} - F.f(x) -end -evalf(::Callable_Function, x, i) = throw( - ArgumentError( - "LithBoonkkampIJzerman expects functions to be defined singly or as tuples", - ), -) - """ LithBoonkkampIJzerman{S,D} <: AbstractNewtonLikeMethod LithBoonkkampIJzerman(S,D) -Specify a linear multistep solver with `S` steps and `D` derivatives 
following [Lith, Boonkkamp, and +A family of different methods that includes the secant method and Newton's method. + +Specifies a linear multistep solver with `S` steps and `D` derivatives following [Lith, Boonkkamp, and IJzerman](https://doi.org/10.1016/j.amc.2017.09.003). ## Examples @@ -140,6 +126,22 @@ struct LithBoonkkampIJzermanState{S′,D⁺,T,S} <: AbstractUnivariateZeroState{ fm::NTuple{D⁺,NTuple{S′,S}} end +log_step(l::Tracks, M::LithBoonkkampIJzerman, state; init=false) = log_step(l, Secant(), state; init=init) + + +# return f^(i-1)(x); not the same as default eval call +function evalf(F::Callable_Function{S,T,𝑭,P}, x, i) where {N,S<:Val{N},T<:Val{true},𝑭,P} + F.f[i](x) +end +function evalf(F::Callable_Function{S,T,𝑭,P}, x, i) where {S<:Val{1},T<:Val{false},𝑭,P} + F.f(x) +end +evalf(::Callable_Function, x, i) = throw( + ArgumentError( + "LithBoonkkampIJzerman expects functions to be defined singly or as tuples", + ), +) + function init_state(L::LithBoonkkampIJzerman{S,0}, F, x) where {S} x₀, x₁ = x₀x₁(x) fx₀, fx₁ = evalf(F, x₀, 1), evalf(F, x₁, 1) @@ -183,7 +185,6 @@ function update_state( l=NullTracks(), ) where {S,D,S⁺,D′,R,T} xs, ys = o.m, o.fm - xᵢ::R = lmm(L, xs, ys...) isissue(o.xn1 - xᵢ) && return (o, true) @@ -307,7 +308,7 @@ the next step. 
""" -struct LithBoonkkampIJzermanBracket <: AbstractBracketing end +struct LithBoonkkampIJzermanBracket <: AbstractBracketingMethod end struct LithBoonkkampIJzermanBracketState{T,S,R} <: AbstractUnivariateZeroState{T,S} xn1::T xn0::T @@ -351,7 +352,7 @@ function update_state( M::LithBoonkkampIJzermanBracket, F, state::LithBoonkkampIJzermanBracketState{T,S,R}, - options::UnivariateZeroOptions, + options, l=NullTracks(), ) where {T,S,R} b::T, c::T, a::T = state.xn1, state.c, state.xn0 @@ -456,9 +457,8 @@ function default_tolerances( atol = zero(float(one(S))) * oneunit(S) rtol = 2eps(float(one(S))) * one(S) maxevals = typemax(Int) - maxfnevals = typemax(Int) strict = true - (xatol, xrtol, atol, rtol, maxevals, maxfnevals, strict) + (xatol, xrtol, atol, rtol, maxevals, strict) end ### ------ diff --git a/src/Derivative/newton.jl b/src/Derivative/newton.jl index 2b775586..3972d67a 100644 --- a/src/Derivative/newton.jl +++ b/src/Derivative/newton.jl @@ -1,22 +1,13 @@ -################################################## -## Classical derivative-based, iterative, root-finding algorithms -## -## If ri = f^(i-1)/f^(i), then these have an update step `xn - delta` where: -## -## * Newton: delta = r1 # order 2 if simple root (multiplicity 1) -## * Halley: delta = 2*r2/(2r2 - r1) * r1 # order 3 if simple root -## * Schroder: delta = r2 / (r2 - r1) * r1 # order 2 -## * Thukral(3): delta = (-2*r3)*(r2 - r1)/(r1^2 - 3*r1*r3 + 2*r2*r3) * r1 # order 3 -## * Thukral(4): delta = 3*r1*r2*r4*(r1^2 - 3*r1*r3 + 2*r2*r3)/(-r1^3*r2 + 4*r1^2*r2*r4 + 3*r1^2*r3*r4 - 12*r1*r2*r3*r4 + 6*r2^2*r3*r4) # order 4 -## -## The latter two come from -## [Thukral](http://article.sapub.org/10.5923.j.ajcam.20170702.05.html). They are not implemented. 
- -## Newton +# 1 step taken in set up +function log_step(l::Tracks, M::AbstractDerivativeMethod, state; init=false) + init && push!(l.xfₛ, (state.xn0, state.fxn0)) + push!(l.xfₛ, (state.xn1, state.fxn1)) + init && log_iteration(l, 1) + !init && log_iteration(l, 1) + nothing +end -fn_argout(::AbstractNewtonLikeMethod) = 2 -struct Newton <: AbstractNewtonLikeMethod end """ Roots.Newton() @@ -25,7 +16,7 @@ Implements Newton's [method](http://tinyurl.com/b4d7vls): `xᵢ₊₁ = xᵢ - f(xᵢ)/f'(xᵢ)`. This is a quadratically convergent method requiring one derivative. Two function calls per step. -Example +## Examples ```jldoctest with_derivative julia> using Roots @@ -44,10 +35,18 @@ true This can be advantageous if the derivative is easily computed from the value of f, but otherwise would be expensive to compute. -The error, `eᵢ = xᵢ - α`, can be expressed as `eᵢ₊₁ = f[xᵢ,xᵢ,α]/(2f[xᵢ,xᵢ])eᵢ²` (Sidi, Unified treatment of regula falsi, Newton-Raphson, secant, and Steffensen methods for nonlinear equations). +---- + +The error, `eᵢ = xᵢ - α`, can be expressed as `eᵢ₊₁ = +f[xᵢ,xᵢ,α]/(2f[xᵢ,xᵢ])eᵢ²` (Sidi, Unified treatment of regula falsi, +Newton-Raphson, secant, and Steffensen methods for nonlinear +equations). 
""" -Newton +struct Newton <: AbstractNewtonLikeMethod end + +fn_argout(::AbstractNewtonLikeMethod) = 2 + # we store x0,x1,fx0,fx1 **and** Δ = fx1/f'(x1) struct NewtonState{T,S} <: AbstractUnivariateZeroState{T,S} @@ -74,13 +73,8 @@ end initial_fncalls(M::Newton) = 2 -function update_state( - method::Newton, - F, - o::NewtonState{T,S}, - options, - l=NullTracks(), -) where {T,S} +function update_state(M::Newton, F, o::NewtonState{T,S}, options, l=NullTracks()) where {T,S} + xn0, xn1 = o.xn0, o.xn1 fxn0, fxn1 = o.fxn0, o.fxn1 Δ::T = o.Δ diff --git a/src/Derivative/thukralb.jl b/src/Derivative/thukralb.jl index 758f3a07..c4edfefa 100644 --- a/src/Derivative/thukralb.jl +++ b/src/Derivative/thukralb.jl @@ -6,20 +6,20 @@ Abstract type for `ThukralXB` methods for `X` being `2`,`3`,`4`, or `5`. These are a family of methods which are * efficient (order `X`) for non-simple roots (e.g. `Thukral2B` is the `Schroder` method) -* taking `X+1` function calls per step +* take `X+1` function calls per step * require `X` derivatives. These can be passed as a tuple of functions, `(f, f', f'', …)`, *or* as a function returning the ratios: `x -> (f(x), f(x)/f'(x), f'(x)/f''(x), …)`. 
-## Example +## Examples ```julia using ForwardDiff Base.adjoint(f::Function) = x -> ForwardDiff.derivative(f, float(x)) f(x) = (exp(x) + x - 2)^6 x0 = 1/4 -find_zero((f, f', f''), x0, Roots.Halley()) # 14 iterations; 45 function evaluations -find_zero((f, f', f''), big(x0), Roots.Thukral2B()) # 4 iterations; 15 function evaluations -find_zero((f, f', f'', f'''), big(x0), Roots.Thukral3B()) # 3 iterations; 16 function evaluations +find_zero((f, f', f''), x0, Roots.Halley()) # 14 iterations; ≈ 48 function evaluations +find_zero((f, f', f''), big(x0), Roots.Thukral2B()) # 3 iterations; ≈ 9 function evaluations +find_zero((f, f', f'', f'''), big(x0), Roots.Thukral3B()) # 2 iterations; ≈ 8 function evaluations ``` @@ -65,7 +65,7 @@ function update_state( M::AbstractThukralBMethod, F, o::AbstractUnivariateZeroState{T,S}, - options::UnivariateZeroOptions, + options, l=NullTracks(), ) where {T,S} x₀ = o.xn1 diff --git a/src/DerivativeFree/derivative_free.jl b/src/DerivativeFree/derivative_free.jl index e69d08cc..1e57c3c4 100644 --- a/src/DerivativeFree/derivative_free.jl +++ b/src/DerivativeFree/derivative_free.jl @@ -1,27 +1,22 @@ ## Derivative free methods inherit from abstract secant # init_state(M,F,x) --> call init_state(M,F,x₀,x₁,fx₀, fx₁) -function init_state(M::AbstractSecant, F::Callable_Function, x) +function init_state(M::AbstractSecantMethod, F::Callable_Function, x) x₀, x₁ = x₀x₁(x) fx₀, fx₁ = first(F(x₀)), first(F(x₁)) state = init_state(M, F, x₀, x₁, fx₀, fx₁) end # initialize from xs, fxs -function init_state(::AbstractSecant, F, x₀, x₁, fx₀, fx₁) +function init_state(::AbstractSecantMethod, F, x₀, x₁, fx₀, fx₁) UnivariateZeroState(x₁, x₀, fx₁, fx₀) end -initial_fncalls(::AbstractSecant) = 2 +initial_fncalls(::AbstractSecantMethod) = 2 -# Many derivative free methods of different orders -# -# TODO: rework Order5 #https://pdfs.semanticscholar.org/ce50/3210d96f653a14b28da96600d5990d2abe97.pdf -# 
https://content.sciendo.com/view/journals/tmj/10/4/article-p103.xml 7 and 8 -# order8: https://www.hindawi.com/journals/ijmms/2012/493456/ref/ ################################################## ## Guard against non-robust algorithms @@ -53,7 +48,7 @@ initial_fncalls(::AbstractSecant) = 2 ## seems to work reasonably well over several different test cases. @inline function do_guarded_step( - M::AbstractSecant, + M::AbstractSecantMethod, o::AbstractUnivariateZeroState{T,S}, ) where {T,S} x, fx = o.xn1, o.fxn1 @@ -62,7 +57,7 @@ end # check if we should guard against step for method M; call N if yes, P if not function update_state_guarded( - M::AbstractSecant, + M::AbstractSecantMethod, N::AbstractUnivariateZeroMethod, P::AbstractUnivariateZeroMethod, fs, diff --git a/src/DerivativeFree/derivative_free_refactor.jl b/src/DerivativeFree/derivative_free_refactor.jl deleted file mode 100644 index 53220659..00000000 --- a/src/DerivativeFree/derivative_free_refactor.jl +++ /dev/null @@ -1,10 +0,0 @@ -################################################## -## some means of guarding against large fx when taking a secant step -## TODO: rework this -function steff_step(M::Union{Order5,Order8,Order16}, x::S, fx::T) where {S,T} - xbar, fxbar = real(x / oneunit(x)), fx / oneunit(fx) - thresh = max(1, abs(xbar)) * sqrt(eps(one(xbar))) #^(1/2) # max(1, sqrt(abs(x/fx))) * 1e-6 - - out = abs(fxbar) <= thresh ? fxbar : sign(fx) * thresh - x + out * oneunit(x) -end diff --git a/src/DerivativeFree/esser.jl b/src/DerivativeFree/esser.jl index 1f999771..a0a98326 100644 --- a/src/DerivativeFree/esser.jl +++ b/src/DerivativeFree/esser.jl @@ -16,7 +16,8 @@ Esser, H. Computing (1975) 14: 367. 
DOI: [10.1007/BF02253547](https://doi.org/10 Eine stets quadratisch konvergente Modifikation des Steffensen-Verfahrens -Example +## Examples + ``` f(x) = cos(x) - x g(x) = f(x)^2 @@ -27,21 +28,21 @@ find_zero(g, x0, Order2(), verbose=true) # 22 / 45 find_zero(g, x0, Roots.Order2B(), verbose=true) # 4 / 10 ``` """ -struct Esser <: AbstractSecant end -struct Order2B <: AbstractSecant end +struct Esser <: AbstractSecantMethod end +struct Order2B <: AbstractSecantMethod end function update_state( - method::Order2B, + M::Order2B, fs, o::AbstractUnivariateZeroState, options, l=NullTracks(), ) - update_state_guarded(method, Secant(), Esser(), fs, o, options, l) + update_state_guarded(M, Secant(), Esser(), fs, o, options, l) end function update_state( - method::Esser, + ::Esser, F, o::AbstractUnivariateZeroState{T,S}, options, @@ -79,10 +80,7 @@ function update_state( fx0, fx1 = fx1, F(x1) incfn(l) - @set! o.xn0 = x0 - @set! o.xn1 = x1 - @set! o.fxn0 = fx0 - @set! o.fxn1 = fx1 + o = _set(o, (x1, fx1), (x0, fx0)) return o, false end diff --git a/src/DerivativeFree/king.jl b/src/DerivativeFree/king.jl index b4189704..9aa784d3 100644 --- a/src/DerivativeFree/king.jl +++ b/src/DerivativeFree/king.jl @@ -14,8 +14,8 @@ The *asymptotic* error, `eᵢ = xᵢ - α`, is given by `eᵢ₊₂ = 1/2⋅G''/G'⋅ eᵢ⋅eᵢ₊₁ + (1/6⋅G'''/G' - (1/2⋅G''/G'))^2⋅eᵢ⋅eᵢ₊₁⋅(eᵢ+eᵢ₊₁)`. """ -struct King <: AbstractSecant end -struct Order1B <: AbstractSecant end +struct King <: AbstractSecantMethod end +struct Order1B <: AbstractSecantMethod end struct KingState{T,S} <: AbstractUnivariateZeroState{T,S} xn1::T @@ -74,10 +74,7 @@ function update_state(::King, F, o::KingState, options, l=NullTracks()) fx0, fx1 = fx1, F(x1) incfn(l) - @set! o.xn0 = x0 - @set! o.xn1 = x1 - @set! o.fxn0 = fx0 - @set! o.fxn1 = fx1 + o = _set(o, (x1, fx1), (x0, fx0)) @set! 
o.G0 = G₁ return o, false diff --git a/src/DerivativeFree/order0.jl b/src/DerivativeFree/order0.jl new file mode 100644 index 00000000..09e37be4 --- /dev/null +++ b/src/DerivativeFree/order0.jl @@ -0,0 +1,38 @@ +""" + Order0() + + +The `Order0` method is engineered to be a more robust, though possibly +slower, alternative to the other derivative-free root-finding +methods. The implementation roughly follows the algorithm described in +*Personal Calculator Has Key to Solve Any Equation ``f(x) = 0``*, the +SOLVE button from the +[HP-34C](http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf). +The basic idea is to use a secant step. If along the way a bracket is +found, switch to a bracketing algorithm, using `AlefeldPotraShi`. If the secant +step fails to decrease the function value, a quadratic step is used up +to ``4`` times. + +This is not really ``0``-order: the secant method has order +``1.6...`` [Wikipedia](https://en.wikipedia.org/wiki/Secant_method#Comparison_with_other_root-finding_methods) +and the the bracketing method has order +``1.6180...`` [Wikipedia](http://www.ams.org/journals/mcom/1993-61-204/S0025-5718-1993-1192965-2/S0025-5718-1993-1192965-2.pdf) +so for reasonable starting points and functions, this algorithm should be +superlinear, and relatively robust to non-reasonable starting points. + +""" +struct Order0 <: AbstractSecantMethod end + +# special case Order0 to be hybrid +function init( + 𝑭𝑿::ZeroProblem, + M::Order0, + p′=nothing; + p=nothing, + verbose::Bool=false, + tracks=NullTracks(), + kwargs..., +) + p = p′ === nothing ? p : p′ + init(𝑭𝑿, Secant(), AlefeldPotraShi(); p=p, verbose=verbose, tracks=tracks, kwargs...) +end diff --git a/src/DerivativeFree/order16.jl b/src/DerivativeFree/order16.jl index 53908e1b..8f2b9276 100644 --- a/src/DerivativeFree/order16.jl +++ b/src/DerivativeFree/order16.jl @@ -19,17 +19,17 @@ The error, `eᵢ = xᵢ - α`, is expressed as `eᵢ₊₁ = K⋅eᵢ¹⁶` for in equation (50) of the paper. 
""" -struct Order16 <: AbstractSecant end -struct Thukral16 <: AbstractSecant end +struct Order16 <: AbstractSecantMethod end +struct Thukral16 <: AbstractSecantMethod end function update_state( - method::Order16, + M::Order16, fs, o::UnivariateZeroState{T,S}, options, l=NullTracks(), ) where {T,S} - update_state_guarded(method, Secant(), Thukral16(), fs, o, options, l) + update_state_guarded(M, Secant(), Thukral16(), fs, o, options, l) end function update_state( @@ -50,10 +50,8 @@ function update_state( if issue log_message(l, "issue with f[xn,wn]") - @set! o.xn0 = o.xn1 - @set! o.xn1 = wn - @set! o.fxn0 = o.fxn1 - @set! o.fxn1 = fwn + o = _set(o, (wn, fwn), (xn, fxn)) + return o, true end @@ -64,10 +62,7 @@ function update_state( fp, issue = _fbracket_ratio(yn, xn, wn, fyn, fxn, fwn) if issue log_message(l, "issue with f[xn,yn]*f[yn,wn]/f[xn,wn]") - @set! o.xn0 = o.xn1 - @set! o.xn1 = yn - @set! o.fxn0 = o.fxn1 - @set! o.fxn1 = fyn + o = _set(o, (yn, fyn), (xn, fxn)) return o, true end @@ -82,10 +77,7 @@ function update_state( eta = 1 / (1 + 2 * u3 * u4^2) / (1 - u2) if issue log_message(l, "Approximate derivative failed") - @set! o.xn0 = o.xn1 - @set! o.xn1 = zn - @set! o.fxn0 = o.fxn1 - @set! o.fxn1 = fzn + o = _set(o, (zn, fzn), (xn, fxn)) return o, true end @@ -97,10 +89,7 @@ function update_state( fp, issue = _fbracket_ratio(an, yn, zn, fan, fyn, fzn) if issue log_message(l, "Approximate derivative failed") - @set! o.xn0 = o.xn1 - @set! o.xn1 = an - @set! o.fxn0 = o.fxn1 - @set! o.fxn1 = fan + o = _set(o, (an, fan), (xn, fxn)) return o, true end @@ -121,20 +110,18 @@ function update_state( fxn1::S = F(xn1) incfn(l) - @set! o.xn0 = xn - @set! o.xn1 = xn1 - @set! o.fxn0 = fxn - @set! 
o.fxn1 = fxn1 + o = _set(o, (xn1, fxn1), (xn, fxn)) return o, false end ################################################## -## some means of guarding against large fx when taking a secant step -## for Orders 5, 8, and 16 -## TODO: rework this -## Must be included after Order5, Order8, and Order16 are defined +## some means of guarding against large fx when taking a steffensen step +## for Orders 5, 8, and 16 we restrict the size of the steffensen step +## to make the underlying methods more robust. As we need the types defined +## below, this must be included after Order5, Order8, and Order16 are defined +## function steff_step(M::Union{Order5,Order8,Order16}, x::S, fx::T) where {S,T} xbar, fxbar = real(x / oneunit(x)), fx / oneunit(fx) thresh = max(1, abs(xbar)) * sqrt(eps(one(xbar))) #^(1/2) # max(1, sqrt(abs(x/fx))) * 1e-6 diff --git a/src/DerivativeFree/order5.jl b/src/DerivativeFree/order5.jl index 2c8f1afc..66ade3ed 100644 --- a/src/DerivativeFree/order5.jl +++ b/src/DerivativeFree/order5.jl @@ -13,11 +13,11 @@ The error, `eᵢ = xᵢ - α`, satisfies `eᵢ₊₁ = K₁ ⋅ K₅ ⋅ M ⋅ eᵢ⁵ + O(eᵢ⁶)` """ -struct Order5 <: AbstractSecant end -struct KumarSinghAkanksha <: AbstractSecant end +struct Order5 <: AbstractSecantMethod end +struct KumarSinghAkanksha <: AbstractSecantMethod end -function update_state(method::Order5, fs, o::UnivariateZeroState, options, l=NullTracks()) - update_state_guarded(method, Secant(), KumarSinghAkanksha(), fs, o, options, l) +function update_state(M::Order5, fs, o::UnivariateZeroState, options, l=NullTracks()) + update_state_guarded(M, Secant(), KumarSinghAkanksha(), fs, o, options, l) end function update_state( @@ -38,11 +38,7 @@ function update_state( fp, issue = _fbracket(o.xn1, wn, o.fxn1, fwn) if issue log_message(l, "Issue with divided difference f[xn, wn]. ") - @set! o.xn0 = o.xn1 - @set! o.xn1 = wn - @set! o.fxn0 = o.fxn1 - @set! 
o.fxn1 = fwn - + o = _set(o, (wn, fwn), (xn, fxn)) return o, true end @@ -56,31 +52,28 @@ function update_state( fp, issue = _fbracket_ratio(yn, o.xn1, wn, fyn, o.fxn1, fwn) if issue - log_message(l, "Issue with f[xn,yn]*f[yn,wn] / f[xn, wn]") - @set! o.xn0 = o.xn1 - @set! o.xn1 = yn - @set! o.fxn0 = o.fxn1 - @set! o.fxn1 = fyn + log_message(l, "Issue with f[xn,yn] * f[yn,wn] / f[xn, wn].") + o = _set(o, (yn, fyn), (xn, fxn)) return o, true end - @set! o.xn0 = o.xn1 - @set! o.fxn0 = o.fxn1 x₁::T = zn - fzn / fp - @set! o.xn1 = x₁ - @set! o.fxn1 = F(o.xn1) + f₁ = F(x₁) incfn(l) + o = _set(o, (x₁, f₁), (xn, fxn)) + + return o, false # nothing end -struct Order5Derivative <: AbstractSecant end +struct Order5Derivative <: AbstractSecantMethod end fn_argout(::Order5Derivative) = 2 function update_state( - method::Order5Derivative, + m::Order5Derivative, f, o::AbstractUnivariateZeroState{T,S}, options, @@ -102,10 +95,7 @@ function update_state( if isissue(fpyn) log_message(l, "Issue computing `fpyn`") - @set! o.xn0 = o.xn1 - @set! o.xn1 = yn - @set! o.fxn0 = o.fxn1 - @set! o.fxn1 = fyn + o = _set(o, (yn, fyn), (o.xn1, o.fxn1)) return o, true end @@ -118,10 +108,7 @@ function update_state( fxn1, _ = f(xn1) incfn(l, 2) - @set! o.xn0 = xn - @set! o.xn1 = xn1 - @set! o.fxn0 = fxn - @set! o.fxn1 = fxn1 + o = _set(o, (xn1, fxn1), (xn, fxn)) return o end diff --git a/src/DerivativeFree/order8.jl b/src/DerivativeFree/order8.jl index a9015e34..3077c997 100644 --- a/src/DerivativeFree/order8.jl +++ b/src/DerivativeFree/order8.jl @@ -14,18 +14,18 @@ The error, `eᵢ = xᵢ - α`, is expressed as `eᵢ₊₁ = K ⋅ eᵢ⁸` in (2.25) of the paper for an explicit `K`. 
""" -struct Order8 <: AbstractSecant end -struct Thukral8 <: AbstractSecant end +struct Order8 <: AbstractSecantMethod end +struct Thukral8 <: AbstractSecantMethod end ## cf also: https://doi.org/10.1515/tmj-2017-0049 function update_state( - method::Order8, + M::Order8, fs, o::UnivariateZeroState{T,S}, options, l=NullTracks(), ) where {T,S} - update_state_guarded(method, Secant(), Thukral8(), fs, o, options, l) + update_state_guarded(M, Secant(), Thukral8(), fs, o, options, l) end function update_state( @@ -44,10 +44,7 @@ function update_state( if isissue(fwn) log_message(l, "issue with Steffensen step fwn") - @set! o.xn0 = xn - @set! o.xn1 = wn - @set! o.fxn0 = fxn - @set! o.fxn1 = fwn + o = _set(o, (wn, fwn), (xn, fxn)) return o, true end @@ -66,10 +63,7 @@ function update_state( fp, issue = _fbracket(yn, xn, fyn, fxn) if issue #fp log_message(l, "issue with divided difference f[xn, yn]. ") - @set! o.xn0 = xn - @set! o.xn1 = yn - @set! o.fxn0 = fxn - @set! o.fxn1 = fyn + o = _set(o, (yn, fyn), (xn, fxn)) return o, true end @@ -82,10 +76,7 @@ function update_state( fp, issue = _fbracket_diff(xn, yn, zn, fxn, fyn, fzn) if issue log_message(l, "issue with divided difference f[y,z] - f[x,y] + f[x,z]. ") - @set! o.xn0 = xn - @set! o.xn1 = zn - @set! o.fxn0 = fxn - @set! o.fxn1 = fzn + o = _set(o, (zn, fzn), (xn, fxn)) return o, true end @@ -98,10 +89,7 @@ function update_state( fxn1::S = F(xn1) incfn(l) - @set! o.xn0 = xn - @set! o.xn1 = xn1 - @set! o.fxn0 = fxn - @set! o.fxn1 = fxn1 + o = _set(o, (xn1, fxn1), (xn, fxn)) return o, false end diff --git a/src/DerivativeFree/secant.jl b/src/DerivativeFree/secant.jl index 3421e28b..702526a6 100644 --- a/src/DerivativeFree/secant.jl +++ b/src/DerivativeFree/secant.jl @@ -15,7 +15,7 @@ The error, `eᵢ = xᵢ - α`, satisfies `eᵢ₊₂ = f[xᵢ₊₁,xᵢ,α] / f[xᵢ₊₁,xᵢ] * (xᵢ₊₁-α) * (xᵢ - α)`. 
""" -struct Secant <: AbstractSecant end +struct Secant <: AbstractSecantMethod end const Order1 = Secant const Orderφ = Secant @@ -40,10 +40,7 @@ function update_state( fx0, fx1 = fxn1, F(x1) incfn(l) - @set! o.xn0 = x0 - @set! o.xn1 = x1 - @set! o.fxn0 = fx0 - @set! o.fxn1 = fx1 + o = _set(o, (x1, fx1), (x0, fx0)) return o, false end diff --git a/src/DerivativeFree/steffensen.jl b/src/DerivativeFree/steffensen.jl index 0181b5a9..a4a302ae 100644 --- a/src/DerivativeFree/steffensen.jl +++ b/src/DerivativeFree/steffensen.jl @@ -16,21 +16,21 @@ step when `f(x)` is large. The error, `eᵢ - α`, satisfies `eᵢ₊₁ = f[xᵢ, xᵢ+fᵢ, α] / f[xᵢ,xᵢ+fᵢ] ⋅ (1 - f[xᵢ,α] ⋅ eᵢ²` """ -struct Steffensen <: AbstractSecant end -struct Order2 <: AbstractSecant end +struct Steffensen <: AbstractSecantMethod end +struct Order2 <: AbstractSecantMethod end function update_state( - method::Order2, + M::Order2, fs, o::UnivariateZeroState{T,S}, options, l=NullTracks(), ) where {T,S} - update_state_guarded(method, Secant(), Steffensen(), fs, o, options, l) + update_state_guarded(M, Secant(), Steffensen(), fs, o, options, l) end function update_state( - method::Steffensen, + ::Steffensen, F, o::AbstractUnivariateZeroState{T,S}, options, @@ -57,10 +57,7 @@ function update_state( fx0, fx1 = fx1, F(x1) incfn(l) - @set! o.xn0 = x0 - @set! o.xn1 = x1 - @set! o.fxn0 = fx0 - @set! 
o.fxn1 = fx1 + o = _set(o, (x1, fx1), (x0, fx0)) return o, false end diff --git a/src/Roots.jl b/src/Roots.jl index 588f2d60..0050ef04 100644 --- a/src/Roots.jl +++ b/src/Roots.jl @@ -12,7 +12,6 @@ using Setfield export fzero, fzeros, secant_method export find_zero, - find_zero!, find_zeros, ZeroProblem, solve, @@ -37,17 +36,17 @@ include("convergence.jl") include("functions.jl") include("trace.jl") include("find_zero.jl") +include("hybrid.jl") + include("Bracketing/bracketing.jl") include("Bracketing/bisection.jl") -include("Bracketing/accelerated_bisection.jl") include("Bracketing/alefeld_potra_shi.jl") include("Bracketing/brent.jl") include("Bracketing/ridders.jl") include("Bracketing/itp.jl") include("Bracketing/chandrapatlu.jl") include("Bracketing/false_position.jl") -include("Bracketing/bracketing_deprecate.jl") include("DerivativeFree/derivative_free.jl") include("DerivativeFree/secant.jl") @@ -57,13 +56,13 @@ include("DerivativeFree/order8.jl") include("DerivativeFree/order16.jl") include("DerivativeFree/king.jl") include("DerivativeFree/esser.jl") +include("DerivativeFree/order0.jl") include("Derivative/newton.jl") include("Derivative/halley_like.jl") include("Derivative/thukralb.jl") include("Derivative/lith.jl") -include("order0.jl") include("find_zeros.jl") include("simple.jl") include("alternative_interfaces.jl") diff --git a/src/abstract_types.jl b/src/abstract_types.jl index 8708db03..895eb3ee 100644 --- a/src/abstract_types.jl +++ b/src/abstract_types.jl @@ -1,18 +1,22 @@ ### Method types abstract type AbstractUnivariateZeroMethod end -abstract type AbstractBracketing <: AbstractUnivariateZeroMethod end -abstract type AbstractBisection <: AbstractBracketing end -abstract type AbstractAcceleratedBisection <: AbstractBisection end +abstract type AbstractBracketingMethod <: AbstractUnivariateZeroMethod end +abstract type AbstractBisectionMethod <: AbstractBracketingMethod end -abstract type AbstractNonBracketing <: AbstractUnivariateZeroMethod end 
-abstract type AbstractSecant <: AbstractNonBracketing end +abstract type AbstractNonBracketingMethod <: AbstractUnivariateZeroMethod end +abstract type AbstractSecantMethod <: AbstractNonBracketingMethod end -abstract type AbstractNewtonLikeMethod <: AbstractNonBracketing end -abstract type AbstractHalleyLikeMethod <: AbstractNonBracketing end +abstract type AbstractDerivativeMethod <:AbstractNonBracketingMethod end +abstract type AbstractNewtonLikeMethod <: AbstractDerivativeMethod end +abstract type AbstractHalleyLikeMethod <: AbstractDerivativeMethod end abstract type AbstractΔMethod <: AbstractHalleyLikeMethod end - +# deprecated but not clear way to do so, hence these defintions not to be used +const AbstractBracketing = AbstractBracketingMethod +const AbstractBisection = AbstractBisectionMethod +const AbstractNonBracketing = AbstractNonBracketingMethod +const AbstractSecant = AbstractSecantMethod ### State abstract type AbstractUnivariateZeroState{T,S} end diff --git a/src/alternative_interfaces.jl b/src/alternative_interfaces.jl index 118b239f..017fe8ae 100644 --- a/src/alternative_interfaces.jl +++ b/src/alternative_interfaces.jl @@ -164,7 +164,7 @@ function fzero(f, x0, M::AbstractUnivariateZeroMethod; kwargs...) find_zero(FnWrapper(f), x0, M; kwargs...) end -function fzero(f, x0, M::AbstractUnivariateZeroMethod, N::AbstractBracketing; kwargs...) +function fzero(f, x0, M::AbstractUnivariateZeroMethod, N::AbstractBracketingMethod; kwargs...) find_zero(FnWrapper(f), x0, M, N; kwargs...) 
end diff --git a/src/convergence.jl b/src/convergence.jl index be4db436..e4ba4831 100644 --- a/src/convergence.jl +++ b/src/convergence.jl @@ -1,14 +1,35 @@ ### Options -struct UnivariateZeroOptions{Q,R,S,T} + +abstract type AbstractUnivariateZeroOptions end + +struct UnivariateZeroOptions{Q,R,S,T} <: AbstractUnivariateZeroOptions xabstol::Q xreltol::R abstol::S reltol::T - maxevals::Int - maxfnevals::Int + maxiters::Int + strict::Bool +end + + +struct XExactOptions{S,T} <: AbstractUnivariateZeroOptions + abstol::S + reltol::T + maxiters::Int + strict::Bool +end + +struct FExactOptions{S,T} <: AbstractUnivariateZeroOptions + xabstol::S + xreltol::T + maxiters::Int strict::Bool end +struct ExactOptions <: AbstractUnivariateZeroOptions + maxiters::Int + strict::Bool +end init_options( M::AbstractUnivariateZeroMethod, @@ -20,25 +41,20 @@ function init_options(M, T=Float64, S=Float64; kwargs...) d = kwargs defs = default_tolerances(M, T, S) - options = UnivariateZeroOptions( - get(d, :xatol, get(d, :xabstol, defs[1])), - get(d, :xrtol, get(d, :xreltol, defs[2])), - get(d, :atol, get(d, :abstol, defs[3])), - get(d, :rtol, get(d, :reltol, defs[4])), - get(d, :maxevals, get(d, :maxsteps, defs[5])), - get(d, :maxfnevals, defs[6]), - get(d, :strict, defs[7]), - ) - if haskey(d, :maxfnevals) - @warn( - "maxfnevals is ignored. 
See the test for an example to implement this featrue" - ) - end - options -end + δₐ = get(d, :xatol, get(d, :xabstol, defs[1])) + δᵣ = get(d, :xrtol, get(d, :xreltol, defs[2])) + ϵₐ = get(d, :atol, get(d, :abstol, defs[3])) + ϵᵣ = get(d, :rtol, get(d, :reltol, defs[4])) + M = get(d, :maxiters, get(d, :maxevals, get(d, :maxsteps, defs[5]))) + strict = get(d, :strict, defs[6]) + + iszero(δₐ) && iszero(δᵣ) && iszero(ϵₐ) && iszero(ϵᵣ) && return ExactOptions(M, strict) + iszero(δₐ) && iszero(δᵣ) && return XExactOptions(ϵₐ, ϵᵣ, M, strict) + iszero(ϵₐ) && iszero(ϵᵣ) && return FExactOptions(δₐ, δᵣ, M, strict) -# # reset options to default values -@deprecate init_options!(options, M) init_options(M) + return UnivariateZeroOptions(δₐ, δᵣ, ϵₐ, ϵᵣ, M, strict) + +end ## -------------------------------------------------- @@ -50,7 +66,7 @@ The default tolerances for most methods are `xatol=eps(T)`, units (absolute tolerances have the units of `x` and `f(x)`; relative tolerances are unitless). For `Complex{T}` values, `T` is used. -The number of iterations is limited by `maxevals=40`. +The number of iterations is limited by `maxiters=40`. 
""" default_tolerances(M::AbstractUnivariateZeroMethod) = @@ -64,27 +80,93 @@ function default_tolerances( xrtol = eps(real(T)) # unitless atol = 4 * eps(real(float(S))) * oneunit(real(S)) rtol = 4 * eps(real(float(S))) * one(real(S)) - maxevals = 40 - maxfnevals = typemax(Int) + maxiters = 40 strict = false - (xatol, xrtol, atol, rtol, maxevals, maxfnevals, strict) + (xatol, xrtol, atol, rtol, maxiters, strict) +end + +## -------------------------------------------------- + +# ## Assess convergence + +## test f == 0 not f ≈ 0 +function is_exact_zero_f(::AbstractNonBracketingMethod, state::AbstractUnivariateZeroState, options) + fb = state.fxn1 + iszero(fb) +end + +function is_exact_zero_f(::AbstractBracketingMethod, state::AbstractUnivariateZeroState, options) + fa, fb = state.fxn0, state.fxn1 + iszero(fa) || iszero(fb) +end + + +## test f ≈ 0 not f == 0 +function is_approx_zero_f(::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, options::O) where {O <: AbstractUnivariateZeroOptions} + ab, afb = abs(state.xn1), abs(state.fxn1) + ϵₐ, ϵᵣ = options.abstol, options.reltol + Δ = max(_unitless(ϵₐ), _unitless(ab) * ϵᵣ) + afb ≤ Δ * oneunit(afb) +end + +## test f ≈ 0 not f == 0 +function is_approx_zero_f(::AbstractBracketingMethod, state::AbstractUnivariateZeroState, options::O) where {O <: AbstractUnivariateZeroOptions} + ab₁, afb₁ = abs(state.xn1), abs(state.fxn1) + ab₀, afb₀ = abs(state.xn0), abs(state.fxn0) + ϵₐ, ϵᵣ = options.abstol, options.reltol + u, fu = afb₀ < afb₁ ? 
(ab₀, afb₀) : (ab₁, afb₁) + Δ = max(_unitless(ϵₐ), _unitless(u) * ϵᵣ) + fu ≤ Δ * oneunit(fu) +end + +function is_approx_zero_f(::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, options::O, relaxed::Any) where {O <: AbstractUnivariateZeroOptions} + ab, afb = abs(state.xn1), abs(state.fxn1) + ϵₐ, ϵᵣ = options.abstol, options.reltol + Δ = max(_unitless(ϵₐ), _unitless(ab) * ϵᵣ) + Δ = cbrt(abs(_unitless(Δ))) * oneunit(afb) # relax test + afb <= Δ + +end + +function is_approx_zero_f(::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, options::O) where {O <: Union{ExactOptions, FExactOptions}} + false end ## -------------------------------------------------- -## Assess convergence -@inline function _is_f_approx_0(fa, a, atol, rtol, relaxed::Any) - aa, afa = abs(a), abs(fa) - tol = max(_unitless(atol), _unitless(aa) * rtol) - tol = cbrt(abs(_unitless(tol))) # relax test - afa <= tol * oneunit(afa) +# test xₙ₊₁ - xₙ ≈ 0 +function iszero_Δx(::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, options::O) where {O <: Union{ExactOptions, XExactOptions}} + a, b = state.xn0, state.xn1 + if b < a + a,b = b,a + end + + nextfloat(a) == b end -@inline function _is_f_approx_0(fa, a, atol, rtol) - aa, afa = abs(a), abs(fa) - tol = max(_unitless(atol), _unitless(aa) * rtol) - afa <= tol * oneunit(afa) + +function iszero_Δx(::AbstractBracketingMethod, state::AbstractUnivariateZeroState, options::O) where {O <: Union{FExactOptions, UnivariateZeroOptions}} + a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1 + u, fu = choose_smallest(a, b, fa, fb) + δₐ, δᵣ = options.xabstol, options.xreltol + δₓ = max(δₐ, 2 * abs(u) * δᵣ) # needs non-zero δₐ to stop near 0 + abs(b-a) ≤ δₓ +end + +function iszero_Δx(::AbstractNonBracketingMethod, state::AbstractUnivariateZeroState, options::O) where {O <: Union{FExactOptions, UnivariateZeroOptions}} + a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1 + δₐ, δᵣ = options.xabstol, 
options.xreltol + isapprox(a, b, atol=δₐ, rtol=δᵣ) end +isnan_f(M::AbstractBracketingMethod, state) = isnan(state.fxn1) || isnan(state.fxn0) +isnan_f(M::AbstractNonBracketingMethod, state) = isnan(state.fxn1) + +isinf_f(M::AbstractBracketingMethod, state) = isinf(state.fxn1) || isinf(state.fxn0) +isinf_f(M::AbstractNonBracketingMethod, state) = isinf(state.fxn1) + +## -------------------------------------------------- + + """ Roots.assess_convergence(method, state, options) @@ -92,15 +174,15 @@ Assess if algorithm has converged. Return a convergence flag and a Boolean indicating if algorithm has terminated (converged or not converged) -If algrithm hasn't converged returns `(:not_converged, false)`. +If algrithm hasn't converged this returns `(:not_converged, false)`. If algorithm has stopped or converged, return flag and `true`. Flags are: -* `:x_converged` if `abs(xn1 - xn0) < max(xatol, max(abs(xn1), abs(xn0)) * xrtol)` +* `:x_converged` if `xn1 ≈ xn`, typically with non-zero tolerances specified. * `:f_converged` if `|f(xn1)| < max(atol, |xn1|*rtol)` -* `:nan`, `:inf` if xn1 or fxn1 is `NaN` or an infinity +* `:nan` or `:inf` if fxn1 is `NaN` or an infinity. 
* `:not_converged` if algorithm should continue @@ -110,34 +192,37 @@ In `decide_convergence`, stopped values (and `:x_converged` when `strict=false`) """ -function assess_convergence(::Any, state::AbstractUnivariateZeroState, options) +function assess_convergence(M::Any, state::AbstractUnivariateZeroState, options) # return convergence_flag, boolean + isnan_f(M, state) && return (:nan, true) + isinf_f(M, state) && return (:inf, true) + is_exact_zero_f(M, state, options) && return (:exact_zero, true) + is_approx_zero_f(M, state, options) && return (:f_converged, true) + iszero_Δx(M, state, options) && return (:x_converged, true) + return (:not_converged, false) +end - xn0, xn1 = state.xn0, state.xn1 - fxn1 = state.fxn1 +# speeds up exact f values by just a bit (2% or so) over the above, so guess this is worth it. +function assess_convergence(M::AbstractBracketingMethod, state::AbstractUnivariateZeroState, options::Union{ExactOptions,FExactOptions}) - if isnan(xn1) || isnan(fxn1) - return (:nan, true) - end +# isnan_f(M, state) && return (:nan, true) +# is_exact_zero_f(M, state, options) && return (:exact_zero, true) +# iszero_Δx(M, state, options) && return (:x_converged, true) - if isinf(xn1) || isinf(fxn1) - return (:inf, true) - end + (isnan(state.fxn1) || isnan(state.fxn0)) && return (:nan, true) + (iszero(state.fxn1) || iszero(state.fxn0)) && return (:exact_zero, true) - # f(xstar) ≈ xstar * f'(xstar)*eps(), so we pass in lambda - if _is_f_approx_0(fxn1, xn1, options.abstol, options.reltol) - return (:f_converged, true) - end + a, b, fa, fb = state.xn0, state.xn1, state.fxn0, state.fxn1 + u, fu = choose_smallest(a, b, fa, fb) + δₐ, δᵣ = options.xabstol, options.xreltol + δₓ = max(δₐ, 2 * abs(u) * δᵣ) # needs non-zero δₐ to stop near 0 + abs(b-a) ≤ δₓ && return(:x_converged, true) - # stop when xn1 ~ xn. 
- # in find_zeros there is a check that f could be a zero with a relaxed tolerance - if isapprox(xn1, xn0, atol=options.xabstol, rtol=options.xreltol) - return (:x_converged, true) - end return (:not_converged, false) end + # state has stopped, this identifies if it has converged """ decice_convergence(M,F,state,options, convergence_flag) @@ -145,7 +230,7 @@ end When the algorithm terminates, this function decides the stopped value or returns NaN """ function decide_convergence( - ::AbstractUnivariateZeroMethod, + M::AbstractNonBracketingMethod, F, state::AbstractUnivariateZeroState, options, @@ -153,66 +238,49 @@ function decide_convergence( ) xn0, xn1 = state.xn0, state.xn1 fxn1 = state.fxn1 - val ∈ (:f_converged, :exact_zero, :converged) && return xn1 + ## XXX this could be problematic + val == :nan && return xn1 + val == :inf_nan && return xn1 + ## stopping is a heuristic, x_converged can mask issues ## if strict=true or the tolerance for f is 0 this will return xn1 if x_converged ## if strict == false, this will also check f(xn) ~ - with a relaxed ## tolerance - if options.strict || (iszero(options.abstol) && iszero(options.reltol)) + if options.strict || isa(options, ExactOptions) || isa(options, FExactOptions) #|| (iszero(options.abstol) && iszero(options.reltol)) val == :x_converged && return xn1 - _is_f_approx_0(fxn1, xn1, options.abstol, options.reltol) && return xn1 + is_approx_zero_f(M, state, options) && return xn1 + #_is_f_approx_0(fxn1, xn1, options.abstol, options.reltol) && return xn1 else - if val ∈ (:x_converged, :not_converged) - # |fxn| <= relaxed - _is_f_approx_0(fxn1, xn1, options.abstol, options.reltol, true) && return xn1 + if val == :x_converged + is_approx_zero_f(M, state, options, true) && return xn1 + elseif val == :not_converged + # this is the case where runaway can happen + ## XXX Need a good heuristic to catch that + is_approx_zero_f(M, state, options, :relaxed) && return xn1 end end - # if (state.stopped || state.x_converged) && 
!(state.f_converged) - # ## stopped is a heuristic, x_converged can mask issues - # ## if strict == false, this will also check f(xn) ~ - with a relaxed - # ## tolerance - - # ## are we at a crossing values? - # ## seems worth a check for 2 fn evals. - # if T <: Real && S <: Real - # for u in (prevfloat(xn1), nextfloat(xn1)) - # fu = first(F(u)) - # incfn(state) - # if iszero(fu) || _unitless(fu * fxn1) < 0 - # state.message *= "Change of sign at xn identified. " - # state.f_converged = true - # end - # end - # end - - # δ = maximum(_unitless, (options.abstol, options.reltol)) - # if options.strict || iszero(δ) - # if state.x_converged - # state.f_converged = true - # else - # state.convergence_failed = true - # end - - # else - # xstar, fxstar = state.xn1, state.fxn1 - # if _is_f_approx_0(fxstar, xstar, options.abstol, options.reltol, :relaxed) - # state.xstar, state.fxstar = xstar, fxstar - # msg = "Algorithm stopped early, but |f(xn)| < ϵ^(1/3), where ϵ depends on xn, rtol, and atol. " - # state.message = state.message == "" ? msg : state.message * "\n\t" * msg - # state.f_converged = true - # else - # state.convergence_failed = true - # end - # end - # end - - # if !state.f_converged - # state.xstar, state.fxstar = NaN*xn1, NaN*fxn1 - # end - NaN * xn1 end + +# assumes stopped = :x_converged +function decide_convergence( + ::AbstractBracketingMethod, + F, + state::AbstractUnivariateZeroState, + options, + val, +) + + a, b = state.xn0, state.xn1 + fa, fb = state.fxn0, state.fxn1 + + isnan(fa) && return a + isnan(fb) && return b + + + abs(fa) < abs(fb) ? a : b +end diff --git a/src/find_zero.jl b/src/find_zero.jl index 6c84bef3..e52da8d4 100644 --- a/src/find_zero.jl +++ b/src/find_zero.jl @@ -1,6 +1,6 @@ """ - find_zero(f, x0, M, [N::AbstractBracketing]; kwargs...) + find_zero(f, x0, M, [N::AbstractBracketingMethod]; kwargs...) Interface to one of several methods for finding zeros of a univariate function, e.g. solving ``f(x)=0``. 
@@ -11,47 +11,71 @@ in the iterative procedure. (Secant methods can have a tuple specify their initial values.) Values must be a subtype of `Number` and have methods for `float`, `real`, and `oneunit` defined. -For bracketing intervals, `x0` is specified using a tuple, a vector, or any iterable with `extrema` defined. A bracketing interval, ``[a,b]``, is one where f(a) and f(b) have different signs. +For bracketing intervals, `x0` is specified using a tuple, a vector, +or any iterable with `extrema` defined. A bracketing interval, +``[a,b]``, is one where ``f(a)`` and ``f(b)`` have different signs. # Return value -If the algorithm suceeds, the approximate root identified is returned. A `ConvergenceFailed` error is thrown if the algorithm fails. The alternate form `solve(ZeroProblem(f,x0), M)` returns `NaN` in case of failure. +If the algorithm suceeds, the approximate root identified is +returned. A `ConvergenceFailed` error is thrown if the algorithm +fails. The alternate form `solve(ZeroProblem(f,x0), M)` returns `NaN` +in case of failure. # Specifying a method A method is specified to indicate which algorithm to employ: -* There are methods for bisection where a bracket is specified: [`Bisection`](@ref), [`Roots.A42`](@ref), [`Roots.AlefeldPotraShi`](@ref), [`Roots.Brent`](@ref), and [`FalsePosition`](@ref) +* There are methods where a bracket is specified: [`Bisection`](@ref), + [`A42`](@ref), [`AlefeldPotraShi`](@ref), + [`Roots.Brent`](@ref), among others. Bisection is the default, but + `A42` generally requires far fewer iterations. -* There are several derivative-free methods: cf. [`Order0`](@ref), [`Order1`](@ref) (also [`Roots.Secant`](@ref)), [`Order2`](@ref) (also [`Roots.Steffensen`](@ref)), [`Order5`](@ref), [`Order8`](@ref), and [`Order16`](@ref), where the number indicates the order of the convergence. Methods [`Roots.Order1B`](@ref) and [`Roots.Order2B`](@ref) are useful when the desired zero has a multiplicity. 
+* There are several derivative-free methods: cf. [`Order0`](@ref), + [`Order1`](@ref) (also [`Roots.Secant`](@ref)), [`Order2`](@ref) + (also [`Steffensen`](@ref)), [`Order5`](@ref), + [`Order8`](@ref), and [`Order16`](@ref), where the number indicates + the order of the convergence. Methods [`Roots.Order1B`](@ref) and + [`Roots.Order2B`](@ref) are useful when the desired zero has a + multiplicity. -* There are some classical methods where derivatives are required: [`Roots.Newton`](@ref), [`Roots.Halley`](@ref), [`Roots.Schroder`](@ref). +* There are some classical methods where derivatives are required: + [`Roots.Newton`](@ref), [`Roots.Halley`](@ref), + [`Roots.Schroder`](@ref), among others. -* The family [`Roots.LithBoonkkampIJzerman{S,D}`](@ref) for different `S` and `D` uses a linear multistep method root finder. The `(2,0)` method is the secant method, `(1,1)` is Newton's method. +* The family [`Roots.LithBoonkkampIJzerman{S,D}`](@ref) ,for different + `S` and `D`, uses a linear multistep method root finder. The `(2,0)` + method is the secant method, `(1,1)` is Newton's method. -For more detail, see the help page for each method (e.g., `?Order1`). Non-exported methods must be qualified with module name, as in `?Roots.Schroder`. +For more detail, see the help page for each method (e.g., +`?Order1`). Non-exported methods must be qualified with module name, +as in `?Roots.Schroder`. If no method is specified, the default method depends on `x0`: -* If `x0` is a scalar, the default is the slower, but more robust `Order0` method. +* If `x0` is a scalar, the default is the more robust `Order0` method. -* If `x0` is a tuple, vector, or iterable with `extrema` defined indicating a *bracketing* interval, the `Bisection` method is used. (The exact algorithm depends on the number type and the tolerances.) +* If `x0` is a tuple, vector, or iterable with `extrema` defined + indicating a *bracketing* interval, then the `Bisection` method is used. 
+ +The default methods are chosen to be robust; they may not be as efficient as some others. # Specifying the function The function(s) are passed as the first argument. -For the few methods that use one or more derivatives (`Newton`, `Halley`, -`Schroder`, `LithBoonkkampIJzerman(S,D)`, and `Order5Derivative`) a -tuple of functions is used. For the classical algorithms, a function returning `(f(x), f(x)/f'(x), [f'(x)/f''(x)])` may be used. +For the few methods that use one or more derivatives (`Newton`, +`Halley`, `Schroder`, `LithBoonkkampIJzerman(S,D)`, etc.) a tuple of +functions is used. For the classical algorithms, a function returning +`(f(x), f(x)/f'(x), [f'(x)/f''(x)])` may be used. # Optional arguments (tolerances, limit evaluations, tracing) -* `xatol` - absolute tolerance for `x` values. Passed to `isapprox(x_n, x_{n-1})`. -* `xrtol` - relative tolerance for `x` values. Passed to `isapprox(x_n, x_{n-1})`. +* `xatol` - absolute tolerance for `x` values. +* `xrtol` - relative tolerance for `x` values. * `atol` - absolute tolerance for `f(x)` values. * `rtol` - relative tolerance for `f(x)` values. -* `maxevals` - limit on maximum number of iterations. +* `maxiters` - limit on maximum number of iterations. * `strict` - if `false` (the default), when the algorithm stops, possible zeros are checked with a relaxed tolerance. * `verbose` - if `true` a trace of the algorithm will be shown on successful completion. See the internal [`Tracks`](@ref) object to save this trace. @@ -62,17 +86,19 @@ for details on the default tolerances. In general, with floating point numbers, convergence must be understood as not an absolute statement. Even if mathematically `α` is an answer and `xstar` the floating point realization, it may be that -`f(xstar) - f(α) ≈ xstar ⋅ f'(α) ⋅ eps(α)`, so tolerances must be +`f(xstar) - f(α) ≈ xstar ⋅ f'(α) ⋅ eps(α)`, so the role of tolerances must be appreciated, and at times specified. 
-For the `Bisection` methods, convergence is guaranteed, so the tolerances are set to be ``0`` by default. +For the `Bisection` methods, convergence is guaranteed over `Float64` +values, so the tolerances are set to be ``0`` by default. If a bracketing method is passed in after the method specification, then whenever a bracket is identified during the algorithm, the method will switch to the bracketing method to identify the zero. (Bracketing -methods are mathematically guaranteed to converge, non-bracketing methods may or may not converge.) -This is what `Order0` does by default, with an initial secant method switching -to the `AlefeldPotraShi` method should a bracket be encountered. +methods are mathematically guaranteed to converge, non-bracketing +methods may or may not converge.) This is what `Order0` does by +default, with an initial secant method switching to the +`AlefeldPotraShi` method should a bracket be encountered. Note: The order of the method is hinted at in the naming scheme. A scheme is order `r` if, with `eᵢ = xᵢ - α`, `eᵢ₊₁ = C⋅eᵢʳ`. If the @@ -110,7 +136,7 @@ julia> find_zero(sin, 3.0, Order2()) # Use Steffensen method julia> find_zero(sin, big(3.0), Order16()) # rapid convergence 3.141592653589793238462643383279502884197169399375105820974944592307816406286198 -julia> find_zero(sin, (3, 4), Roots.A42()) # fewer function calls than Bisection(), in this case +julia> find_zero(sin, (3, 4), A42()) # fewer function calls than Bisection(), in this case 3.141592653589793 julia> find_zero(sin, (3, 4), FalsePosition(8)) # 1 of 12 possible algorithms for false position @@ -144,7 +170,7 @@ julia> x0, xstar = 1.0, 1.112243913023029; julia> find_zero(fn, x0, Order2()) ≈ xstar true -julia> find_zero(fn, x0, Order2(), maxevals=3) # need more steps to converge +julia> find_zero(fn, x0, Order2(), maxiters=3) # need more steps to converge ERROR: Roots.ConvergenceFailed("Algorithm failed to converge") [...] 
``` @@ -153,7 +179,7 @@ ERROR: Roots.ConvergenceFailed("Algorithm failed to converge") Passing `verbose=true` will show details on the steps of the algorithm. The `tracks` argument allows -the passing of storage to record the values of `x` and `f(x)` used in +the passing of a [`Tracks`](ref) object to record the values of `x` and `f(x)` used in the algorithm. !!! note @@ -181,35 +207,6 @@ end find_zero(f, x0::Number; kwargs...) = find_zero(f, x0, Order0(); kwargs...) find_zero(f, x0; kwargs...) = find_zero(f, x0, Bisection(); kwargs...) -""" - find_zero(M, F, state, [options], [l]) - -Find zero using method `M`, function(s) `F`, and initial state -`state`. Returns an approximate zero or `NaN`. Useful when some part -of the processing pipeline is to be adjusted. - -* `M::AbstractUnivariateZeroMethod` a method, such as `Secant()` -* `F`: A callable object (or tuple of callable objects for certain methods) -* `state`: An initial state, as created by `init_state` (or `_init_state`). -* `options::UnivariateZeroOptions`: specification of tolerances -* `l::AbstractTracks`: used to record steps in algorithm, when requested. -``` - -!!! note - To be deprecated in favor of `solve!(init(...))`. -""" -function find_zero( - M::AbstractUnivariateZeroMethod, - F, - state::AbstractUnivariateZeroState, - options::UnivariateZeroOptions=init_options(M, state), - l::AbstractTracks=NullTracks(), -) - Base.depwarn("findzero(M, F, state, ...) 
interface is deprecated, use `solve(ZeroProblem(f,x0), M, ...)", :find_zero) - - solve!(init(M, F, state, options, l)) -end - ## --------------- ## Create an Iterator interface @@ -226,6 +223,11 @@ struct ZeroProblem{F,X} end Base.broadcastable(p::ZeroProblem) = Ref(p) +# possible unicode operator to lighten `solve(ZeroProblem(f,x), M)` at `solve(f ≀ x, M)` +f ≀ x = ZeroProblem(f, x) # \wr[tab] + +## -------------------------------------------------- + ## The actual iterating object struct ZeroProblemIterator{M,N,F,S,O,L} M::M @@ -243,7 +245,8 @@ Base.show(io::IO, Z::ZeroProblemIterator) = ## init(Z,p) ## init(Z,M,p) ## init(M,F,state, [options], [logger]) -## want p to be positional, not named⁺ +## want p to be a keyword, not positional. Leaving for now. +## but the positional use is not documented (though annoyingly is being tested) function init( 𝑭𝑿::ZeroProblem, M::AbstractUnivariateZeroMethod, @@ -262,64 +265,35 @@ function init( end function init(𝑭𝑿::ZeroProblem, p′=nothing; kwargs...) - M = length(𝑭𝑿.x₀) == 1 ? Secant() : Bisection() - init(𝑭𝑿, M, p′; kwargs...) + M = length(𝑭𝑿.x₀) == 1 ? Order0() : AlefeldPotraShi() + init(𝑭𝑿, M; p = p′, kwargs...) end function init( M::AbstractUnivariateZeroMethod, F, state::AbstractUnivariateZeroState, - options::UnivariateZeroOptions=init_options(M, state), + options::AbstractUnivariateZeroOptions=init_options(M, state), l::AbstractTracks=NullTracks(), ) ZeroProblemIterator(M, Nothing, Callable_Function(M, F), state, options, l) end -# Optional iteration interface to handle looping -# errors on non-convergence, unlike solve!(init(...)) which returns NaN -# allocates... 
-# This should be deprecated -function Base.iterate(P::ZeroProblemIterator, st=nothing) - ## st = (val, (state, ctr, val)) - if st === nothing - state = P.state - ctr = 1 - else - (state, ctr, val) = st - ctr += 1 - end - M, options, l = P.M, P.options, P.logger - val, stopped = :no_convergence, false - while true - val, stopped = assess_convergence(M, state, options) - stopped && break - if ctr > options.maxevals - stopped = true - break - end - state, stopped = update_state(M, P.F, state, options, l) - log_step(l, M, state) - break - end - - if stopped - xstar = decide_convergence(P.M, P.F, state, P.options, val) - isnan(xstar) && throw(ConvergenceFailed("Stopped at $(state.xn1)")) - nothing - else - return (state.xn1, (state, ctr, val)) - end -end """ - solve(fx::ZeroProblem, [p=nothing]; kwargs...) - solve(fx::ZeroProblem, M, [p=nothing]; kwargs...) - init(fx::ZeroProblem, [M], [p=nothing]; + solve(fx::ZeroProblem, [M], [N]; p=nothing, kwargs...) + init(fx::ZeroProblem, [M], [N]; + p=nothing, verbose=false, tracks=NullTracks(), kwargs...) solve!(P::ZeroProblemIterator) -Solve for the zero of a function specified through a `ZeroProblem` or `ZeroProblemIterator` +Solve for the zero of a scalar-valued univariate function specified through `ZeroProblem` or +`ZeroProblemIterator` using the `CommonSolve` interface. + +The defaults for `M` and `N` depend on the `ZeroProblem`: if `x0` is a +number, then `M=Secant()` and `N=AlefeldPotraShi()` is used (`Order0`); if `x0` +has `2` (or more values) then it is assumed to be a bracketing +interval and `M=AlefeldPotraShi()` is used. The methods involved with this interface are: @@ -328,10 +302,15 @@ The methods involved with this interface are: The latter calls the following, which can be useful independently: -* `init`: to initialize an iterator with a method for solution, any adjustments to the default tolerances, and a specification to log the steps or not. 
+* `init`: to initialize an iterator with a method for solution, any + adjustments to the default tolerances, and a specification to log + the steps or not. * `solve!` to iterate to convergence. -Returns `NaN`, not an error, when the problem can not be solved. Tested for zero allocations. +Returns `NaN`, not an error like `find_zero`, when the problem can not +be solved. Tested for zero allocations. + + ## Examples: @@ -350,19 +329,15 @@ Or, if the iterable is required ```jldoctest find_zero julia> problem = init(fx); - julia> solve!(problem) 3.141592653589793 ``` -The default method is `Order1()`, when `x0` is a number, or `Bisection()` when `x0` is an iterable with 2 or more values. - - -A second position argument for `solve` or `init` is used to specify a different method; keyword arguments can be used to adjust the default tolerances. +keyword arguments can be used to adjust the default tolerances. ```jldoctest find_zero -julia> solve(fx, Order5(), atol=1/100) +julia> solve(fx, Order5(); atol=1/100) 3.1425464815525403 ``` @@ -371,12 +346,13 @@ The above is equivalent to: ```jldoctest find_zero julia> problem = init(fx, Order5(), atol=1/100); - julia> solve!(problem) 3.1425464815525403 ``` -The argument `p` may be used if the function(s) to be solved depend on a parameter in their second positional argument (e.g., `f(x,p)`). For example +The keyword argument `p` may be used if the function(s) to be solved +depend on a parameter in their second positional argument (e.g., +`f(x, p)`). 
For example ```jldoctest find_zero julia> f(x,p) = exp(-x) - p # to solve p = exp(-x) @@ -385,7 +361,7 @@ f (generic function with 1 method) julia> fx = ZeroProblem(f, 1) ZeroProblem{typeof(f), Int64}(f, 1) -julia> solve(fx, 1/2) # log(2) +julia> solve(fx; p=1/2) # log(2) 0.6931471805599453 ``` @@ -393,7 +369,9 @@ This would be recommended, as there is no recompilation due to the function chan The argument `verbose=true` for `init` instructs that steps to be logged; -The iterator interface allows for the creation of hybrid solutions, for example, this is essentially how `Order0` is constructed (`Order0` follows secant steps until a bracket is identified, after which it switches to a bracketing algorithm.) +The iterator interface allows for the creation of hybrid solutions, +such as is used when two methods are passed to `solve`. +For example, this is essentially how the hybrid default is constructed: ```jldoctest find_zero julia> function order0(f, x) @@ -432,7 +410,7 @@ function solve!(P::ZeroProblemIterator; verbose=false) while !stopped val, stopped = assess_convergence(M, state, options) stopped && break - ctr > options.maxevals && break + ctr > options.maxiters && break state, stopped = update_state(M, F, state, options, l) @@ -443,45 +421,57 @@ function solve!(P::ZeroProblemIterator; verbose=false) val, stopped = assess_convergence(M, state, options) # udpate val flag α = decide_convergence(M, F, state, options, val) - if !(l isa NullTracks) - log_convergence(l, val) - log_method(l, M) - log_last(l, α) - verbose && display(l) - end + log_convergence(l, val) + log_method(l, M) + log_last(l, α) + verbose && display(l) + α end # thread verbose through """ - solve(𝐙::ZeroProblem, args...; verbose=false, kwargs...) + solve(fx::ZeroProblem, args...; verbose=false, kwargs...) -Disptaches to `solve!(init(𝐙, args...; kwargs...))`. See [`solve!`](@ref) for details. +Disptaches to `solve!(init(fx, args...; kwargs...))`. See [`solve!`](@ref) for details. 
""" -CommonSolve.solve(F::ZeroProblem, args...; verbose=false, kwargs...) = - solve!(init(F, args...; verbose=verbose, kwargs...); verbose=verbose) +CommonSolve.solve(𝑭𝑿::ZeroProblem, args...; verbose=false, kwargs...) = + solve!(init(𝑭𝑿, args...; verbose=verbose, kwargs...); verbose=verbose) -## -------------------------------------------------- -## Framework for setting up an iterative problem for finding a zero -# -# In McNamee & Pan (DOI:10.1016/j.camwa.2011.11.015 there are a number of -# results on efficiencies of a solution, (1/d) log_10(q) -# Those implemented here are: -# quadratic cut (Muller) .265 (in a42) -# Newton() newton = .1505 or 1/2log(2) -# Order1() secant method .20 (1/1 * log(1.6) -# FalsePostion(12) Anderson-Bjork [.226, .233] -# FalsePostion(3) (King?) .264 -# A42() 0.191 but convergence guaranteed -# Order8() 8th order 4 steps: .225 (log10(8)/4 -# Order16() 16th order 5 steps .240 -# Order5(): 5th order, 4 steps. 0.1747 - -# A zero is found by specifying: -# the method to use <: AbstractUnivariateZeroMethod -# the function(s) -# the initial state through a value for x either x, [a,b], or (a,b) <: AbstractUnivariateZeroState -# the options (e.g., tolerances) <: UnivariateZeroOptions - -# The minimal amount needed to add a method, is to define a Method and an update_state method. 
+ +# Optional iteration interface to handle looping +# * returns xₙ or (aₙ, bₙ) depending +# * throws error on non-convergence +function Base.iterate(P::ZeroProblemIterator, st=nothing) + ## st = (val, (state, ctr, flag, stopped)) + + M, F, options, l = P.M, P.F, P.options, P.logger + + if st === nothing + state = P.state + ctr, flag, stopped = 1, :not_converged, false + log_method(l, M) + log_step(l, M, state; init=true) + else + state, ctr, flag, stopped = st + ctr += 1 + end + + stopped && return nothing + ctr > options.maxiters && return nothing + + state, stopped = update_state(M, F, state, options, l) + log_step(l, M, state) + + flag, stopped = assess_convergence(M, state, options) + + if stopped + α = decide_convergence(M, F, state, options, flag) + log_last(l, α) + isnan(α) && throw(ConvergenceFailed("Algorithm did not converge.")) + end + + return (last(state,M), (state, ctr, flag, stopped)) + +end diff --git a/src/order0.jl b/src/hybrid.jl similarity index 67% rename from src/order0.jl rename to src/hybrid.jl index 7001e67d..c6d4294c 100644 --- a/src/order0.jl +++ b/src/hybrid.jl @@ -1,48 +1,11 @@ -""" - Order0() - - -The `Order0` method is engineered to be a more robust, though possibly -slower, alternative to the other derivative-free root-finding -methods. The implementation roughly follows the algorithm described in -*Personal Calculator Has Key to Solve Any Equation ``f(x) = 0``*, the -SOLVE button from the -[HP-34C](http://www.hpl.hp.com/hpjournal/pdfs/IssuePDFs/1979-12.pdf). -The basic idea is to use a secant step. If along the way a bracket is -found, switch to a bracketing algorithm, using `AlefeldPotraShi`. If the secant -step fails to decrease the function value, a quadratic step is used up -to ``4`` times. 
- -This is not really ``0``-order: the secant method has order -``1.6...`` [Wikipedia](https://en.wikipedia.org/wiki/Secant_method#Comparison_with_other_root-finding_methods) -and the the bracketing method has order -``1.6180...`` [Wikipedia](http://www.ams.org/journals/mcom/1993-61-204/S0025-5718-1993-1192965-2/S0025-5718-1993-1192965-2.pdf) -so for reasonable starting points and functions, this algorithm should be -superlinear, and relatively robust to non-reasonable starting points. - -""" -struct Order0 <: AbstractSecant end - -# special case Order0 to be hybrid -function init( - 𝑭𝑿::ZeroProblem, - M::Order0, - p′=nothing; - p=nothing, - verbose::Bool=false, - tracks=NullTracks(), - kwargs..., -) - p = p′ === nothing ? p : p′ - init(𝑭𝑿, Secant(), AlefeldPotraShi(); p=p, verbose=verbose, tracks=tracks, kwargs...) -end +## Init for hybrid method -- start with a non-bracketing, finish with bracketing ## When passing 2 methods, any parameters must be passed as a named argument through ## the keyword p function init( 𝑭𝑿::ZeroProblem, - M::AbstractUnivariateZeroMethod, - N::AbstractBracketing; + M::AbstractNonBracketingMethod, + N::AbstractBracketingMethod; p=nothing, verbose::Bool=false, tracks=NullTracks(), @@ -63,12 +26,15 @@ end # * limit steps so as not too far or too near the previous one # * if not decreasing, use a quad step upto 4 times to bounce out of trap, if possible # First uses M, then N if bracket is identified -function solve!(𝐙::ZeroProblemIterator{𝐌,𝐍}; verbose=false) where {𝐌,𝐍<:AbstractBracketing} +function solve!(𝐙::ZeroProblemIterator{𝐌,𝐍}; verbose=false) where {𝐌,𝐍<:AbstractBracketingMethod} M, N, F, state, options, l = 𝐙.M, 𝐙.N, 𝐙.F, 𝐙.state, 𝐙.options, 𝐙.logger incfn(l, 2) log_step(l, M, state; init=true) + log_method(l, M) + log_nmethod(l, N) + quad_ctr = 0 flag = :not_converged ctr = 0 @@ -78,7 +44,7 @@ function solve!(𝐙::ZeroProblemIterator{𝐌,𝐍}; verbose=false) where {𝐌 flag, converged = assess_convergence(M, state, options) converged && 
break - ctr >= options.maxevals && break + ctr >= options.maxiters && break state0 = state state0, stopped = update_state(M, F, state0, options) # state0 is proposed step @@ -98,7 +64,11 @@ function solve!(𝐙::ZeroProblemIterator{𝐌,𝐍}; verbose=false) where {𝐌 stateₙ = init_state(N, state0, Fₙ) # save function calls by using state0 values optionsₙ = init_options(N, stateₙ) α = solve!(init(N, Fₙ, stateₙ, optionsₙ, l)) - break + + log_method(l, M) + verbose && display(l) + + return α end ## did we move too far? @@ -133,7 +103,11 @@ function solve!(𝐙::ZeroProblemIterator{𝐌,𝐍}; verbose=false) where {𝐌 stateₙ = init_state(N, Fₙ, a, b, fa, fb) optionsₙ = init_options(N, stateₙ) α = solve!(init(N, Fₙ, stateₙ, optionsₙ, l)) - break + + log_method(l, M) + verbose && display(l) + + return α end ## did we improve? @@ -179,11 +153,10 @@ function solve!(𝐙::ZeroProblemIterator{𝐌,𝐍}; verbose=false) where {𝐌 log_step(l, M, state) end - val, stopped = assess_convergence(M, state, options) + α = decide_convergence(M, F, state, options, val) + log_convergence(l, val) - log_method(l, M) - log_nmethod(l, N) log_last(l, α) verbose && display(l) @@ -194,26 +167,10 @@ function find_zero( fs, x0, M::AbstractUnivariateZeroMethod, - N::AbstractBracketing; + N::AbstractBracketingMethod; verbose=false, kwargs..., ) 𝐏 = ZeroProblem(fs, x0) solve!(init(𝐏, M, N; verbose=verbose, kwargs...), verbose=verbose) end - -# Switch to bracketing method -# deprecate soon, not used -function run_bisection(N::AbstractBracketing, f, ab, state) - - Base.depwarn("This method is deprecated, as it is not longer used internally", :run_bisection) - - steps, fnevals = state.steps, state.fnevals - f = Callable_Function(N, f) - init_state!(state, N, f; clear=true) - find_zero(N, f, state, init_options(N, state)) - a, b = _extrema(ab) - u, v = a > b ? (b, a) : (a, b) - state.message *= "Bracketing used over ($u, $v), those steps not shown. 
" - return nothing -end diff --git a/src/simple.jl b/src/simple.jl index d8acb635..bda10f2a 100644 --- a/src/simple.jl +++ b/src/simple.jl @@ -365,7 +365,7 @@ The inital values can be specified as a pair of two values, as in is computed, possibly from `fb`. The basic idea is to follow the secant method to convergence unless: -* a bracket is found, in which case bisection is used; +* a bracket is found, in which case `AlefeldPotraShi` is used; * the secant method is not converging, in which case a few steps of a quadratic method are used to see if that improves matters. @@ -411,7 +411,7 @@ function dfree(f, xs) cnt += 1 if sign(fa) * sign(fb) < 0 - return bisection(f, a, b) + return solve(ZeroProblem(f, (a,b))) # faster than bisection(f, a, b) end # take a secant step @@ -426,7 +426,7 @@ function dfree(f, xs) # change sign if sign(fgamma) * sign(fb) < 0 - return bisection(f, gamma, b) + return solve(ZeroProblem(f, (gamma, b))) # faster than bisection(f, gamma, b) end # decreasing diff --git a/src/state.jl b/src/state.jl index 740796fe..ef686b01 100644 --- a/src/state.jl +++ b/src/state.jl @@ -6,22 +6,25 @@ struct UnivariateZeroState{T,S} <: AbstractUnivariateZeroState{T,S} fxn0::S end -# init_state(M, F, state) -- convert -# basic idea to convert from N to M: -# Fₘ = copy(M, fₙ) -# stateₘ = init_state(M, stateₙ, Fₘ) -function init_state(M::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, F) - init_state(M, F, state.xn0, state.xn1, state.fxn0, state.fxn1) -end +# simple helper to set main properties of a state object +function _set(state, xf1) + x, fx = xf1 + @set! state.xn1 = x + @set! state.fxn1 = fx -# init_state(M,F,x) --> call init_state(M,F,x₀,x₁,fx₀, fx₁) -function init_state(M::AbstractUnivariateZeroMethod, F, x) - error("no default method") + state end -# initialize from xs, fxs -function init_state(::AbstractUnivariateZeroMethod, F, x₀, x₁, fx₀, fx₁) - error("no default method") +function _set(state, xf1, xf0) + x, fx = xf1 + @set! 
state.xn1 = x + @set! state.fxn1 = fx + + x, fx = xf0 + @set! state.xn0 = x + @set! state.fxn0 = fx + + state end # init_state(M, F, x; kwargs...) @@ -33,7 +36,26 @@ end # * the values xₙ₋₁, xₙ and f(xₙ₋₁), f(xₙ) along with # * some method-specific values # +# # A state is initialized with `init_state(M, F, x)` which sets up xₙ₋₁, xₙ, f(xₙ₋₁), f(xₙ) # which then calls `init_state(M, F, xₙ₋₁, xₙ, f(xₙ₋₁), f(xₙ))` to finish the initialization # to change to a new state use `init_state(M, state, F)` -# + +# basic idea to convert from N to M: +# Fₘ = some state +# stateₘ = init_state(M, stateₙ, Fₘ) +function init_state(M::AbstractUnivariateZeroMethod, state::AbstractUnivariateZeroState, F) + init_state(M, F, state.xn0, state.xn1, state.fxn0, state.fxn1) +end + +# init_state(M,F,x) --> call init_state(M,F,x₀,x₁,fx₀, fx₁) +function init_state(M::AbstractUnivariateZeroMethod, F, x) + error("no default method") +end + +# initialize from xs, fxs +function init_state(::AbstractUnivariateZeroMethod, F, x₀, x₁, fx₀, fx₁) + error("no default method") +end + +Base.last(state::AbstractUnivariateZeroState, M::AbstractNonBracketingMethod) = state.xn1 diff --git a/src/trace.jl b/src/trace.jl index 35c00665..60123a20 100644 --- a/src/trace.jl +++ b/src/trace.jl @@ -15,7 +15,7 @@ struct NullTracks <: AbstractTracks end initial_fncalls(M::AbstractUnivariateZeroState) = @warn "initial_fncalls fix $M" log_step(s::NullTracks, M, x; init=false) = nothing # log a step (x,f(x)) or (a,b) -log_iteration(::NullTracks, n=1) = nothing # log an iteration +log_iteration(::NullTracks, n=1) = nothing # log an iteration (call to update state) log_fncall(::NullTracks, n=1) = nothing # add a function call (aliased to incfn); only one called in update_step log_message(::NullTracks, msg) = nothing # append to a message log_convergence(::NullTracks, msg) = nothing # flag for convergence @@ -35,20 +35,23 @@ Construct a `Tracks` object used to record the progress of the algorithm. both default to `Float64`. 
Note that because this type is not exported, you have to write `Roots.Tracks()` to construct a `Tracks` object. -By default, no tracking is done while finding a root (with `find_zero`, `solve`, or `fzero`). +By default, no tracking is done while finding a root. To change this, construct a `Tracks` object, and pass it to the keyword argument `tracks`. This will modify the `Tracks` object, storing the input and function values at each iteration, along with additional information about the root-finding process. -`Tracker` objects are printed in a nice, easy-to-read +`Tracks` objects are shown in an easy-to-read format. Internally either a tuple of `(x,f(x))` pairs or `(aₙ, bₙ)` -pairs are stored, the latter for bracketing methods, as illustrated -in the examples below. (These implementation details may change without notice.) +pairs are stored, the latter for bracketing methods. (These +implementation details may change without notice.) The methods +`empty!`, to reset the `Tracks` object; `get`, to get the tracks; +`last`, to get the value convered to, may be of interest. If you only want to print the information, but you don't need it later, this can conveniently be done by passing `verbose=true` to the root-finding function. This will not effect the return value, which will still be the root of the function. 
+ ## Examples ```jldoctest Tracks @@ -73,42 +76,38 @@ Results of univariate zero finding: * stopped as |f(x_n)| ≤ max(δ, |x|⋅ϵ) using δ = atol, ϵ = rtol Trace: -x_0 = 0.0000000000000000, fx_0 = -2.0000000000000000 -x_1 = 2.0000000000000000, fx_1 = 2.0000000000000000 -x_2 = 1.0000000000000000, fx_2 = -1.0000000000000000 -x_3 = 1.3333333333333333, fx_3 = -0.2222222222222223 -x_4 = 1.4285714285714286, fx_4 = 0.0408163265306123 -x_5 = 1.4137931034482758, fx_5 = -0.0011890606420930 -x_6 = 1.4142114384748701, fx_6 = -0.0000060072868389 -x_7 = 1.4142135626888697, fx_7 = 0.0000000008931456 -x_8 = 1.4142135623730947, fx_8 = -0.0000000000000009 - -julia> tracker.xfₛ # stored as (x, f(x)) pairs -9-element Vector{Tuple{Float64, Float64}}: - (0.0, -2.0) - (2.0, 2.0) - (1.0, -1.0) - (1.3333333333333333, -0.22222222222222232) - (1.4285714285714286, 0.04081632653061229) - (1.4137931034482758, -0.0011890606420930094) - (1.41421143847487, -6.007286838860537e-6) - (1.4142135626888697, 8.931455575122982e-10) - (1.4142135623730947, -8.881784197001252e-16) - -julia> tracker = Roots.Tracks() -Algorithm has not been run - -julia> find_zero(sin, (3, 4), Roots. 
A42(), tracks=tracker) ≈ π +x₁ = 0, fx₁ = -2 +x₂ = 2, fx₂ = 2 +x₃ = 1, fx₃ = -1 +x₄ = 1.3333333333333333, fx₄ = -0.22222222222222232 +x₅ = 1.4285714285714286, fx₅ = 0.04081632653061229 +x₆ = 1.4137931034482758, fx₆ = -0.0011890606420930094 +x₇ = 1.4142114384748701, fx₇ = -6.0072868388605372e-06 +x₈ = 1.4142135626888697, fx₈ = 8.9314555751229818e-10 +x₉ = 1.4142135623730947, fx₉ = -8.8817841970012523e-16 + +julia> empty!(tracker) # resets + +julia> find_zero(sin, (3, 4), Roots.A42(), tracks=tracker) ≈ π true -julia> tracker.abₛ # stored as (aₙ, bₙ) pairs -4-element Vector{Tuple{Float64, Float64}}: - (3.0, 4.0) - (3.0, 3.5) - (3.14156188905231, 3.1416247553172956) - (3.141592653589793, 3.1415926535897936) +julia> get(tracker) +4-element Vector{NamedTuple{names, Tuple{Float64, Float64}} where names}: + (a = 3.0, b = 4.0) + (a = 3.0, b = 3.5) + (a = 3.14156188905231, b = 3.1416247553172956) + (a = 3.141592653589793, b = 3.1415926535897936) + +julia> last(tracker) +3.141592653589793 ``` +!!! note + As designed, the `Tracks` object may not record actions taken + while the state object is initialized. An example is the default + bisection algorithm where an initial midpoint is found to ensure + the bracket does not straddle ``0``. + """ mutable struct Tracks{T,S} <: AbstractTracks xfₛ::Vector{Tuple{T,S}} # (x,f(x)) @@ -121,13 +120,13 @@ mutable struct Tracks{T,S} <: AbstractTracks method nmethod end -Tracks(T, S) = Tracks(Tuple{T,S}[], Tuple{T,T}[], 0, 0, :not_converged, "", NaN*zero(T), nothing, nothing) +Tracks(T, S) = Tracks(Tuple{T,S}[], Tuple{T,T}[], 0, 0, :algorithm_not_run, "", nan(T), nothing, nothing) Tracks(s::AbstractUnivariateZeroState{T,S}) where {T,S} = Tracks(T, S) Tracks(verbose, tracks, state::AbstractUnivariateZeroState{T,S}) where {T,S} = (verbose && isa(tracks, NullTracks)) ? 
Tracks(T,S) : tracks Tracks() = Tracks(Float64, Float64) # give default -function log_step(l::Tracks, M::AbstractNonBracketing, state; init=false) +function log_step(l::Tracks, M::AbstractNonBracketingMethod, state; init=false) init && push!(l.xfₛ, (state.xn0, state.fxn0)) push!(l.xfₛ, (state.xn1, state.fxn1)) !init && log_iteration(l, 1) @@ -142,12 +141,52 @@ log_last(l::Tracks, α) = (l.alpha = α; nothing) log_method(l::Tracks, method) = (l.method = method; nothing) log_nmethod(l::Tracks, method) = (l.nmethod = method; nothing) -Base.show(io::IO, l::Tracks) = show_trace(io, l.method, l.nmethod, l) +# copy some API from ValueHistories +Base.first(l::AbstractTracks) = (@warn "No tracking information was kept"; nothing) +function Base.first(l::Tracks) + l.convergence_flag == :algorithm_not_run && error("Algorithm not run") + !isempty(l.xfₛ) && return (x₁= l.xfₛ[1][1], f₁= l.xfₛ[1][2]) + return (a₁= l.abₛ[1][1], b₁= l.abₛ[1][2]) +end +# return last value of algorithm +Base.last(l::AbstractTracks) = (@warn "No tracking information was kept"; nothing) +function Base.last(l::Tracks) + convergence_flag = l.convergence_flag + α = l.alpha + if convergence_flag == :algorithm_not_run + @warn "The algorithm has not run" + end + α +end + +# Returns all available observations. 
+Base.get(l::NullTracks) = (@warn "No tracking information was kept"; nothing) +function Base.get(l::Tracks) + xf = [(xn=xᵢ, fn = fᵢ) for (xᵢ, fᵢ) ∈ l.xfₛ] + ab = [(a=min(u,v), b=max(u,v)) for (u,v) ∈ l.abₛ] + vcat(xf, ab) +end +# reset tracker +Base.empty!(l::NullTracks) = nothing +function Base.empty!(l::Tracks{T,S}) where {T,S} + empty!(l.xfₛ) + empty!(l.abₛ) + l.steps = 0 + l.fncalls = 0 + l.convergence_flag = :algorithm_not_run + l.message = "" + l.alpha = nan(T) + l.method = l.nmethod = nothing + nothing +end + +Base.show(io::IO, l::Tracks) = show_trace(io, l.method, l.nmethod, l) function show_trace(io::IO, method, N, tracks) - if length(tracks.xfₛ) == 0 && length(tracks.abₛ) == 0 + flag = tracks.convergence_flag + if flag == :algorithm_not_run print(io, "Algorithm has not been run") return nothing end @@ -168,6 +207,8 @@ function show_trace(io::IO, method, N, tracks) tracks.convergence_flag == :f_converged && tracks.message == "" && println(io, "* stopped as |f(x_n)| ≤ max(δ, |x|⋅ϵ) using δ = atol, ϵ = rtol") + tracks.convergence_flag == :exact_zero && + println(io, "* stopped as f(x_n) = 0") tracks.message != "" && println(io, "* Note: $(tracks.message)") else println(io, "* Convergence failed: $(tracks.message)") @@ -185,10 +226,10 @@ function show_tracks(io::IO, s::Tracks, M::AbstractUnivariateZeroMethod) println( io, @sprintf( - "%s = % 18.16f,\t %s = % 18.16f", - "x_$(i-1)", + "%s%s = %.17g,\t %s%s = %.17g", + "x", sprint(io -> unicode_subscript(io, i)), float(xi), - "fx_$(i-1)", + "fx", sprint(io -> unicode_subscript(io, i)), float(fxi) ) ) @@ -201,9 +242,9 @@ function show_tracks(io::IO, s::Tracks, M::AbstractUnivariateZeroMethod) println( io, @sprintf( - "(%s, %s) = (% 18.16f, % 18.16f )", - "a_$(j-1)", - "b_$(j-1)", + "(%s%s, %s%s) = ( %.17g, %.17g )", + "a", sprint(io -> unicode_subscript(io, j-1)), + "b", sprint(io -> unicode_subscript(io, j-1)), a, b ) ) diff --git a/src/utils.jl b/src/utils.jl index a7635942..dd29686b 100644 --- a/src/utils.jl 
+++ b/src/utils.jl @@ -20,6 +20,7 @@ nan(::Type{Float16}) = NaN16 nan(::Type{Float32}) = NaN32 nan(::Type{Float64}) = NaN nan(x::T) where {T<:Number} = NaN * one(T) +nan(x::Type{T}) where {T <: Number} = NaN * one(T) nan(::Any) = NaN ## issue with approx derivative @@ -167,11 +168,12 @@ function identify_starting_point(a, b, sfxs) p1 end -## ---- - -function unicode_subscript(io, j) +## not used +function _unicode_subscript(io, j) a = ("⁻", "", "", "₀", "₁", "₂", "₃", "₄", "₅", "₆", "₇", "₈", "₉") for i in string(j) print(io, a[Int(i) - 44]) end end + +unicode_subscript(io, j) = _unicode_subscript.(Ref(io), reverse(digits(j))) diff --git a/test/test_allocations.jl b/test/test_allocations.jl index 964aa809..a5e344a7 100644 --- a/test/test_allocations.jl +++ b/test/test_allocations.jl @@ -12,7 +12,7 @@ import BenchmarkTools Order16(), Roots.Order1B(), Roots.Order2B(), -# Roots.Bisection(), + Roots.Bisection(), Roots.A42(), Roots.AlefeldPotraShi(), Roots.Brent(), diff --git a/test/test_bracketing.jl b/test/test_bracketing.jl index 4f46eed3..cb46ae48 100644 --- a/test/test_bracketing.jl +++ b/test/test_bracketing.jl @@ -219,7 +219,8 @@ avg(x) = sum(x) / length(x) ## test for evaluation counts, ideally not so low for these problems ## exact_bracket - Ms = [Roots.Brent(), Roots.A42(), Roots.AlefeldPotraShi(), Roots.Chandrapatla(), Roots.ITP(), Roots.Ridders(), Roots.Bisection()] + Ms = [Roots.Brent(), Roots.A42(), Roots.AlefeldPotraShi(), Roots.Chandrapatla(), Roots.ITP(), Roots.Ridders(), + Roots.Bisection()] results = [run_tests((f, b) -> find_zero(f, b, M), name="$M") for M in Ms] maxfailures = maximum([length(result.failures) for result in results]) maxresidual = maximum([result.maxresidual for result in results]) @@ -229,15 +230,15 @@ avg(x) = sum(x) / length(x) @test avg(cnts) <= 4700 - ## False position has failures, and larger residuals + ## False position has larger residuals (and failures until maxsteps is increased) Ms = [Roots.FalsePosition(i) for i in 1:12] 
results = [run_tests((f, b) -> find_zero(f, b, M), name="$M") for M in Ms] maxfailures = maximum([length(result.failures) for result in results]) maxresidual = maximum([result.maxresidual for result in results]) cnts = [result.evalcount for result in results] - @test maxfailures <= 10 + @test maxfailures <= 0 @test maxresidual <= 1e-5 - @test avg(cnts) <= 2500 + @test avg(cnts) <= 3000 end mutable struct Cnt @@ -286,7 +287,7 @@ end end @testset "Bracketing edge cases" begin - Ms = (Roots.Bisection(), Roots.A42(), Roots.AlefeldPotraShi()) + Ms = (Bisection(), Roots.A42(), Roots.AlefeldPotraShi()) # Endpoints can be infinite for M in Ms @@ -303,15 +304,17 @@ end end @test iszero(@inferred(find_zero(f, (-1, 1), Roots.Bisection()))) - @test_throws Roots.ConvergenceFailed find_zero(f, (-1, 1), Roots.A42()) - @test_throws Roots.ConvergenceFailed find_zero(f, (-1, 1), Roots.AlefeldPotraShi()) + # XXX changes with relaxed tolerance (adding non-zero xatol) + #@test_throws Roots.ConvergenceFailed find_zero(f, (-1, 1), Roots.A42()) + #@test_throws Roots.ConvergenceFailed find_zero(f, (-1, 1), Roots.AlefeldPotraShi()) # subnormals should still be okay - α = nextfloat(nextfloat(0.0)) - f = x -> x - α - for M in Ms - @test find_zero(f, (-1, 1), M) == α - end + + α = nextfloat(nextfloat(0.0)) + f = x -> x - α + for M in (Bisection(),) #Ms XXX NOT A42, AlefeldPotraShi with xatol !==0 + @test find_zero(f, (-1, 1), M) == α + end # with NaN, not Inf f = x -> abs(x) / x diff --git a/test/test_find_zero.jl b/test/test_find_zero.jl index 6f40c8e5..6a77b31f 100644 --- a/test/test_find_zero.jl +++ b/test/test_find_zero.jl @@ -7,7 +7,7 @@ Base.adjoint(f) = x -> ForwardDiff.derivative(f, float(x)); # for a user-defined method import Roots.Setfield import Roots.Setfield: @set! 
-struct Order3_Test <: Roots.AbstractSecant end +struct Order3_Test <: Roots.AbstractSecantMethod end ## Test the interface @testset "find_zero interface tests" begin @@ -44,15 +44,13 @@ struct Order3_Test <: Roots.AbstractSecant end @test @inferred(find_zero(sin, [3, 4])) ≈ π # Bisection() ## test tolerance arguments - ## xatol, xrtol, atol, rtol, maxevals, maxfneval, strict + ## xatol, xrtol, atol, rtol, maxevals, strict fn, xstar = x -> sin(x) - x + 1, 1.9345632107520243 x0, M = 20.0, Order2() @test find_zero(fn, x0, M) ≈ xstar # needs 16 iterations, 33 fn evaluations, difference is exact # test of maxevals @test_throws Roots.ConvergenceFailed find_zero(fn, x0, M, maxevals=2) - # test of maxfneval REMOVED - # @test_throws Roots.ConvergenceFailed find_zero(fn, x0, M, maxfnevals=2) # tolerance on f, atol, rtol: f(x) ~ 0 M = Order2() @@ -67,9 +65,11 @@ struct Order3_Test <: Roots.AbstractSecant end h = 1e-6 M = Roots.Bisection() tracks = Roots.Tracks(Float64, Float64) - @inferred(find_zero(fn, (a, b), M, tracks=tracks, xatol=h, xrtol=0.0)) - u, v = tracks.abₛ[end] - @test h >= abs(u - v) >= h / 2 + if VERSION >= v"1.6.0" + @inferred(find_zero(fn, (a, b), M, tracks=tracks, xatol=h, xrtol=0.0)) + u, v = tracks.abₛ[end] + @test h >= abs(u - v) >= h / 2 + end ## test of strict fn, x0 = x -> cos(x) - 1, pi / 4 @@ -150,7 +150,7 @@ end M = Roots.A42() G1 = Roots.Callable_Function(M, g1) state = @inferred(Roots.init_state(M, G1, x0_)) - options = @inferred(Roots.init_options(M, state)) + options = Roots.init_options(M, state) for M in (Roots.A42(), Roots.Bisection(), Roots.FalsePosition()) Gₘ = Roots.Callable_Function(M, G1) stateₘ = @inferred(Roots.init_state(M, state, Gₘ)) @@ -201,7 +201,7 @@ end fxs = f.(xs) M = Bisection() state = @inferred(Roots.init_state(M, f, xs..., fxs..., m=3.5, fm=f(3.5))) - @test @inferred(find_zero(M, f, state)) ≈ π + @test @inferred(solve!(init(M, f, state))) ≈ π # ## hybrid g1 = x -> exp(x) - x^4 @@ -209,7 +209,7 @@ end M = Roots.Bisection() 
G1 = Roots.Callable_Function(M, g1) state = @inferred(Roots.init_state(M, G1, x0_)) - options = @inferred(Roots.init_options(M, state, xatol=1 / 2)) + options = Roots.init_options(M, state, xatol=1 / 2) ZPI = @inferred(init(M, G1, state, options)) ϕ = iterate(ZPI) while ϕ !== nothing @@ -300,7 +300,9 @@ end end ## test broadcasting semantics with ZeroProblem - Z = ZeroProblem((x, p) -> cos(x) - p, pi / 4) + ## This assume parameters can be passed in a positional manner, a + ## style which is discouraged, as it is confusing + Z = ZeroProblem((x, p) -> cos(x) - x/p, pi / 4) @test all(solve.(Z, (1, 2)) .≈ (solve(Z, 1), solve(Z, 2))) end @@ -310,9 +312,7 @@ end Ms = [ Order0(), Order1(), - Roots.Order1B(), Order2(), - Roots.Order2B(), Order5(), Order8(), Order16(), @@ -335,7 +335,7 @@ end xn = find_zero(fn, x0, M) @test abs(fn(xn)) <= 1e-10 end - for M in [Order2(), Order5(), Order8(), Order16()] + for M in [Roots.Order1B(), Order2(), Roots.Order2B(), Order5(), Order8(), Order16()] @test_throws Roots.ConvergenceFailed find_zero(fn, x0, M, strict=true) end @@ -400,8 +400,7 @@ end state = Roots.init_state(M, F, x0) options = Roots.init_options(M, state) l = Roots.Tracks(state) - find_zero(M, F, state, options, l) - + solve(ZeroProblem(lhs, x0), M; tracks=l) @test l.steps <= 45 # 15 end test_94() @@ -411,11 +410,13 @@ end ## Use tolerance on f, not x with bisectoin atol = 0.01 - u = @inferred(find_zero(sin, (3, 4), atol=atol)) - @test atol >= abs(sin(u)) >= atol^2 + if VERSION >= v"1.6.0" + u = @inferred(find_zero(sin, (3, 4), atol=atol)) + @test atol >= abs(sin(u)) >= atol^2 - ## issue #159 bracket with zeros should be found - @test @inferred(find_zero(x -> x + 1, (-1, 1))) == -1 + ## issue #159 bracket with zeros should be found + @test @inferred(find_zero(x -> x + 1, (-1, 1))) == -1 + end ## issue #178 passinig through method @test fzero(sin, 3, 4, Roots.Brent()) ≈ π @@ -426,7 +427,7 @@ end end r = 0.05 xs = (r + 1e-12, 1.0) - @test @inferred(find_zero(x -> f(r) - 
f(x), xs, Roots.A42())) ≈ 0.4715797678171889 + @test find_zero(x -> f(r) - f(x), xs, Roots.A42()) ≈ 0.4715797678171889 end struct _SampleCallableObject end @@ -462,58 +463,6 @@ end end end -@testset "find_bracket test" begin - fs_xs = ((x -> x^5 - x - 1, (0, 2)), (sin, (3, 4)), (x -> exp(x) - x^4, (5, 20))) - - for (f, x0) in fs_xs - out = Roots.find_bracket(f, x0) - @test prod(f.(out.bracket)) <= 0 - end - - ## test size of bracket - for M in (Roots.BisectionExact(), Roots.A42(), Roots.AlefeldPotraShi()) - for (fn, b) in ( - (x -> x == 0.0 ? 0.0 : x / exp(1 / (x * x)), (-1.0, 4.0)), - (x -> exp(-15 * x) * (x - 1) + x^15, (0.0, 1.0)), - (x -> (-40.0) * x * exp(-x), (-9, 31)), - ) - l = Roots.find_bracket(fn, b, M) - @test l.exact || abs(l.bracket[2] - l.bracket[1]) <= eps(l.xstar) - end - end - - ## subnormal - x0 = nextfloat(nextfloat(0.0)) - fn = x -> (x - x0) - b = (0.0, 1.0) - m = Roots.__middle(b...) # 1e-154 - M = Roots.BisectionExact() - l = Roots.find_bracket(fn, b, M) - ## Here l.exact=true, but this checks the bracket size - ab = abs(l.bracket[2] - l.bracket[1]) - #@test !(ab) <= eps(l.xstar)) - @test abs(-(l.bracket...)) <= m - - M = Roots.A42() - l = Roots.find_bracket(fn, b, M) - ab = abs(l.bracket[2] - l.bracket[1]) - #@test !(ab <= eps(l.xstar)) - @test ab <= m - - M = Roots.AlefeldPotraShi() - l = Roots.find_bracket(fn, b, M) - ab = abs(l.bracket[1] - l.bracket[1]) - #@test !(ab <= eps(l.xstar)) - @test ab ≤ m - - x0 = nextfloat(0.0) - fn = x -> x <= 0.0 ? 
x - x0 : x + x0 - b = (-1.0, 1.0) - M = Roots.BisectionExact() - l = Roots.find_bracket(fn, b, M) - @test !l.exact && abs(-(l.bracket...)) <= maximum(eps.(l.bracket)) -end - @testset "function evalutions" begin function wrapper(f) cnt = 0 @@ -558,7 +507,7 @@ end Order16(), Roots.Order1B(), Roots.Order2B(), - Roots.BisectionExact(), + Roots.Bisection(), Roots.Brent(), Roots.Ridders(), Roots.ITP(), @@ -583,8 +532,10 @@ end end @testset "_extrema" begin - @test @inferred(Roots._extrema((π, 0))) === (0.0, Float64(π)) - @test @inferred(Roots._extrema([π, 0])) === (0.0, Float64(π)) + if VERSION >= v"1.6.0" + @test @inferred(Roots._extrema((π, 0))) === (0.0, Float64(π)) + @test @inferred(Roots._extrema([π, 0])) === (0.0, Float64(π)) + end @test_throws ArgumentError Roots._extrema(π) @test_throws ArgumentError Roots._extrema((π, π)) @test_throws ArgumentError Roots._extrema([π, π]) diff --git a/test/test_find_zeros.jl b/test/test_find_zeros.jl index 9e7e2af2..ef450e11 100644 --- a/test/test_find_zeros.jl +++ b/test/test_find_zeros.jl @@ -24,13 +24,13 @@ end xrts = find_zeros(F, -5, 20) @test length(xrts) == 3 @test all(azero.((F,), xrts)) - @test F.n <= 3000 + @test F.n <= 1500 #3000 F = CallableFunction(x -> cos(x) + cos(2x), 0) xrts = find_zeros(F, 0, 4pi) @test length(xrts) == 6 @test all(azero.((F,), xrts)) - @test F.n <= 5000 + @test F.n <= 2000 # 5000 T11(x) = 1024x^11 - 2816x^9 + 2816x^7 - 1232x^5 + 220x^3 - 11x U9(x) = 512x^9 - 1024x^7 + 672x^5 - 160x^3 + 10x @@ -39,13 +39,13 @@ end xrts = find_zeros(F, -1, 1) @test length(xrts) == 11 @test all(azero.((F,), xrts)) - @test F.n <= 10_000 + @test F.n <= 2500 # 10_000 F = CallableFunction(U9, 0) xrts = find_zeros(F, -1, 1) @test length(xrts) == 9 @test all(azero.((F,), xrts)) - @test F.n <= 10_000 + @test F.n <= 2500 # 10_000 W(n) = x -> prod(x - i for i in 1:n) Wi(n) = x -> prod((x - i)^i for i in 1:n) @@ -54,7 +54,7 @@ end xrts = find_zeros(F, -1, 21) @test length(xrts) == 20 @test all(azero.((F,), xrts)) - @test F.n 
<= 20_000 + @test F.n <= 4000 #20_000 F = CallableFunction(Wi(6), 0) xrts = find_zeros(F, -1, 7) diff --git a/test/test_fzero.jl b/test/test_fzero.jl index 2e8579fe..4d1d0c21 100644 --- a/test/test_fzero.jl +++ b/test/test_fzero.jl @@ -37,7 +37,6 @@ import Roots.fzero @test !(fzero(fn, x0, order=1, xatol=1e-4, atol=1) ≈ xstar) @test_throws Roots.ConvergenceFailed fzero(fn, x0, order=1, maxevals=3) @test !(fzero(fn, x0, order=1, maxevals=3, atol=1e-3) ≈ xstar) - ## @test !(fzero(fn, x0, order=1, maxfnevals=3, atol=1e-3) ≈ xstar) removed maxfnevals ## Infinities ## f(b) = Inf