Skip to content

Commit c6747d4

Browse files
authored
Require Julia 0.7 (#127)
* Move to Julia 0.7 * Explicitly call `convert(T, ::Number)` rather than `T(::Number)`
1 parent dc27e6e commit c6747d4

21 files changed

+123
-196
lines changed

.travis.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ os:
44
- linux
55
- osx
66
julia:
7-
- 0.6
7+
- 0.7
88
- nightly
99
matrix:
1010
allow_failures:

REQUIRE

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
1-
julia 0.6
2-
NLSolversBase 5.0
1+
julia 0.7-beta2
2+
NLSolversBase 7.0
33
Parameters
44
NaNMath
5-
Compat

appveyor.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
environment:
22
matrix:
3-
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.6/julia-0.6-latest-win32.exe"
4-
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.6-latest-win64.exe"
3+
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.7/julia-0.7-latest-win32.exe"
4+
- JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.7/julia-0.7-latest-win64.exe"
55
matrix:
66
allow_failures:
77
- JULIA_URL: "https://julialangnightlies-s3.julialang.org/bin/winnt/x86/julia-latest-win32.exe"

docs/generate.jl

+4-3
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,13 @@ import Literate
33

44
# TODO: Remove items from `SKIPFILE` as soon as they run on the latest
55
# stable `Optim` (or other dependency)
6-
#ONLYSTATIC = ["optim_linesearch.jl", "optim_initialstep.jl"]
7-
ONLYSTATIC = []
6+
ONLYSTATIC = ["optim_linesearch.jl", "optim_initialstep.jl"]
7+
#ONLYSTATIC = []
88

99
EXAMPLEDIR = joinpath(@__DIR__, "src", "examples")
1010
GENERATEDDIR = joinpath(@__DIR__, "src", "examples", "generated")
11-
for example in filter!(r"\.jl$", readdir(EXAMPLEDIR))
11+
myfilter(str) = occursin(r"\.jl$", str)
12+
for example in filter!(myfilter, readdir(EXAMPLEDIR))
1213
input = abspath(joinpath(EXAMPLEDIR, example))
1314
script = Literate.script(input, GENERATEDDIR)
1415
code = strip(read(script, String))

docs/make.jl

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ makedocs(
1111
format = :html,
1212
sitename = "LineSearches.jl",
1313
doctest = false,
14-
# strict = VERSION.minor == 6 && sizeof(Int) == 8, # only strict mode on 0.6 and Int64
14+
# strict = VERSION.minor == 7 && sizeof(Int) == 8, # only strict mode on 0.7 and Int64
1515
strict = false,
1616
pages = Any[
1717
"Home" => "index.md",
@@ -27,7 +27,7 @@ makedocs(
2727
deploydocs(
2828
repo = "github.com/JuliaNLSolvers/LineSearches.jl.git",
2929
target = "build",
30-
julia = "0.6", # deploy from release bot
30+
julia = "0.7", # deploy from release bot
3131
deps = nothing,
3232
make = nothing,
3333
)

docs/src/examples/customoptimizer.jl

+4-4
Original file line numberDiff line numberDiff line change
@@ -28,11 +28,11 @@ function gdoptimize(f, g!, fg!, x0::AbstractArray{T}, linesearch,
2828
ϕ(α) = f(x .+ α.*s)
2929
function dϕ(α)
3030
g!(gvec, x .+ α.*s)
31-
return vecdot(gvec, s)
31+
return dot(gvec, s)
3232
end
3333
function ϕdϕ(α)
3434
phi = fg!(gvec, x .+ α.*s)
35-
dphi = vecdot(gvec, s)
35+
dphi = dot(gvec, s)
3636
return (phi, dphi)
3737
end
3838

@@ -100,10 +100,10 @@ ls = BackTracking(order=3)
100100
fx_bt3, x_bt3, iter_bt3 = gdoptimize(f, g!, fg!, x0, ls)
101101

102102
## Test the results #src
103-
using Base.Test #src
103+
using Test #src
104104
@test fx_bt3 < 1e-12 #src
105105
@test iter_bt3 < 10000 #src
106-
@test x_bt3 ≈ [1.0, 1.0] atol=1e-7 #src
106+
@test x_bt3 ≈ [1.0, 1.0] atol=2e-7 #src
107107

108108
# Interestingly, the `StrongWolfe` line search converges in one iteration, whilst
109109
# all the other algorithms take thousands of iterations.

docs/src/examples/optim_initialstep.jl

+1-7
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,5 @@
11
# # Optim initial step length guess
22
#
3-
#src TODO: Find a way to run these with Literate when deploying via Travis
4-
#src TODO: This file must currently be run locally and not on CI, and then
5-
#src TODO: the md file must be copied over to the correct directory.
6-
#src TODO: The reason is that there may be breaking changes between Optim and LineSearches,
7-
#src TODO: so we don't want that to mess up JuliaCIBot
8-
#-
93
#-
104
#md # !!! tip
115
#md # This example is also available as a Jupyter notebook:
@@ -34,7 +28,7 @@ res_hz = Optim.optimize(prob.f, prob.g!, prob.h!, prob.initial_x, method=algo_hz
3428
# From the result we see that this has reduced the number of function and gradient calls, but increased the number of iterations.
3529

3630
## Test the results #src
37-
using Base.Test #src
31+
using Test #src
3832
@test Optim.f_calls(res_hz) < Optim.f_calls(res_st) #src
3933
@test Optim.g_calls(res_hz) < Optim.g_calls(res_st) #src
4034
@test Optim.iterations(res_hz) > Optim.iterations(res_st) #src

docs/src/examples/optim_linesearch.jl

+2-7
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,5 @@
11
# # Optim line search
22
#
3-
#src TODO: Find a way to run these with Literate when deploying via Travis
4-
#src TODO: This file must currently be run locally and not on CI, and then
5-
#src TODO: the md file must be copied over to the correct directory.
6-
#src TODO: The reason is that there may be breaking changes between Optim and LineSearches,
7-
#src TODO: so we don't want that to mess up JuliaCIBot
83
#-
94
#md # !!! tip
105
#md # This example is also available as a Jupyter notebook:
@@ -32,7 +27,7 @@ algo_bt3 = Newton(linesearch = BackTracking(order=3))
3227
res_bt3 = Optim.optimize(prob.f, prob.g!, prob.h!, prob.initial_x, method=algo_bt3)
3328

3429

35-
## Test the results #src
36-
using Base.Test #src
30+
## Test the results #src
31+
using Test #src
3732
@test Optim.f_calls(res_bt3) < Optim.f_calls(res_hz) #src
3833
@test Optim.g_calls(res_bt3) < Optim.g_calls(res_hz) #src

src/LineSearches.jl

+8-11
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,9 @@
1-
isdefined(Base, :__precompile__) && __precompile__()
1+
__precompile__()
22

33
module LineSearches
44

5-
using Compat,
6-
Compat.LinearAlgebra,
7-
Compat.Distributed,
8-
Compat.Printf
9-
5+
using Printf
6+
import LinearAlgebra: dot, norm
107
using Parameters, NaNMath
118

129
import NLSolversBase
@@ -38,7 +35,7 @@ function make_ϕdϕ(df, x_new, x, s)
3835
NLSolversBase.value_gradient!(df, x_new)
3936

4037
# Calculate ϕ(a_i), ϕ'(a_i)
41-
NLSolversBase.value(df), real(vecdot(NLSolversBase.gradient(df), s))
38+
NLSolversBase.value(df), real(dot(NLSolversBase.gradient(df), s))
4239
end
4340
ϕdϕ
4441
end
@@ -51,7 +48,7 @@ function make_ϕ_dϕ(df, x_new, x, s)
5148
NLSolversBase.gradient!(df, x_new)
5249

5350
# Calculate ϕ'(a_i)
54-
real(vecdot(NLSolversBase.gradient(df), s))
51+
real(dot(NLSolversBase.gradient(df), s))
5552
end
5653
make_ϕ(df, x_new, x, s), dϕ
5754
end
@@ -64,7 +61,7 @@ function make_ϕ_dϕ_ϕdϕ(df, x_new, x, s)
6461
NLSolversBase.gradient!(df, x_new)
6562

6663
# Calculate ϕ'(a_i)
67-
real(vecdot(NLSolversBase.gradient(df), s))
64+
real(dot(NLSolversBase.gradient(df), s))
6865
end
6966
function ϕdϕ(α)
7067
# Move a distance of alpha in the direction of s
@@ -74,7 +71,7 @@ function make_ϕ_dϕ_ϕdϕ(df, x_new, x, s)
7471
NLSolversBase.value_gradient!(df, x_new)
7572

7673
# Calculate ϕ'(a_i)
77-
NLSolversBase.value(df), real(vecdot(NLSolversBase.gradient(df), s))
74+
NLSolversBase.value(df), real(dot(NLSolversBase.gradient(df), s))
7875
end
7976
make_ϕ(df, x_new, x, s), dϕ, ϕdϕ
8077
end
@@ -87,7 +84,7 @@ function make_ϕ_ϕdϕ(df, x_new, x, s)
8784
NLSolversBase.value_gradient!(df, x_new)
8885

8986
# Calculate ϕ'(a_i)
90-
NLSolversBase.value(df), real(vecdot(NLSolversBase.gradient(df), s))
87+
NLSolversBase.value(df), real(dot(NLSolversBase.gradient(df), s))
9188
end
9289
make_ϕ(df, x_new, x, s), ϕdϕ
9390
end

src/backtracking.jl

+1-1
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ function (ls::BackTracking)(df::AbstractObjective, x::AbstractArray{T}, s::Abstr
2929
dϕ_0 = dϕ(Tα(0))
3030
end
3131

32-
α_0 = min(α_0, min(alphamax, ls.maxstep / vecnorm(s, Inf)))
32+
α_0 = min(α_0, min(alphamax, ls.maxstep / norm(s, Inf)))
3333
ls(ϕ, α_0, ϕ_0, dϕ_0)
3434
end
3535

src/hagerzhang.jl

+19-15
Original file line numberDiff line numberDiff line change
@@ -111,18 +111,19 @@ function (ls::HagerZhang)(ϕ, ϕdϕ,
111111
@unpack delta, sigma, alphamax, rho, epsilon, gamma,
112112
linesearchmax, psi3, display, mayterminate = ls
113113

114+
zeroT = convert(T, 0)
114115

115116
if !(isfinite(phi_0) && isfinite(dphi_0))
116117
throw(ArgumentError("Value and slope at step length = 0 must be finite."))
117118
end
118-
if dphi_0 >= T(0)
119+
if dphi_0 >= zeroT
119120
throw(ArgumentError("Search direction is not a direction of descent."))
120121
end
121122

122123
# Prevent values of x_new = x+αs that are likely to make
123124
# ϕ(x_new) infinite
124125
iterfinitemax::Int = ceil(Int, -log2(eps(T)))
125-
alphas = [T(0)] # for bisection
126+
alphas = [zeroT] # for bisection
126127
values = [phi_0]
127128
slopes = [dphi_0]
128129
if display & LINESEARCH > 0
@@ -131,7 +132,7 @@ function (ls::HagerZhang)(ϕ, ϕdϕ,
131132

132133

133134
phi_lim = phi_0 + epsilon * abs(phi_0)
134-
@assert c > T(0)
135+
@assert c > zeroT
135136
@assert isfinite(c) && c <= alphamax
136137
phi_c, dphi_c = ϕdϕ(c)
137138
iterfinite = 1
@@ -142,9 +143,9 @@ function (ls::HagerZhang)(ϕ, ϕdϕ,
142143
phi_c, dphi_c = ϕdϕ(c)
143144
end
144145
if !(isfinite(phi_c) && isfinite(dphi_c))
145-
warn("Failed to achieve finite new evaluation point, using alpha=0")
146+
@warn("Failed to achieve finite new evaluation point, using alpha=0")
146147
mayterminate[] = false # reset in case another initial guess is used next
147-
return T(0.0), ϕ(T(0.0)) # phi_0
148+
return zeroT, ϕ(zeroT) # phi_0
148149
end
149150
push!(alphas, c)
150151
push!(values, phi_c)
@@ -175,7 +176,7 @@ function (ls::HagerZhang)(ϕ, ϕdϕ,
175176
", phi_c = ", phi_c,
176177
", dphi_c = ", dphi_c)
177178
end
178-
if dphi_c >= T(0)
179+
if dphi_c >= zeroT
179180
# We've reached the upward slope, so we have b; examine
180181
# previous values to find a
181182
ib = length(alphas)
@@ -191,7 +192,7 @@ function (ls::HagerZhang)(ϕ, ϕdϕ,
191192
# have crested over the peak. Use bisection.
192193
ib = length(alphas)
193194
ia = ib - 1
194-
if c ≉ alphas[ib] || slopes[ib] >= T(0)
195+
if c ≉ alphas[ib] || slopes[ib] >= zeroT
195196
error("c = ", c)
196197
end
197198
# ia, ib = bisect(phi, lsr, ia, ib, phi_lim) # TODO: Pass options
@@ -226,7 +227,7 @@ function (ls::HagerZhang)(ϕ, ϕdϕ,
226227
if !(isfinite(phi_c) && isfinite(dphi_c))
227228
mayterminate[] = false # reset in case another initial guess is used next
228229
return cold, ϕ(cold)
229-
elseif dphi_c < T(0) && c == alphamax
230+
elseif dphi_c < zeroT && c == alphamax
230231
# We're on the edge of the allowed region, and the
231232
# value is still decreasing. This can be due to
232233
# roundoff error in barrier penalties, a barrier
@@ -352,7 +353,8 @@ function secant2!(ϕdϕ,
352353
dphi_a = slopes[ia]
353354
dphi_b = slopes[ib]
354355
T = eltype(slopes)
355-
if !(dphi_a < T(0) && dphi_b >= T(0))
356+
zeroT = convert(T, 0)
357+
if !(dphi_a < zeroT && dphi_b >= zeroT)
356358
error(string("Search direction is not a direction of descent; ",
357359
"this error may indicate that user-provided derivatives are inaccurate. ",
358360
@sprintf "(dphi_a = %f; dphi_b = %f)" dphi_a dphi_b))
@@ -436,10 +438,11 @@ function update!(ϕdϕ,
436438
a = alphas[ia]
437439
b = alphas[ib]
438440
T = eltype(slopes)
441+
zeroT = convert(T, 0)
439442
# Debugging (HZ, eq. 4.4):
440-
@assert slopes[ia] < T(0)
443+
@assert slopes[ia] < zeroT
441444
@assert values[ia] <= phi_lim
442-
@assert slopes[ib] >= T(0)
445+
@assert slopes[ib] >= zeroT
443446
@assert b > a
444447
c = alphas[ic]
445448
phi_c = values[ic]
@@ -456,7 +459,7 @@ function update!(ϕdϕ,
456459
if c < a || c > b
457460
return ia, ib #, 0, 0 # it's out of the bracketing interval
458461
end
459-
if dphi_c >= T(0)
462+
if dphi_c >= zeroT
460463
return ia, ic #, 0, 0 # replace b with a closer point
461464
end
462465
# We know dphi_c < 0. However, phi may not be monotonic between a
@@ -485,9 +488,10 @@ function bisect!(ϕdϕ,
485488
a = alphas[ia]
486489
b = alphas[ib]
487490
# Debugging (HZ, conditions shown following U3)
488-
@assert slopes[ia] < T(0)
491+
zeroT = convert(T, 0)
492+
@assert slopes[ia] < zeroT
489493
@assert values[ia] <= phi_lim
490-
@assert slopes[ib] < T(0) # otherwise we wouldn't be here
494+
@assert slopes[ib] < zeroT # otherwise we wouldn't be here
491495
@assert values[ib] > phi_lim
492496
@assert b > a
493497
while b - a > eps(b)
@@ -503,7 +507,7 @@ function bisect!(ϕdϕ,
503507
push!(slopes, gphi)
504508

505509
id = length(alphas)
506-
if gphi >= T(0)
510+
if gphi >= zeroT
507511
return ia, id # replace b, return
508512
end
509513
if phi_d <= phi_lim

0 commit comments

Comments
 (0)