cg.jl
module TestCG

using IterativeSolvers
using LinearMaps
using Test
using LinearAlgebra
using SparseArrays
using Random

import LinearAlgebra.ldiv!

include("laplace_matrix.jl")
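
# A simple Jacobi (diagonal) preconditioner. IterativeSolvers applies left
# preconditioners through `ldiv!(y, P, x)`, so defining that one method is
# all this type needs.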
struct JacobiPrec
    diagonal
end

ldiv!(y, P::JacobiPrec, x) = y .= x ./ P.diagonal
@testset "Conjugate Gradients" begin
Random.seed!(1234321)
@testset "Small full system" begin
n = 10
@testset "Matrix{$T}, conjugated dot product" for T in (Float32, Float64, ComplexF32, ComplexF64)
A = rand(T, n, n)
A = A' * A + I
b = rand(T, n)
reltol = √eps(real(T))
x,ch = cg(A, b; reltol=reltol, maxiter=2n, log=true)
@test isa(ch, ConvergenceHistory)
@test A*x ≈ b rtol=reltol
@test ch.isconverged
# If you start from the exact solution, you should converge immediately
x,ch = cg!(A \ b, A, b; abstol=2n*eps(real(T)), reltol=zero(real(T)), log=true)
@test niters(ch) ≤ 1
@test nprods(ch) ≤ 2
        # An exact Cholesky factorization used as preconditioner should converge immediately
        F = cholesky(A, Val(false))
        x, ch = cg(A, b; Pl=F, log=true)
        @test niters(ch) ≤ 2
        @test nprods(ch) ≤ 2

        # All-zeros rhs should give all-zeros lhs
        x0 = cg(A, zeros(T, n))
        @test x0 == zeros(T, n)
    end
@testset "Matrix{$T}, unconjugated dot product" for T in (Float32, Float64, ComplexF32, ComplexF64)
A = rand(T, n, n)
A = A + transpose(A) + 15I
x = ones(T, n)
b = A * x
reltol = √eps(real(T))
# Solve without preconditioner
x1, his1 = cocg(A, b, reltol = reltol, maxiter = 100, log = true)
@test isa(his1, ConvergenceHistory)
@test A*x1 ≈ b rtol=reltol
# With an initial guess
x_guess = rand(T, n)
x2, his2 = cocg!(x_guess, A, b, reltol = reltol, maxiter = 100, log = true)
@test isa(his2, ConvergenceHistory)
@test x2 == x_guess
@test A*x2 ≈ b rtol=reltol
        # Use an exact LU factorization of a nearby matrix as a preconditioner
        F = lu(A + rand(T, n, n))
        x3, his3 = cocg(A, b, Pl = F, maxiter = 100, reltol = reltol, log = true)
        @test A*x3 ≈ b rtol=reltol
    end
end
@testset "Sparse Laplacian" begin
A = laplace_matrix(Float64, 10, 2)
P = JacobiPrec(diag(A))
rhs = randn(size(A, 2))
rmul!(rhs, inv(norm(rhs)))
abstol = 1e-5
reltol = 1e-5
@testset "SparseMatrixCSC{$T, $Ti}" for T in (Float64, Float32), Ti in (Int64, Int32)
xCG = cg(A, rhs; reltol=reltol, maxiter=100)
xJAC = cg(A, rhs; Pl=P, reltol=reltol, maxiter=100)
@test A*xCG ≈ rhs rtol=reltol
@test A*xJAC ≈ rhs rtol=reltol
end
Af = LinearMap(A)
@testset "Function" begin
xCG = cg(Af, rhs; reltol=reltol, maxiter=100)
xJAC = cg(Af, rhs; Pl=P, reltol=reltol, maxiter=100)
@test A*xCG ≈ rhs rtol=reltol
@test A*xJAC ≈ rhs rtol=reltol
end
@testset "Function with specified starting guess" begin
x0 = randn(size(rhs))
xCG, hCG = cg!(copy(x0), Af, rhs; abstol=abstol, reltol=0.0, maxiter=100, log=true)
xJAC, hJAC = cg!(copy(x0), Af, rhs; Pl=P, abstol=abstol, reltol=0.0, maxiter=100, log=true)
@test A*xCG ≈ rhs rtol=reltol
@test A*xJAC ≈ rhs rtol=reltol
@test niters(hJAC) == niters(hCG)
end
end
@testset "CG with a view" begin
A = rand(10, 10)
A = A + A' + 100I
x = view(rand(10, 2), :, 1)
b = rand(10)
x, hist = cg!(x, A, b, log = true)
@test hist.isconverged
end
@testset "Termination criterion" begin
for T in (Float32, Float64, ComplexF32, ComplexF64)
A = T[ 2 -1 0
-1 2 -1
0 -1 2]
n = size(A, 2)
b = ones(T, n)
x0 = A \ b
perturbation = 10 * sqrt(eps(real(T))) * T[(-1)^i for i in 1:n]
# If the initial residual is small and a small relative tolerance is used,
# many iterations are necessary
x = x0 + perturbation
initial_residual = norm(A * x - b)
x, ch = cg!(x, A, b, log=true)
@test 2 ≤ niters(ch) ≤ n
# If the initial residual is small and a large absolute tolerance is used,
# no iterations are necessary
x = x0 + perturbation
initial_residual = norm(A * x - b)
x, ch = cg!(x, A, b, abstol=2*initial_residual, reltol=zero(real(T)), log=true)
@test niters(ch) == 0
end
end
end
end # module