# NOTE(review): this is a collapsed notebook-style script; reformatted here for
# readability (code tokens unchanged).  Requires the non-stdlib packages
# PyPlot, Interact, Arpack (and, further down, IterativeSolvers).
using PyPlot, Interact, LinearAlgebra, SparseArrays, Arpack

## --- Scaling of dense LU / QR / eigendecomposition with matrix size ---
A1 = randn(500,500)
A2 = randn(1000,1000)
lu(A1); # do it once to make sure it is compiled
# doubling n: for an O(n^3) factorization this ratio should be ≈ 2^3 = 8
@elapsed(lu(A2)) / @elapsed(lu(A1))
qr(A1); # do it once to make sure it is compiled
@elapsed(qr(A2)) / @elapsed(qr(A1))
eigen(A1); # do it once to make sure it is compiled
@elapsed(eigen(A2)) / @elapsed(eigen(A1))
@time lu(A2)
@time eigen(A2);

## --- A small tridiagonal matrix and its sparse representation ---
A = [ 2 -1  0  0  0  0
     -1  2 -1  0  0  0
      0 -1  2 -1  0  0
      0  0 -1  2 -1  0
      0  0  0 -1  2 -1
      0  0  0  0 -1  2]
sparse(A)

"""
    spdiagm_nonsquare(m, n, args...)

Like `spdiagm`, but build an `m`×`n` (possibly non-square) sparse matrix from
the `diagonal => vector` pairs in `args`.

NOTE(review): relies on the internal `SparseArrays.spdiagm_internal`, which is
not public API and may change between Julia versions.
"""
function spdiagm_nonsquare(m, n, args...)
    I, J, V = SparseArrays.spdiagm_internal(args...)
    return sparse(I, J, V, m, n)
end

# compute the first-derivative finite-difference matrix
# for Dirichlet boundaries, given a grid x[:] of x points
# (including the endpoints where the function = 0!).
function sdiff1(x)
    N = length(x) - 2 # number of interior grid points
    dx1 = Float64[1/(x[i+1] - x[i]) for i = 1:N]
    dx2 = Float64[-1/(x[i+1] - x[i]) for i = 2:N+1]
    # (N+1)×N difference matrix: maps interior values to differences on the
    # N+1 intervals (the boundary values are implicitly zero).
    return spdiagm_nonsquare(N+1,N, 0=>dx1, -1=>dx2)
end

# compute the -∇⋅ c ∇ operator for a function c(x,y)
# and arrays x[:] and y[:] of the x and y points,
# including the endpoints where functions are zero
# (i.e. Dirichlet boundary conditions).
function Laplacian(x, y, c = (x,y) -> 1.0)
    Dx = sdiff1(x)
    Nx = size(Dx,2)
    Dy = sdiff1(y)
    Ny = size(Dy,2)
    # discrete Gradient operator:
    G = [kron(sparse(I,Ny,Ny), Dx); kron(Dy, sparse(I,Nx,Nx))]
    # grids for derivatives in x and y directions (interval midpoints)
    x′ = [0.5*(x[i]+x[i+1]) for i = 1:length(x)-1]
    y′ = [0.5*(y[i]+y[i+1]) for i = 1:length(y)-1]
    # evaluate c(x) on the staggered grids where the derivatives live
    C = spdiagm(0 => [ vec([c(X,Y) for X in x′, Y in y[2:end-1]]);
                       vec([c(X,Y) for X in x[2:end-1], Y in y′]) ])
    return G' * C * G # -∇⋅ c ∇
end

## --- Eigenmodes of the Laplacian on an irregular (spiral-shaped) domain ---
N = 400
x = range(-1,stop=1,length=N+2)[2:end-1] # interior grid points only
y = x' # a row vector
r = sqrt.(x.^2 .+ y.^2) # use broadcasting (.+) to make Nx x Ny matrix of radii
θ = atan.(y, x) # and angles
# level-set function: the domain is where φ > 0
φ = @. exp(-(r - θ*0.5/π - 0.5)^2 / 0.3^2) - 0.5
imshow(φ .> 0, extent=[-1,1,-1,1], cmap="binary")
title("the domain")
x0 = range(-1,1,length=N+2) # includes boundary points, unlike x
Abox = Laplacian(x0, x0, (x,y) -> 1.0);
i = findall(vec(φ) .> 0) # linear indices of grid points inside the domain
A = Abox[i,i] # restrict the Laplacian to the domain (Dirichlet outside)
size(A)
nnz(A) / length(A) # fraction of entries that are nonzero
nnz(A) / size(A,1) # average nonzeros per row
u = zeros(N,N)
# 20 smallest-magnitude eigenpairs via Arpack's iterative eigensolver
@time λ, X = eigs(A, nev=20, which=:SM);
f = figure(figsize=(10,4))
# display the eigenmodes two at a time, side by side
for which_eig2 in 1:2:20
    display(withfig(f) do
        for which_eig = which_eig2:which_eig2+1
            subplot(1,2,which_eig+1-which_eig2)
            u[i] = X[:,which_eig] # embed the eigenvector back into the 2d grid
            umax = maximum(abs, u) # symmetric color scale around zero
            imshow(u, extent=[-1,1,-1,1], vmin=-umax,vmax=+umax, cmap="RdBu")
            title("eigenvalue n=$which_eig, \$\\lambda\$ = $(λ[which_eig])")
        end
    end)
end

"""
    Helmholtz2d(Lx, Ly, ε, ω; dpml=2, resolution=20, Rpml=1e-20)

Return `(A,Nx,Ny,x,y)` for the 2d Helmholtz problem: `A` is the sparse complex
discretization of the operator (−∇² − ω²ε) on an `Lx`×`Ly` box centered on the
origin, surrounded on all sides by a PML absorbing layer of thickness `dpml`,
with `resolution` grid points per unit length.  `ε(x,y)` gives the (relative
permittivity) coefficient at each point, `Nx`×`Ny` is the grid size, and
`x`/`y` are the grid coordinates.  `Rpml` is the nominal round-trip PML
reflection used to set the PML strength.
"""
function Helmholtz2d(Lx, Ly, ε, ω; dpml=2, resolution=20, Rpml=1e-20)
    # PML σ = σ₀ x²/dpml², with σ₀ chosen so that
    # the round-trip reflection is Rpml:
    σ₀ = -log(Rpml) / (4dpml/3)
    M = round(Int, (Lx+2dpml) * resolution) # # of x grid points (incl. PML)
    N = round(Int, (Ly+2dpml) * resolution) # # of y grid points (incl. PML)
    dx = (Lx+2dpml) / (M+1)
    dy = (Ly+2dpml) / (N+1)
    x = (1:M) * dx .- Lx/2 .- dpml # x grid, centered
    y = (1:N) * dy .- Ly/2 .- dpml # y grid, centered
    # 1st-derivative matrix
    x′ = ((0:M) .+ 0.5)*dx # 1st-derivative grid points (staggered midpoints)
    y′ = ((0:N) .+ 0.5)*dy # 1st-derivative grid points
    ox = fill(1/dx, M)
    oy = fill(1/dy, N)
    # quadratic PML conductivity profiles, nonzero only inside the two layers
    σx = [ξ < dpml ? σ₀*(dpml-ξ)^2 : ξ > Lx+dpml ? σ₀*(ξ-(Lx+dpml))^2 : 0.0 for ξ in x′]
    σy = [ξ < dpml ? σ₀*(dpml-ξ)^2 : ξ > Ly+dpml ? σ₀*(ξ-(Ly+dpml))^2 : 0.0 for ξ in y′]
    # complex-stretched first-derivative matrices (PML coordinate stretching
    # 1/(1 + iσ/ω) applied to the finite-difference operator)
    Dx = spdiagm(0=>inv.(@. 1+(im/ω)*σx)) * spdiagm_nonsquare(M+1,M, -1=>-ox, 0=>ox)
    Dy = spdiagm(0=>inv.(@. 1+(im/ω)*σy)) * spdiagm_nonsquare(N+1,N, -1=>-oy, 0=>oy)
    Ix = sparse(I,M,M)
    Iy = sparse(I,N,N)
    # assemble DᵀD Laplacian terms via Kronecker products; note transpose,
    # not adjoint — the complex PML operator is symmetric, not Hermitian
    return (kron(Ix, transpose(Dy)*Dy) + kron(transpose(Dx)*Dx, Iy) -
            spdiagm(0=>vec([ω^2 * ε(ξ, ζ) for ζ in y, ξ in x])), M, N, x, y)
end

## --- Solve: point source in a box with a circular ε=12 scatterer ---
A, Nx, Ny, x, y = Helmholtz2d(20,20, (x,y) -> hypot(x,y) < 0.5 ? 12 : 1, 2π)
size(A), nnz(A) / size(A,1)
nnz(A)/length(A)
b = zeros(Nx,Ny)
b[Nx÷2, Ny÷4] = 1 # point source
@time u = reshape(A \ reshape(b, Nx*Ny), Nx, Ny) # sparse direct solve
s = maximum(abs, real(u)) / 10 # color scale for the field plot
imshow(real(u), cmap="RdBu", vmin=-s, vmax=s,
       extent=(minimum(x),maximum(x),minimum(y),maximum(y)))

"""
    SD(A, b, x=0.0*b; tol=1e-8, maxiters=1000)

Solve `A*x = b` by steepest descent with exact line search, starting from `x`,
stopping when the residual norm drops below `tol*norm(b)` or after `maxiters`
iterations.  Returns `(x, rnorm)` where `rnorm` is the array of residual norms
at every step.  Intended for symmetric positive-definite `A` (the demos below
use `A'*A`) — not checked.  NOTE(review): mutates the starting-guess argument
`x` in place despite the name lacking a `!`.
"""
function SD(A, b, x=0.0*b; tol=1e-8, maxiters=1000)
    bnorm = norm(b)
    r = b - A*x # initial residual
    rnorm = [norm(r)] # return the array of residual norms
    Ad = similar(r) # allocate space for Ad
    for i = 1:maxiters
        d = r # use the steepest-descent direction
        mul!(Ad, A, d) # store matvec A*d in-place in Ad
        α = dot(d, r) / dot(d, Ad) # exact line-search step length
        @. x = x + α * d # @. is a way to get this in a single loop in-place
        @. r = r - α * Ad # update the residual (without computing A*x again)
        push!(rnorm, norm(r))
        rnorm[end] ≤ tol*bnorm && break # converged
    end
    return x, rnorm
end

## --- Steepest-descent convergence on a random SPD system ---
A = rand(100,100); A = A'*A # a random SPD matrix
b = rand(100)
x, rnorm = SD(A, b, maxiters=1000)
length(rnorm), rnorm[end]/norm(b)
semilogy(rnorm)
title("steepest-descent convergence")
ylabel("residual norm")
xlabel("iterations")

## --- Visualize steepest-descent zig-zagging on a 2x2 quadratic ---
θ = 0.9 # chosen to make a nice-looking plot
Q = [cos(θ) sin(θ); -sin(θ) cos(θ)] # 2x2 rotation by θ
A = Q * diagm(0=>[10,1]) * Q' # a 2x2 matrix with eigenvalues 10,1
b = A * [1,1] # right-hand side for solution (1,1)
x1 = range(-11,11,length=100)
# contours of the quadratic xᵀAx - 2bᵀx whose minimizer is the solution
contour(x1', x1,
        [dot([x1,x2], A*[x1,x2]) - 2*(x1*b[1]+x2*b[2]) for x1 in x1, x2 in x1],
        levels=range(1,2000,length=100))
plot(1,1, "r*")
x1s = Float64[]
x2s = Float64[]
for i = 0:20
    # re-run from scratch with i iterations to record the i-th iterate
    x, = SD(A, b, [-10.,-10.], maxiters=i)
    push!(x1s, x[1])
    push!(x2s, x[2])
end
plot(x2s, x1s, "k.-")
title("convergence of 2x2 steepest-descent")

"""
    CG(A, b, x=0.0*b; tol=1e-8, maxiters=1000)

Solve `A*x = b` by the conjugate-gradient method (Fletcher–Reeves update
β = ‖r_new‖²/‖r_old‖²), starting from `x`, stopping when the residual norm
drops below `tol*norm(b)` or after `maxiters` iterations.  Returns
`(x, rnorm)` where `rnorm` is the array of residual norms at every step.
NOTE(review): like `SD`, mutates the starting-guess argument `x` in place.
"""
function CG(A, b, x=0.0*b; tol=1e-8, maxiters=1000)
    bnorm = norm(b)
    r = b - A*x # initial residual
    rnorm = [norm(r)] # return the array of residual norms
    d = copy(r) # initial direction is just steepest-descent
    Ad = similar(r) # allocate space for Ad
    for i = 1:maxiters
        mul!(Ad, A, d) # store matvec A*d in-place in Ad
        α = dot(d, r) / dot(d, Ad) # exact line-search step length
        @. x = x + α * d # @. is a way to get this in a single loop in-place
        @. r = r - α * Ad # update the residual (without computing A*x again)
        push!(rnorm, norm(r))
        @. d = r + d * (rnorm[end]/rnorm[end-1])^2 # conjugate direction update
        rnorm[end] ≤ tol*bnorm && break # converged
    end
    return x, rnorm
end

## --- CG convergence on a random SPD system ---
A = rand(100,100); A = A'*A # a random SPD matrix
b = rand(100)
x, rnorm = CG(A, b)
length(rnorm), rnorm[end]/norm(b)
semilogy(rnorm)
title("conjugate-gradient convergence")
ylabel("residual norm")
xlabel("iterations")

## --- The same 2x2 quadratic, now with CG ---
θ = 0.9 # chosen to make a nice-looking plot
Q = [cos(θ) sin(θ); -sin(θ) cos(θ)] # 2x2 rotation by θ
A = Q * diagm(0=>[10,1]) * Q' # a 2x2 matrix with eigenvalues 10,1
b = A * [1,1] # right-hand side for solution (1,1)
x1 = range(-11,11,length=100)
contour(x1', x1,
        [dot([x1,x2], A*[x1,x2]) - 2*(x1*b[1]+x2*b[2]) for x1 in x1, x2 in x1],
        levels=range(1,2000,length=100))
plot(1,1, "r*")
x1s = Float64[]
x2s = Float64[]
for i = 0:2 # only 2 iterations needed for a 2x2 system
    x, = CG(A, b, [-10.,-10.], maxiters=i)
    push!(x1s, x[1])
    push!(x2s, x[2])
end
plot(x2s, x1s, "k.-")
title("convergence of 2x2 conjugate-gradient")

## --- CG via the IterativeSolvers.jl package ---
# do this if you haven't installed it yet: ] add IterativeSolvers
using IterativeSolvers
A = rand(100,100); A = A'*A # a random SPD matrix
b = rand(100)
x, ch = cg(A, b, maxiter=300, log=true) # log=true returns convergence history
norm(A*x - b) / norm(b)
semilogy(ch[:resnorm])
xlabel("iteration")
ylabel(L"\Vert \mathrm{residual} \Vert")
title("convergence of conjugate gradient")

## --- Preconditioned CG: perturbed 1d-Laplacian, preconditioned by ldlt(L) ---
n = 300
L = SymTridiagonal(fill(2.0,n), fill(-1.0, n-1)) # tridiagonal [-1 2 -1] stencil
b = rand(n)
S = sprand(n,n,0.001) * 0.1 # a small random, sparse perturbation
S = S'*S # needs to be symmetric positive-definite
A = sparse(L + S)
x, ch = cg(A, b, maxiter=300, log=true)
x′, ch′ = cg(A, b, Pl=ldlt(L), maxiter=300, log=true) # L factorization as preconditioner
semilogy(ch[:resnorm], "b-")
semilogy(ch′[:resnorm], "r-")
xlabel("iterations")
ylabel("residual norm")
legend(["no preconditioner", "preconditioner"], loc="lower right")
# compare spectra: eigenvalues of A vs. the preconditioned pencil (below)
semilogy(sort!(eigvals(Matrix{Float64}(A))), "b.")
# Overlay the generalized eigenvalues of the pencil (A, L) — the spectrum of
# the preconditioned operator L⁻¹A — in red, then label the comparison plot.
λprec = eigvals(Matrix{Float64}(A), Matrix{Float64}(L))
sort!(λprec)
semilogy(λprec, "r.")
xlabel(L"index $k$ of eigenvalue $\lambda_k$")
ylabel(L"eigenvalues $\lambda_k$")
legend([L"unpreconditioned $A$", L"preconditioned $L^{-1} A$"])