# Demo script: Cholesky factorization in Julia — triangular solves, determinants,
# pivoting for rank-deficient matrices, and three multivariate-normal
# log-density implementations of increasing efficiency.

versioninfo()

using LinearAlgebra

A = Float64.([4 12 -16; 12 37 -43; -16 -43 98])

# Cholesky without pivoting; Symmetric tells Julia to use only one triangle.
Achol = cholesky(Symmetric(A))
typeof(Achol)
fieldnames(typeof(Achol))
# retrieve the lower triangular Cholesky factor
Achol.L
# retrieve the upper triangular Cholesky factor
Achol.U

b = [1.0; 2.0; 3.0]
A \ b     # this does LU; wasteful!; 2/3 n^3 + 2n^2
Achol \ b # two triangular solves; only 2n^2 flops
det(A)     # this actually does LU; wasteful!
det(Achol) # cheap: product of the squared diagonal of the factor
inv(A)     # this does LU!
inv(Achol) # reuses the Cholesky factor

using Random
Random.seed!(123) # seed for reproducibility
A = randn(5, 3)
A = A * transpose(A) # A is 5x5 with rank 3 (positive semidefinite)

# Pivoted Cholesky: 2nd argument Val(true) requests pivoting.
# NOTE(fix): with the default check=true this call throws
# RankDeficientException because A is singular, aborting the script,
# so the positive-definiteness check must be turned off.
# (On Julia >= 1.8 the pivot is spelled cholesky(A, RowMaximum(); check=false).)
Achol = cholesky(A, Val(true), check=false)
rank(Achol) # determine rank from Cholesky factor
rank(A)     # determine rank from SVD, which is more numerically stable
Achol.L
Achol.U
Achol.p # pivot vector: A[p, p] == L * U
# permutation-matrix identity: P A P' = L U
# (fix: original compared Achol.P * A * Achol.P, missing the transpose)
norm(Achol.P * A * Achol.P' - Achol.L * Achol.U)

# this is a person w/o numerical analysis training
"""
    logpdf_mvn_1(y, Σ)

Log-density of the N(0, Σ) distribution at `y`, computed naively with
`inv(Σ)` and `logdet(Σ)` — two separate O(n^3) factorizations.
"""
function logpdf_mvn_1(y::Vector, Σ::Matrix)
    n = length(y)
    - (n//2) * log(2π) - (1//2) * logdet(Σ) - (1//2) * transpose(y) * inv(Σ) * y
end

# this is an efficiency-savvy person
"""
    logpdf_mvn_2(y, Σ)

Log-density of N(0, Σ) at `y` via a single Cholesky factorization:
`logdet` and the quadratic form both reuse the factor
(`y' Σ⁻¹ y = ‖L⁻¹ y‖²`).
"""
function logpdf_mvn_2(y::Vector, Σ::Matrix)
    n = length(y)
    Σchol = cholesky(Symmetric(Σ))
    - (n//2) * log(2π) - (1//2) * logdet(Σchol) - (1//2) * sum(abs2, Σchol.L \ y)
end

# better memory efficiency: factor in place
"""
    logpdf_mvn_3(y, Σ)

Same as `logpdf_mvn_2`, but `cholesky!` overwrites `Σ` with the factor,
avoiding an extra n×n allocation. WARNING: `Σ` is destroyed — pass a
`copy(Σ)` if the caller still needs it.
"""
function logpdf_mvn_3(y::Vector, Σ::Matrix)
    n = length(y)
    Σchol = cholesky!(Symmetric(Σ))
    - (n//2) * log(2π) - (1//2) * logdet(Σchol) - (1//2) * dot(y, Σchol \ y)
end

using BenchmarkTools, Distributions, Random

Random.seed!(123) # seed
n = 1000
# a pd matrix: Symmetric (upper triangle) gives Σ[i,j] = min(i,j) * (n - max(i,j) + 1)
Σ = convert(Matrix{Float64}, Symmetric([i * (n - j + 1) for i in 1:n, j in 1:n]))
y = rand(MvNormal(Σ)) # one random sample from N(0, Σ)

# at least they give same answer
# (fix: logpdf_mvn_3 gets a copy so it does not clobber Σ for the later calls)
@show logpdf_mvn_1(y, Σ)
@show logpdf_mvn_2(y, Σ)
@show logpdf_mvn_3(y, copy(Σ));

# benchmark; $-interpolation avoids the non-const-global penalty,
# and copy($Σ) keeps every benchmark evaluation working on a fresh matrix
@benchmark logpdf_mvn_1($y, $Σ)
@benchmark logpdf_mvn_2($y, $Σ)
@benchmark logpdf_mvn_3($y, copy($Σ))