**Naoyuki Tsuchiya**
Department of Economics, University of Tokyo
From Miranda and Fackler, Applied Computational Economics and Finance, 2002, Section 7.6.3
Julia translation of the Python version.
# Model primitives for the asset replacement-with-maintenance problem
maxage = 5 # Maximum asset age
repcost = 75 # Replacement cost
mancost = 10 # Maintenance cost
beta = 0.9 # Discount factor
m = 3 # Number of actions; 1: keep, 2: service, 3: replace ;
# State (age, serv): asset age in 1:maxage, number of past servicings in 0:maxage-1
S = vec([(j, i) for i = 0:maxage-1, j = 1:maxage])
S # State space
25-element Array{Tuple{Int64,Int64},1}: (1, 0) (1, 1) (1, 2) (1, 3) (1, 4) (2, 0) (2, 1) (2, 2) (2, 3) (2, 4) (3, 0) (3, 1) (3, 2) (3, 3) (3, 4) (4, 0) (4, 1) (4, 2) (4, 3) (4, 4) (5, 0) (5, 1) (5, 2) (5, 3) (5, 4)
n = length(S) # Number of states (maxage ages x maxage service levels = 25)
25
# Look up the linear index of an (age, serv) pair in the state vector `S`.
# Returns the index `i` with `S[i] == (age, serv)`, or `nothing` if the pair
# is not present.
# Fixed: the original iterated `1:n`, silently depending on the non-const
# global `n`; the search range is now derived from `S` itself.
function getindex_0(age, serv, S)
    for i in eachindex(S)
        if S[i][1] == age && S[i][2] == serv
            return i
        end
    end
    return nothing
end
getindex_0 (generic function with 1 method)
# Profit from operating an asset of age `age` that has been serviced `serv`
# times. Profit shrinks with the "effective age" (age - serv) and with age
# itself (quadratic deterioration).
# The divisor 5 duplicated the global `maxage`; it is now an overridable
# keyword with the same default, so existing calls `p(age, serv)` are
# unchanged. (Assumed to coincide with maxage — confirm against M&F 7.6.3.)
function p(age, serv; maxage=5)
    return (1 - (age - serv)/maxage) * (50 - 2.5 * age - 2.5 * age^2)
end
p (generic function with 1 method)
# Reward array: R[i, a] = immediate reward in state S[i] under action a
# (1: keep, 2: service, 3: replace).
# `zeros(n, m)` replaces the uninitialized `Array{Float64}(n, m)` — that
# constructor form was removed in Julia 1.0, and zeros works on all versions.
R = zeros(n, m)
for i in 1:n
    age, serv = S[i]
    R[i, 1] = p(age, serv)                # keep: profit at current age/service
    R[i, 2] = p(age, serv + 1) - mancost  # service: one more servicing, pay cost
    R[i, 3] = p(0, 0) - repcost           # replace: fresh asset, pay replacement
end
# Infeasible actions: the last maxage states have age == maxage, where the
# asset cannot be kept or serviced — only replaced.
for j in n-maxage+1:n
    R[j, 1] = -Inf
    R[j, 2] = -Inf
end
R
25×3 Array{Float64,2}: 36.0 35.0 -25.0 45.0 44.0 -25.0 54.0 53.0 -25.0 63.0 62.0 -25.0 72.0 71.0 -25.0 21.0 18.0 -25.0 28.0 25.0 -25.0 35.0 32.0 -25.0 42.0 39.0 -25.0 49.0 46.0 -25.0 8.0 2.0 -25.0 12.0 6.0 -25.0 16.0 10.0 -25.0 20.0 14.0 -25.0 24.0 18.0 -25.0 0.0 -10.0 -25.0 0.0 -10.0 -25.0 0.0 -10.0 -25.0 0.0 -10.0 -25.0 0.0 -10.0 -25.0 -Inf -Inf -25.0 -Inf -Inf -25.0 -Inf -Inf -25.0 -Inf -Inf -25.0 -Inf -Inf -25.0
# (Degenerate) transition probability array: Q[i, a, j] = Pr(next state S[j] |
# current state S[i], action a). Transitions are deterministic, so each
# (i, a) slice contains a single 1.
Q = zeros(n, m, n)
for i in 1:n
# keep: age advances by one, service count unchanged (the min clamp only keeps
# the index valid at age == maxage, where keeping is already ruled out by
# R = -Inf)
Q[i, 1, getindex_0(min(S[i][1]+1, maxage), S[i][2], S)] = 1
# service: age advances and the service count increments (both clamped)
Q[i, 2, getindex_0(min(S[i][1]+1, maxage), min(S[i][2]+1, maxage-1), S)] = 1
# replace: jump to a brand-new asset, state (1, 0)
Q[i, 3, getindex_0(1, 0, S)] = 1
end
using QuantEcon
# Create a DiscreteDP (discrete dynamic program) from the reward array R,
# transition array Q, and discount factor beta
ddp = DiscreteDP(R, Q, beta);
# Solve the dynamic optimization problem (by policy function iteration)
res = solve(ddp, PFI);
# Number of policy-iteration steps until convergence
res.num_iter
# Optimal policy: res.sigma[i] is the optimal action (1/2/3) in state S[i]
res.sigma
25-element Array{Int64,1}: 2 2 2 2 1 1 2 2 2 1 3 1 1 1 1 3 3 3 3 3 3 3 3 3 3
# Print the optimal action next to each reachable state.
# A state (age, serv) is reachable only when serv < age — an asset cannot
# have been serviced more times than its age in years.
for (state, action) in zip(S, res.sigma)
    if state[1] > state[2]
        print(state, action)
    end
end
(1, 0)2(2, 0)1(2, 1)2(3, 0)3(3, 1)1(3, 2)1(4, 0)3(4, 1)3(4, 2)3(4, 3)3(5, 0)3(5, 1)3(5, 2)3(5, 3)3(5, 4)3
# Simulate the controlled Markov chain: res.mc.p is the transition matrix of
# the chain induced by the optimal policy; attach the state labels S so the
# simulation yields (age, serv) tuples instead of raw indices
mc = MarkovChain(res.mc.p, S)
Discrete Markov Chain stochastic matrix of type Array{Float64,2}: [0.0 0.0 … 0.0 0.0; 0.0 0.0 … 0.0 0.0; … ; 1.0 0.0 … 0.0 0.0; 1.0 0.0 … 0.0 0.0]
# Start the simulation from a brand-new asset, state (1, 0)
initial_state_value = getindex_0(1, 0, S)
# Simulation horizon in years
nyrs = 12
# Simulate nyrs+1 states (years 0 through nyrs) under the optimal policy
spath = simulate(mc, nyrs+1, init=initial_state_value)
13-element Array{Tuple{Int64,Int64},1}: (1, 0) (2, 1) (3, 2) (4, 2) (1, 0) (2, 1) (3, 2) (4, 2) (1, 0) (2, 1) (3, 2) (4, 2) (1, 0)
using Plots
plotlyjs()
Plotly javascript loaded.
To load again call
init_notebook(true)
Plots.PlotlyJSBackend()
# Split the simulated path into its age and service-count components.
# Fixes two issues in the original:
#   * `Vector{Int}(nyrs)` is the pre-1.0 uninitialized constructor, removed
#     in Julia 1.0 — comprehensions avoid it on every version;
#   * only the first nyrs entries were collected, but the plots below use
#     nyrs+1 x-values (0:nyrs), causing a dimension mismatch — the whole
#     path (nyrs+1 entries) is collected instead.
spath_1 = [state[1] for state in spath]  # asset age in each simulated year
spath_2 = [state[2] for state in spath]  # servicings-so-far in each year
# Plot the age component of the optimal state path over years 0..nyrs
y = spath_1
plot(0:nyrs, y)
title!("Optimal State Path: Age of Asset")
xaxis!("Year")
yaxis!("Age of Asset", 1:0.5:4)
# Plot the servicing component of the optimal state path over years 0..nyrs
y = spath_2
plot(0:nyrs, y)
title!("Optimal State Path: Number of Servicings")
xaxis!("Year")
yaxis!("Number of Servicings", 0:0.5:2)