!pip install --upgrade quantecon

import numpy as np
import quantecon as qe
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline

# Set discount factor
β = 0.95

def construct_arrays1(f1_vals=[1., 1.],
                      f2_vals=[1., 1.],
                      d_vals=[1., 1.]):
    """
    Construct matrices that map the problem described in example 1
    into a Markov jump linear quadratic dynamic programming problem.
    """

    # Number of Markov states
    m = len(f1_vals)
    # Number of state and control variables
    n, k = 2, 1

    # Construct sets of matrices for each state
    As = [np.eye(n) for i in range(m)]
    Bs = [np.array([[1, 0]]).T for i in range(m)]

    Rs = np.zeros((m, n, n))
    Qs = np.zeros((m, k, k))

    for i in range(m):
        Rs[i, 0, 0] = f2_vals[i]
        Rs[i, 1, 0] = - f1_vals[i] / 2
        Rs[i, 0, 1] = - f1_vals[i] / 2

        Qs[i, 0, 0] = d_vals[i]

    Cs, Ns = None, None

    # Compute the optimal k level of the payoff function in each state
    k_star = np.empty(m)
    for i in range(m):
        k_star[i] = f1_vals[i] / (2 * f2_vals[i])

    return Qs, Rs, Ns, As, Bs, Cs, k_star

state_vec1 = ["k", "constant term"]

# Construct the Markov transition matrix
Π1 = np.array([[0., 1.],
               [1., 0.]])

# Construct matrices
Qs, Rs, Ns, As, Bs, Cs, k_star = construct_arrays1(d_vals=[1., 0.5])

# Construct a Markov jump LQ problem
ex1_a = qe.LQMarkov(Π1, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)

# Solve for optimal value functions and decision rules
ex1_a.stationary_values();

# P(s)
ex1_a.Ps

# d(s) = 0, since there is no randomness
ex1_a.ds

# F(s)
ex1_a.Fs

# Plot the optimal decision rules
k_grid = np.linspace(0., 1., 100)

# Optimal choice in state s1
u1_star = - ex1_a.Fs[0, 0, 1] - ex1_a.Fs[0, 0, 0] * k_grid
# Optimal choice in state s2
u2_star = - ex1_a.Fs[1, 0, 1] - ex1_a.Fs[1, 0, 0] * k_grid

fig, ax = plt.subplots()
ax.plot(k_grid, k_grid + u1_star, label=r"$\overline{s}_1$ (high)")
ax.plot(k_grid, k_grid + u2_star, label=r"$\overline{s}_2$ (low)")

# The optimal k*
ax.scatter([0.5, 0.5], [0.5, 0.5], marker="*")
ax.plot([k_star[0], k_star[0]], [0., 1.0], '--')

# 45 degree line
ax.plot([0., 1.], [0., 1.], '--', label="45 degree line")

ax.set_xlabel("$k_t$")
ax.set_ylabel("$k_{t+1}$")
ax.legend()
plt.show()

# Compute a time series of k
T = 20
x0 = np.array([[0., 1.]]).T

x_path = ex1_a.compute_sequence(x0, ts_length=T)[0]

fig, ax = plt.subplots()
ax.plot(range(T), x_path[0, :-1])
ax.set_xlabel("$t$")
ax.set_ylabel("$k_t$")
ax.set_title("Optimal path of $k_t$")
plt.show()

λ = 0.8  # high λ
Π2 = np.array([[1-λ, λ],
               [λ, 1-λ]])

ex1_b = qe.LQMarkov(Π2, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
ex1_b.stationary_values();
ex1_b.Fs

λ = 0.2  # low λ
Π2 = np.array([[1-λ, λ],
               [λ, 1-λ]])

ex1_b = qe.LQMarkov(Π2, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
ex1_b.stationary_values();
ex1_b.Fs

λ_vals = np.linspace(0., 1., 10)
F1 = np.empty((λ_vals.size, 2))
F2 = np.empty((λ_vals.size, 2))

for i, λ in enumerate(λ_vals):
    Π2 = np.array([[1-λ, λ],
                   [λ, 1-λ]])

    ex1_b = qe.LQMarkov(Π2, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
    ex1_b.stationary_values();
    F1[i, :] = ex1_b.Fs[0, 0, :]
    F2[i, :] = ex1_b.Fs[1, 0, :]

for i, state_var in enumerate(state_vec1):
    fig, ax = plt.subplots()
    ax.plot(λ_vals, F1[:, i], label=r"$\overline{s}_1$", color="b")
    ax.plot(λ_vals, F2[:, i], label=r"$\overline{s}_2$", color="r")

    ax.set_xlabel(r"$\lambda$")
    ax.set_ylabel(r"$F_{s_t}$")
    ax.set_title(f"Coefficient on {state_var}")
    ax.legend()
    plt.show()

λ, δ = 0.8, 0.2
Π3 = np.array([[1-λ, λ],
               [δ, 1-δ]])

ex1_b = qe.LQMarkov(Π3, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
ex1_b.stationary_values();
ex1_b.Fs
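# A quick check (not part of the original computations above): under the optimal
# rule u = -F(s) x with x = (k, 1)', next period's capital is
# k' = (1 - F(s)[0, 0]) k - F(s)[0, 1], so the level of k that the state-s rule
# would converge to if the state never switched is
# k_target(s) = -F(s)[0, 1] / F(s)[0, 0].
# This is the same "k target" plotted later by the run() wrapper; the variable
# name k_target is introduced here only for illustration.
k_target = np.empty(2)
for s in range(2):
    k_target[s] = - ex1_b.Fs[s, 0, 1] / ex1_b.Fs[s, 0, 0]

print("k_target =", k_target)   # state-by-state targets implied by F(s)
print("k_star   =", k_star)     # static optimizers f1(s) / (2 f2(s))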
λ_vals = np.linspace(0., 1., 10)
δ_vals = np.linspace(0., 1., 10)

λ_grid = np.empty((λ_vals.size, δ_vals.size))
δ_grid = np.empty((λ_vals.size, δ_vals.size))
F1_grid = np.empty((λ_vals.size, δ_vals.size, len(state_vec1)))
F2_grid = np.empty((λ_vals.size, δ_vals.size, len(state_vec1)))

for i, λ in enumerate(λ_vals):
    λ_grid[i, :] = λ
    δ_grid[i, :] = δ_vals
    for j, δ in enumerate(δ_vals):
        Π3 = np.array([[1-λ, λ],
                       [δ, 1-δ]])
        ex1_b = qe.LQMarkov(Π3, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
        ex1_b.stationary_values();

        F1_grid[i, j, :] = ex1_b.Fs[0, 0, :]
        F2_grid[i, j, :] = ex1_b.Fs[1, 0, :]

for i, state_var in enumerate(state_vec1):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # High adjustment cost, blue surface
    ax.plot_surface(λ_grid, δ_grid, F1_grid[:, :, i], color="b")
    # Low adjustment cost, red surface
    ax.plot_surface(λ_grid, δ_grid, F2_grid[:, :, i], color="r")

    ax.set_xlabel(r"$\lambda$")
    ax.set_ylabel(r"$\delta$")
    ax.set_zlabel(r"$F_{s_t}$")
    ax.set_title(f"Coefficient on {state_var}")
    plt.show()
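# Optional aside, a sketch for interpreting the surfaces above: with the
# asymmetric chain Π3 = [[1-λ, λ], [δ, 1-δ]], the long-run fraction of time
# spent in each state is (δ/(λ+δ), λ/(λ+δ)).  Assuming quantecon's MarkovChain
# API, this can be checked numerically; λ_ex and δ_ex below are illustrative
# values matching the single asymmetric case solved earlier.
λ_ex, δ_ex = 0.8, 0.2
mc = qe.MarkovChain(np.array([[1-λ_ex, λ_ex],
                              [δ_ex, 1-δ_ex]]))
print(mc.stationary_distributions)              # numerical stationary distribution
print(δ_ex/(λ_ex+δ_ex), λ_ex/(λ_ex+δ_ex))       # analytical counterpart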
def run(construct_func, vals_dict, state_vec):
    """
    A wrapper function that repeats the computations above
    for different cases.
    """
    Qs, Rs, Ns, As, Bs, Cs, k_star = construct_func(**vals_dict)

    # Symmetric Π
    # Note that the purely periodic transition matrix is the special case λ=1
    print("symmetric Π case:\n")
    λ_vals = np.linspace(0., 1., 10)
    F1 = np.empty((λ_vals.size, len(state_vec)))
    F2 = np.empty((λ_vals.size, len(state_vec)))

    for i, λ in enumerate(λ_vals):
        Π2 = np.array([[1-λ, λ],
                       [λ, 1-λ]])

        mplq = qe.LQMarkov(Π2, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
        mplq.stationary_values();
        F1[i, :] = mplq.Fs[0, 0, :]
        F2[i, :] = mplq.Fs[1, 0, :]

    for i, state_var in enumerate(state_vec):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(λ_vals, F1[:, i], label=r"$\overline{s}_1$", color="b")
        ax.plot(λ_vals, F2[:, i], label=r"$\overline{s}_2$", color="r")

        ax.set_xlabel(r"$\lambda$")
        ax.set_ylabel(r"$F(\overline{s}_t)$")
        ax.set_title(f"Coefficient on {state_var}")
        ax.legend()
        plt.show()

    # Plot the optimal levels k*(s_t) and the k levels that the optimal
    # policies target (example 1 only)
    if state_vec == ["k", "constant term"]:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for i in range(2):
            F = [F1, F2][i]
            c = ["b", "r"][i]
            ax.plot([0, 1], [k_star[i], k_star[i]], "--",
                    color=c, label=r"$k^*(\overline{s}_" + str(i+1) + ")$")
            ax.plot(λ_vals, - F[:, 1] / F[:, 0], color=c,
                    label=r"$k^{target}(\overline{s}_" + str(i+1) + ")$")

        # Plot a vertical line at λ=0.5
        ax.plot([0.5, 0.5], [min(k_star), max(k_star)], "-.")

        ax.set_xlabel(r"$\lambda$")
        ax.set_ylabel("$k$")
        ax.set_title("Optimal k levels and k targets")
        ax.text(0.5, min(k_star) + (max(k_star) - min(k_star)) / 20,
                r"$\lambda=0.5$")
        ax.legend(bbox_to_anchor=(1., 1.))
        plt.show()

    # Asymmetric Π
    print("asymmetric Π case:\n")
    δ_vals = np.linspace(0., 1., 10)

    λ_grid = np.empty((λ_vals.size, δ_vals.size))
    δ_grid = np.empty((λ_vals.size, δ_vals.size))
    F1_grid = np.empty((λ_vals.size, δ_vals.size, len(state_vec)))
    F2_grid = np.empty((λ_vals.size, δ_vals.size, len(state_vec)))

    for i, λ in enumerate(λ_vals):
        λ_grid[i, :] = λ
        δ_grid[i, :] = δ_vals
        for j, δ in enumerate(δ_vals):
            Π3 = np.array([[1-λ, λ],
                           [δ, 1-δ]])
            mplq = qe.LQMarkov(Π3, Qs, Rs, As, Bs, Cs=Cs, Ns=Ns, beta=β)
            mplq.stationary_values();
            F1_grid[i, j, :] = mplq.Fs[0, 0, :]
            F2_grid[i, j, :] = mplq.Fs[1, 0, :]

    for i, state_var in enumerate(state_vec):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(λ_grid, δ_grid, F1_grid[:, :, i], color="b")
        ax.plot_surface(λ_grid, δ_grid, F2_grid[:, :, i], color="r")

        ax.set_xlabel(r"$\lambda$")
        ax.set_ylabel(r"$\delta$")
        ax.set_zlabel(r"$F(\overline{s}_t)$")
        ax.set_title(f"Coefficient on {state_var}")
        plt.show()

run(construct_arrays1, {"f1_vals": [0.5, 1.]}, state_vec1)
run(construct_arrays1, {"f2_vals": [0.5, 1.]}, state_vec1)

def construct_arrays2(f1_vals=[1., 1.],
                      f2_vals=[1., 1.],
                      d_vals=[1., 1.],
                      α0_vals=[1., 1.],
                      ρ_vals=[0.9, 0.9],
                      σ_vals=[1., 1.]):
    """
    Construct matrices that map the problem described in example 2
    into a Markov jump linear quadratic dynamic programming problem.
    """

    m = len(f1_vals)
    n, k, j = 3, 1, 1

    Rs = np.zeros((m, n, n))
    Qs = np.zeros((m, k, k))
    As = np.zeros((m, n, n))
    Bs = np.zeros((m, n, k))
    Cs = np.zeros((m, n, j))

    for i in range(m):
        Rs[i, 0, 0] = f2_vals[i]
        Rs[i, 1, 0] = - f1_vals[i] / 2
        Rs[i, 0, 1] = - f1_vals[i] / 2
        Rs[i, 0, 2] = 1/2
        Rs[i, 2, 0] = 1/2

        Qs[i, 0, 0] = d_vals[i]

        As[i, 0, 0] = 1
        As[i, 1, 1] = 1
        As[i, 2, 1] = α0_vals[i]
        As[i, 2, 2] = ρ_vals[i]

        Bs[i, :, :] = np.array([[1, 0, 0]]).T
        Cs[i, :, :] = np.array([[0, 0, σ_vals[i]]]).T

    Ns = None
    k_star = None

    return Qs, Rs, Ns, As, Bs, Cs, k_star

state_vec2 = ["k", "constant term", "w"]

run(construct_arrays2, {"d_vals": [1., 0.5]}, state_vec2)
run(construct_arrays2, {"f1_vals": [0.5, 1.]}, state_vec2)
run(construct_arrays2, {"f2_vals": [0.5, 1.]}, state_vec2)
run(construct_arrays2, {"α0_vals": [0.5, 1.]}, state_vec2)
run(construct_arrays2, {"ρ_vals": [0.5, 0.9]}, state_vec2)
run(construct_arrays2, {"σ_vals": [0.5, 1.]}, state_vec2)
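# A final sketch (illustrative, not part of the original sequence of runs):
# solve a single instance of example 2 directly and simulate it, to see how
# the Markov state and the shock loaded through Cs move k_t.  The call pattern
# mirrors ex1_a above; the names Qs2, ex2, etc. are introduced here only for
# this example, and x0 = (k, 1, w)' starts at k = 0, w = 0.
Qs2, Rs2, Ns2, As2, Bs2, Cs2, _ = construct_arrays2(d_vals=[1., 0.5])
ex2 = qe.LQMarkov(Π1, Qs2, Rs2, As2, Bs2, Cs=Cs2, Ns=Ns2, beta=β)
ex2.stationary_values();

T = 50
x0 = np.array([[0., 1., 0.]]).T
x_path = ex2.compute_sequence(x0, ts_length=T)[0]

fig, ax = plt.subplots()
ax.plot(range(T), x_path[0, :-1], label="$k_t$")
ax.plot(range(T), x_path[2, :-1], label="$w_t$")
ax.set_xlabel("$t$")
ax.legend()
plt.show()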