#!/usr/bin/env python # coding: utf-8 # # The Value of Existential Risk Mitigation: Companion Notebook # Author: [Arvo Muñoz Morán](https://www.arvomm.com/) # # This is the companion notebook to the "Technical Report: The Value of Existential Risk Mitigation", which is a project of The Worldview Investigations Team at [Rethink Priorities](https://rethinkpriorities.org/research) # In[1]: from datetime import date today = date.today() formatted_date = today.strftime('%d %B %Y') print(f"Last Updated: ",formatted_date) # All the imports needed for the notebook (which need installing if the haven't been already) are in the cell below. # In[2]: import numpy as np import matplotlib.pyplot as plt import pandas as pd import time from IPython.display import Image, display # In[3]: # skip_execution = True # skip execution of certain cells skip_execution = True # # The value of the world given existential risk # Let us start by directly translating Equation (1) of the generalised model into Python. # In[4]: def calculate_Ew(T, v, r): Ew = 0 for i in range(T): # i=0,1,2, ... T-1 prod_term = 1 for j in range(i+1): # j=0,1,2, ... i prod_term *= (1 - r[j]) Ew += prod_term * v[i] return Ew # Quick Example # Set the parameters T = 1000 # For the purpose of this example, I'll use uniform values for `v` and `r` # v = [1, 1, ... 1] and r = [0.01, 0.01, ... 0.01] # The reader can change these values as per their requirements v = [1 for _ in range(T)] r = [0.01 for _ in range(T)] # Calculate E(w) Ew = calculate_Ew(T, v, r) # Printing the results along with the parameters print("Quick example:") print("--------------") print("T =", T) print("v (first 5 elements for brevity) =", v[:5]) print("r (first 5 elements for brevity) =", r[:5]) print("E(w) =", Ew) # True value at infty for v = [1 for _ in range(T)] , r = [0.01 for _ in range(T)] should be # S = a1/(1-R)=99 since a1=1-r and R=1-r=1-0.01=0.99 # And it is. # # Different Value Paths # Let us now put into code the main value paths we are interested in: # - constant value # - linear value # - quadratic value # - cubic value # - logit value # - and others (custom) # # From this point onwards we normalise the value of the world next year $v_c$ to 1. # # First, we define a time adjustment function to adjust the time scale of the value vector from centuries to years, or to any other scale captured by the alpha parameter. Second, we define a function that returns the sequence of values of the world, given a path and a time adjustment function. # In[5]: # Time adjustment function def time_adjustment(input, alpha): return alpha * input + (1 - alpha) #alpha is simply 1/units_in_century # Value sequence generator given the path type def generate_value_vector(T, v_type="constant", custom_v=None, alpha=0.01, forplot=False): # Initialize the value vector based on the preset option if v_type == "constant": v = [1 for _ in range(T)] elif v_type == "linear": v = [time_adjustment(i+1,alpha) for i in range(T)] elif v_type == "quadratic": v = [(time_adjustment(i+1,alpha))**2 for i in range(T)] elif v_type == "cubic": v = [(time_adjustment(i+1,alpha))**3 for i in range(T)] elif v_type == "logistic": # Key parameters c = (100)**3 # The value cap. Here, what obtains after 10,000 years of cubic growth exp_rate = 0.03 # 3% annually i.e. when alpha=0.01 mod_rate = exp_rate*(alpha/0.01 + 10*np.exp(1)*alpha-np.exp(1)*0.1) # modifications for other alphas v_0 = c / ((c - 1) * np.exp(mod_rate) + 1) # The value at zero, chosen so that value at i=1 is 1. # Function for the ith term v = [c / (1 + ((c - v_0) / v_0 )* np.exp(-mod_rate * (i+1))) for i in range(T)] # if in doubt, see graph below elif v_type == "custom": if custom_v is None: custom_v = list(map(int, input("Enter your custom v vector separated by spaces: ").split())) v = custom_v return v # Examples of first ten years with each of the types types = ["constant", "linear", "quadratic", "cubic", "logistic"] T = 10 v_type = "constant" print("Quick example for 1 period = 1 year (alpha=0.01):") for v_type in types: v = generate_value_vector(T, v_type) print("Quick example:") print(f"Generated value sequence (v) of type '{v_type}': {v}") print("--------------") # We can print the same example again for alpha=1 print(" ") print(" ") print("Quick example for 1 period = 1 century (alpha=1):") for v_type in types: v = generate_value_vector(T, v_type, alpha=1) print(f"Generated value sequence (v) of type '{v_type}': {v}") print("--------------") # We can now plot these value paths (when periods are centuries, like in the original work by Ord). # In[6]: def plot_value_scenarios(T=50, alpha=1): types = ["constant", "linear", "quadratic", "cubic", "logistic"] colors = ["b", "g", "r", "c", "m"] plt.figure(figsize=(20, 6)) for i, v_type in enumerate(types): plt.subplot(1, 5, i + 1) v = generate_value_vector(T, v_type, alpha=alpha, forplot=True) plt.plot(range(1, T + 1), v, color=colors[i]) plt.title(f"{v_type} value") plt.xlabel("period") if alpha == 1: plt.xlabel("century") elif alpha == 0.01: plt.xlabel("year") else: plt.xlabel("period") plt.ylabel("value") plt.tight_layout() plt.show() # In[7]: plot_value_scenarios() # And we can plot these value paths when periods are years. See how, as expected, these two match when the $T$ in years is $100$ times that of centuries. # In[8]: plot_value_scenarios(T=5000,alpha=0.01) # # Different Existential Risk Structures # # The defaults in this function are: # - For "constant", the risk is a constant value ```0.0022289477``` for $T$ periods. # - For "top", the risk is ```0.0022289477``` for the first $100$ periods and ```0.0001``` thereafter. # - For "eras", the risk vector alternates between ```0.0022289477``` and ```0.0001``` with eras of different lengths. # - For "exponential_decay", the risk ```r_0=0.0022289477``` decays exponentially towards a tiny risk $r_\infty=$```1e-9``` at the rate ```lambda_val=-0.0001```. # - For "custom", the user is prompted to enter their custom risk vector. # # In[9]: def generate_risk_vector(T, r_type="constant", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-9, custom_r=None): if r_type == "constant": r = [r_high for _ in range(T)] elif r_type == "top": r = [r_high if i < 100 else r_low for i in range(T)] elif r_type == "eras": if eras_periods is None or eras_risks is None: raise ValueError("For 'eras' type, both eras_periods and eras_risks must be provided.") r = [] for i, r_val in zip(eras_periods, eras_risks): r.extend([r_val] * i) elif r_type == "exponential_decay": r = [(r_0 * np.exp(lambda_val * t) + r_infty) for t in range(T)] elif r_type == "custom": if custom_r is None: custom_r = list(map(float, input("Enter your custom r vector separated by spaces: ").split())) r = custom_r #this is very flexible, user can run the function with any arbitrary r by setting the option: custom_r = [chosen vector] return r # In[10]: # Quick example T = 150 print(f"For exponential decay: ",generate_risk_vector(T, r_type="exponential_decay")[:15]) # Quick example T = 1500 eras_periods = [100, 500, 100, T - 700] eras_risks = [0.0022289477, 0.0001, 0.0022289477, 0.0001] print(f"For 2 Filters: ",generate_risk_vector(T, "eras",eras_periods=eras_periods,eras_risks=eras_risks)[:15]) # for both we only display the first 15 entries # In[11]: def plot_risk_scenarios(T=2000): # def plot_risk_scenarios(T=100000): types = ["constant", "top", "eras", "exponential_decay"] colors = ["b", "g", "r", "m"] plt.figure(figsize=(20, 6)) for i, r_type in enumerate(types): plt.subplot(1, 4, i + 1) r = generate_risk_vector(T, r_type, eras_periods = [100, 500, 100, T - 700], eras_risks = [0.0022289477, 0.0001, 0.0022289477, 0.0001], lambda_val=-0.0001, r_infty=1e-9) plt.plot(range(1, T + 1), r, color=colors[i]) if r_type == 'eras': plt.title('Two Great Filters') elif r_type == 'top': plt.title('Time of Perils') elif r_type == 'exponential_decay': plt.title('exponential decay risk') elif r_type == 'constant': plt.title('constant risk') else: plt.title(f"{r_type} Risk") plt.xlabel("years") plt.ylabel("extinction risk") plt.tight_layout() plt.show() # In[12]: plot_risk_scenarios() # # The Expected Value of the World Without Interventions # # Let us start with an example. Let us calculate the expected value of the world if risk followed constant risk and value. # In[13]: def eg_plotEw_constant_value(T=1000, r_type="constant", r_high=0.0022289477, step=50): # Generate constant value vector and constant risk vector (can set r_high set to 0.01 as an example) v_constant = generate_value_vector(T, "constant") r_vector = generate_risk_vector(T, r_type, r_high=0.01) # let's use 0.01 for risk since we know (and showed above) that it should converge to 99 # Calculate Ew for each specified period and store in a list (the step is how often we calculate Ew) Ew_list = [calculate_Ew(t, v_constant[:t], r_vector[:t]) for t in range(1, T + 1, step)] # Plot plt.figure(figsize=(10, 6)) plt.plot(range(1, T + 1, step), Ew_list, color='b') if r_type == "constant": plt.title("E(w) for Constant Risk & Constant Value") elif r_type == "top": plt.title("E(w) for Time of Perils & Constant Value") plt.title("E(w) for Constant Risk & Constant Value") plt.xlabel("Period") plt.ylabel("E(w)") plt.show() eg_plotEw_constant_value() # ### Time of Perils under Constant Value # And now under the Time of Perils structure, when the value is constant. # In[14]: if not skip_execution: eg_plotEw_constant_value(T=20000, r_type="top", step=2000) #runtime is about 30s # Performance is an issue, even before having a high enough $T$ that could show convergence. A new approach is warranted. We test the running times further below. # In[15]: if not skip_execution: T = 3000 step = 50 # Calculate E(w) every 50 periods # Start the timer start_time = time.time() # Generate constant value vector and top risk vector v_constant = generate_value_vector(T+1, "constant") r_top = generate_risk_vector(T+1, "top") # Calculate Ew for each specified period and store in a list Ew_list = [] for t in range(step, T + 1, step): Ew = calculate_Ew(t, v_constant[:t], r_top[:t]) Ew_list.append(Ew) # Print the run time up until this point elapsed_time = time.time() - start_time print(f"Elapsed time after {t} periods: {elapsed_time:.2f} seconds") # Plot plt.figure(figsize=(10, 6)) plt.plot(range(step, T + 1, step), Ew_list, color='b') plt.title("E(w) for Constant Risk & Constant Value (every 50 periods)") plt.xlabel("Period") plt.ylabel("E(w)") plt.show() # ## Modifications to make use of cumulative properties of $E(w)$ and speed up code # # For contrast, recall: # ```python # def calculate_Ew(T, v, r): # Ew = 0 # # for i in range(T): # prod_term = 1 # # for j in range(i+1): # prod_term *= (1 - r[j]) # # Ew += prod_term * v[i] # # return Ew # ``` # # One possible improvement is to elliminate the second ```for``` loop. We opt for an alternative. The function `calculate_Ew_incremental` is designed to incrementally update the expected value $ E(w) $ and the "product term" (which I'll refer to as ```prod_term``` at each time period $ t $. # # Here's the new function definition again for reference: # # ```python # def calculate_Ew_incremental(prev_Ew, prev_prod_term, v_i, r_i): # new_prod_term = prev_prod_term * (1 - r_i[-1]) # new_Ew = prev_Ew + new_prod_term * v_i[-1] # return new_Ew, new_prod_term # ``` # # ### Inputs: # # 1. `prev_Ew`: The previous value of $ E(w) $ up to time $ t-1 $. # 2. `prev_prod_term`: The previous value of $ \text{prod\_term} $, which represents the product of $ (1 - r_j) $ terms up to time $ t-1 $. # 3. `v_i`: The value vector for time $ t $, which is expected to be a one-element list in the way it's used here. # 4. `r_i`: The risk vector for time $ t $, which is also expected to be a one-element list in this context. # # ### Steps: # # 1. `new_prod_term = prev_prod_term * (1 - r_i[-1])`: Here, we update $ \text{prod\_term} $ by multiplying it with $ (1 - r_t) $. Since $ r_i $ is expected to be a one-element list, `r_i[-1]` grabs that single element. # # 2. `new_Ew = prev_Ew + new_prod_term * v_i[-1]`: We update $ E(w) $ by adding $ \text{prod\_term} \times v_t $ to the previous $ E(w) $. Again, because $ v_i $ is expected to be a one-element list, `v_i[-1]` grabs that element. # # 3. `return new_Ew, new_prod_term`: Finally, we return the updated $ E(w) $ and $ \text{prod\_term} $ for use in the next iteration. # # By calculating $ E(w) $ in this incremental way, we save computational time. # In[16]: # Modified calculate_Ew to handle incremental Ew calculation def calculate_Ew_incremental(prev_Ew, prev_prod_term, v_i, r_i): new_prod_term = prev_prod_term * (1 - r_i[-1]) # r_i[-1] notation for grabbing last element new_Ew = prev_Ew + new_prod_term * v_i[-1] return new_Ew, new_prod_term # Let us run an example and observe the run times # In[17]: def eg_quicker_ew_top_risk_constant_value(): # Initial setup T = 150000 step = 1 # what we're recording for our graph v_constant = generate_value_vector(T+1, "constant") r_top = generate_risk_vector(T+1, "top") # Initialize variables Ew = 0 prod_term = 1 Ew_list = [] # Start the timer start_time = time.time() # Loop through each period, but only store the E(w) every 'step' periods for t in range(1, T + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v_constant[t-1:t], r_top[t-1:t]) # If the current period is a multiple of 'step', then store the E(w) if t % step == 0: Ew_list.append(Ew) # Print the run time up until this point # elapsed_time = time.time() - start_time # print(f"Elapsed time after {t} periods: {elapsed_time:.2f} seconds") # Plot plt.figure(figsize=(10, 6)) plt.plot(range(step, T + 1, step), Ew_list, color='b') plt.title(f"E(w) for ToP Risk & Constant Value") plt.xlabel("Period") plt.ylabel("E(w)") plt.show() eg_quicker_ew_top_risk_constant_value() #runtime is about 0.1s. # With many loops gone, it's over 300x faster than the previous version and has a much higher T. # ## Convergence # # It is useful to automate convergence testing of each scenario. Below we create a separate function that checks for convergence based on a given tolerance level. This function will compare the last two $ E(w) $ values and return `True` if they have converged within the tolerance or `False` otherwise. If the function returns `True`, the loop breaks, and the program will print out the period at which it converged. # In[18]: # Function to check for convergence def has_converged(new_value, prev_value, tolerance=1e-5): if abs(new_value - prev_value) < tolerance: return True return False # And a quick example of convergence is shown below. # In[19]: def eg_convergence_plot(): # Initial setup T = 150000 step = 1 tolerance = 1e-5 # Convergence tolerance v_constant = generate_value_vector(T+1, "constant") r_top = generate_risk_vector(T+1, "top") # Initialize variables Ew = 0 prod_term = 1 Ew_list = [] prev_Ew = None # Variable to store the previous E(w) for convergence check # Start the timer start_time = time.time() # Loop through each period, but only store the E(w) every 'step' periods for t in range(1, T + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v_constant[t-1:t], r_top[t-1:t]) # If the current period is a multiple of 'step', then store the E(w) if t % step == 0: Ew_list.append(Ew) # Convergence check if prev_Ew is not None: if has_converged(Ew, prev_Ew, tolerance): print(f"Converged at period {t}, E(w)={Ew:.6f}") break prev_Ew = Ew # Plot plt.figure(figsize=(10, 6)) plt.plot(range(step, len(Ew_list) * step + 1, step), Ew_list, color='b') plt.title(f"E(w) for Constant Risk & Constant Value") plt.xlabel("Period") plt.ylabel("E(w)") plt.show() eg_convergence_plot() # Already, this feels like an insightful result. Suppose that, following the Time of Perils hypothesis, you thought that the world has a 20% chance of ending this century and then it falls to a much lower value of 1% of a catastrophe each century. Then, with constant value, the expected value of the whole of the future is 8,000 years of the value of today's world. Though constant value is an unlikely assumption, this is surprising given that the probability of surviving until an arbitrarily distant future is positive (though of course, it converges to zero, which guarantees convergence of $E(w)$ overall, as demonstrated in the technical report). # ## Grid: Scenarios of E(w) # We are now ready to combine the different cases for both risk and value. We display these various scenarios in a grid. # In[20]: def grid_risk_titles(r_type, axes=True): if axes: if r_type == 'constant': plt.title('Constant Risk') elif r_type == 'top': plt.title('Time of Perils') elif r_type == 'eras': plt.title('Two Great Filters') elif r_type == "exponential_decay": plt.title("Exponential Decay") else: axes.set_title(f"{r_type} Risk") # fix middle graphs like above # if not axes: # In[225]: def Ew_grid(T_value=1000, T_risk=1000, T_middle=135000, tolerance=1e-5, r_types = ["constant", "top", "eras", "exponential_decay"], value_types = ["constant", "linear", "quadratic", "cubic", "logistic"], r_infty=1e-9): # Original implementation without has_converged and without calculate_Ew_incremental # (it would take around 24min to complete partial output (only constant and linear value cols) when T=10000 and step=100) colors_value = ["b", "g", "r", "c", "m"] colors_risk = ["b", "g", "r", "m"] plt.figure(figsize=(30, 25)) T = T_value # First row: different value vectors for i, v_type in enumerate(value_types): plt.subplot(5, 6, i + 2) # +2 to leave the first subplot empty v = generate_value_vector(T, v_type) plt.plot(range(1, T + 1), v, color=colors_value[i]) plt.title(f"{v_type} value") plt.xlabel("years") plt.ylabel("value") T = T_risk # First column: different risk vectors for i, r_type in enumerate(r_types): plt.subplot(5, 6, (i + 1) * 6 + 1) if r_type == "exponential_decay": r = generate_risk_vector(100000, r_type, r_infty=r_infty) plt.plot(range(1, 100000 + 1), r, color=colors_risk[i]) else: r = generate_risk_vector(T, r_type, eras_periods=[100, 500, 100, T - 700], eras_risks=[0.0022289477, 0.0001, 0.0022289477, 0.0001]) plt.plot(range(1, T + 1), r, color=colors_risk[i]) grid_risk_titles(r_type) plt.xlabel("years") plt.ylabel("risk") T = T_middle # Middle plots: Ew for each combination of value and risk for i, v_type in enumerate(value_types): for j, r_type in enumerate(r_types): # # Custom T for some exponential decay cases if r_type == "exponential_decay": T = 105000 # if r_type == "exponential_decay" and v_type == "cubic": # T = 210000 # elif r_type == "exponential_decay" and v_type == "quadratic": # T = 1000000 # elif r_type == "exponential_decay" and v_type == "linear": # T = 15000000 # # Otherwise, use the default T else: T = T_middle plt.subplot(5, 6, (j + 1) * 6 + (i + 2)) # +2 to skip the first column v = generate_value_vector(T+1, v_type) r = generate_risk_vector(T+1, r_type, eras_periods=[100, 500, 100, T - 700], eras_risks=[0.0022289477, 0.0001, 0.0022289477, 0.0001], r_infty=r_infty) Ew = 0 prod_term = 1 Ew_list = [] prev_Ew = None step = 1 # Calculate E(w) every "step" periods (previously tweaked to 1000 to accelerate the process during tests) for t in range(1, T + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v[t-1:t], r[t-1:t]) if t % step == 0: Ew_list.append(Ew) # if prev_Ew is not None and has_converged(Ew, prev_Ew, tolerance): # break prev_Ew = Ew plt.plot(range(step, len(Ew_list) * step + 1, step), Ew_list, color=colors_risk[j]) # improve the titles' format for each of the middle plots if r_type == 'constant': r_name = 'Constant' elif r_type == 'top': r_name = 'Time of Perils' elif r_type == 'eras': r_name = 'Two Great Filters' elif r_type == "exponential_decay": r_name = "Exponential Decay" else: r_name = f"{r_type}" plt.title(f"{r_name} risk with {v_type} value") plt.xlabel("years") plt.ylabel("E(w)") plt.tight_layout() plt.show() #runtimes: # 17s for T = 1 million # 7s for T = 300000 # Let us obtain the grid. Consider a relatively long horizon of $T=165,000$ years. Recall $E(w)$ at year $y$ is cumulative, it's the $E(w)$ if the length of the universe was $T=y$. # In[226]: Ew_grid() #runtime is about 22 seconds with exp decay exceptions and 3.6s without # $T=165,000$ for all scenarios above, except for exponential decay risk. Exponential decay does not converge in a similar fashion, below we test it further with a 10 million year horizon. # Convergence is always present in the 20 scenarios above. By contrast, there is no convergence for exponentially decaying risk when $r\to 0$ and $v_t \to \infty$ as $t \to \infty$ (see appendix for this example). This matches the results shown by the report. # # The Value of Existential Risk Mitigation # ## Risk after mitigation action $M$ # # We now define a function that returns the $r^\prime$ risk vector after performing a mitigating action $M$. # # Parameters: # - r (list): The original risk vector. # - f (float): The mitigation factor, between 0 and 1. # - P (int): The persistence of the mitigation, in periods. # # Returns: # - list: The modified risk vector. # In[143]: def apply_mitigation(r, f, P): # Modify a risk vector by applying a mitigation action. # Copy the original risk vector to avoid modifying it directly r_modified = r.copy() # Apply the mitigation factor for the first P periods for i in range(min(P, len(r))): # min in case P exceeds length of r r_modified[i] = r[i] * (1 - f) return r_modified # Example usage: original_risk = generate_risk_vector(r_high=0.1, r_type="constant", T=100) mitigated_risk = apply_mitigation(original_risk, 0.2, 5) # This will reduce the risk by 20% for the first 5 periods. # Print the original and mitigated risk vectors print("Original risk vector for the first 10 periods:", [round(r, 8) for r in original_risk[:10]]) print("Mitigated risk vector for the first 10 periods:", [round(r, 8) for r in mitigated_risk[:10]]) # To save computational effort, it will be useful have the history of ```Ew```s for each period, so we will return that as well in a new function. # In[176]: def calculate_Ew_vector_up_to_t (t, v, r): Ew = 0 prod_term = 1 Ew_list = [] prev_Ew = None for i in range(1, t + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v[i-1:i], r[i-1:i]) Ew_list.append(Ew) return Ew_list # Example usage T = 15 v_constant = generate_value_vector(T, "constant") r_constant = generate_risk_vector(T, "constant") print(f"The length of the E(w) vector up to period {T} is: {len(calculate_Ew_vector_up_to_t(T, v_constant, r_constant))}") print(f"The length of the v_constant vector up to period {T} is: {len(v_constant)}") print(f"The length of the r_constant vector up to period {T} is: {len(r_constant)}") print(f"The E(w) vector up to period {T} is: {calculate_Ew_vector_up_to_t(T, v_constant, r_constant)}") # Given the mitigated risk vector, we can now calculate the mitigated E(M) up to each period (i.e. the value of mitigation if the universe suddenly ended $t$ periods from now). Our function calculates $ E(M) = E(w \,|\, M) - E(w) $. In this function, we will utilize the previously defined helper functions to compute $ E(w) $ with and without mitigation and then return the difference. # In[177]: def expected_value_vector_of_M_up_to_t(T, r_type = "constant", v_type = "constant" , f = 0.5, P = 5, eras_periods = [100, 500, 100, T - 700], eras_risks = [0.0022289477, 0.0001, 0.0022289477, 0.0001]): v = generate_value_vector(T, v_type) r_original = generate_risk_vector(T, r_type=r_type, eras_periods=eras_periods, eras_risks=eras_risks) r_mitigated = apply_mitigation(r_original, f, P) Ew_original_vector = calculate_Ew_vector_up_to_t(T, v, r_original) Ew_mitigated_vector = calculate_Ew_vector_up_to_t(T, v, r_mitigated) # Calculate the difference E(M) = E(w | M) - E(w) for each period expected_value_diff_vector = np.array(Ew_mitigated_vector) - np.array(Ew_original_vector) # Test: print lengths of vectors # print(f"Length of Ew_original_vector: {len(Ew_original_vector)}") # print(f"Length of Ew_mitigated_vector: {len(Ew_mitigated_vector)}") # print(f"Length of expected_value_diff_vector: {len(expected_value_diff_vector)}") # print(f"Length of v: {len(v)}") # print(f"Length of r_original: {len(r_original)}") # print(f"Length of r_mitigated: {len(r_mitigated)}") return expected_value_diff_vector # Example usage T=15 result = expected_value_vector_of_M_up_to_t(T) print("\n") print(f"The expected value of M up to period {T} is: {result}") # Can also see directly v = generate_value_vector(T, v_type="constant") r_original = generate_risk_vector(T, r_type="constant") r_mitigated = apply_mitigation(r_original, f=0.5, P=5) Ew_original_vector = calculate_Ew_vector_up_to_t(T, v, r_original) Ew_mitigated_vector = calculate_Ew_vector_up_to_t(T, v, r_mitigated) print(f"The original E(w) vector up to period {T} is: {Ew_original_vector}") print(f"The mitigated E(w) vector up to period {T} is: {Ew_mitigated_vector}") print("\n") # Quick coomparison print(f"The expected value of M by period {6} is: {result[5]}") print(f"The original E(w) by period {6} is: {Ew_original_vector[5]}") print(f"The mitigated E(w) by period {6} is: {Ew_mitigated_vector[5]}") print(f"The difference between the mitigated and original E(w) by period {6} is: {Ew_mitigated_vector[5] - Ew_original_vector[5]}") print(f"This is equal to the expected value of M by period {6}: {result[5]==Ew_mitigated_vector[5] - Ew_original_vector[5]}") # We now have all the necessary ingredients to compare the value of $M$ that each scenario obatins. Let's observe the value of worlds with and without the mitigation action side-by-side. # # We write a function where we can specify the mitigation factor and we plot the value of the world without mitigation (as the benchmark) and with mitigation. For the mitigation case we plot the value of the world for different values of P (the number of periods for which the mitigation is applied). In particular, we plot P = 1,5,50,500,T. We use the constant risk vector and the constant value vector for this example. # # In[25]: def plot_mitigated_vs_unmitigated_Ew(T, f, Ps=[1, 5, 50, 500, 1000], r_type="constant", v_type="constant"): # Initialize plot fig, axes = plt.subplots(1, 2, figsize=(14, 6)) # fig, axes = plt.subplots(1, 2, figsize=(14, 6), dpi=300) # for high resolution # Generate original risk vector r_original = generate_risk_vector(T, r_type=r_type) # Plot risk vectors axes[0].plot(r_original, label="Original Risk", color="blue") for P in Ps: r_mitigated = apply_mitigation(r_original, f, P) axes[0].plot(r_mitigated, linestyle="--", label=f"Mitigated Risk (P={P})") axes[0].set_title("Risk Over Time") axes[0].set_xlabel("Period") axes[0].set_ylabel("Risk") axes[0].legend() # Generate original value vector v = generate_value_vector(T, v_type=v_type) # Calculate and plot E(w) without mitigation Ew_original_vector = calculate_Ew_vector_up_to_t(T, v, r_original) axes[1].plot(Ew_original_vector, label="E(w) without Mitigation", color="blue") # Calculate and plot E(w) with mitigation for P in Ps: r_mitigated = apply_mitigation(r_original, f, P) Ew_mitigated_vector = calculate_Ew_vector_up_to_t(T, v, r_mitigated) axes[1].plot(Ew_mitigated_vector, linestyle="--", label=f"E(w) with Mitigation (P={P})") axes[1].set_title("Expected Value of the World Over Time") axes[1].set_xlabel("Period") axes[1].set_ylabel("E(w)") axes[1].legend() plt.tight_layout() plt.show() # Example usage T=1001 plot_mitigated_vs_unmitigated_Ew(T, 0.5) # Tests: # # - It is useful to cross check these results against closed form solution for constant value and constant risk # # - It should match and the values that the graphs converge to. # # - Let us first check the closed form solution for constant value and constant risk. We know that the expected value of the world is given by the following formula: # # - What about the other cases? # In[26]: # Now we write code that gives us the same plots but until convergence by using the has_converged function def plot_mitigated_vs_unmitigated_Ew_convergence(T, f=0.5, Ps=[1, 5, 50, 500, T], r_type="constant", v_type="constant", tolerance=1e-5): # Initialize variables Ew_original = 0 prod_term_original = 1 Ew_list_original = [] prev_Ew_original = None Ew_mitigated = {P: 0 for P in Ps} prod_term_mitigated = {P: 1 for P in Ps} Ew_list_mitigated = {P: [] for P in Ps} prev_Ew_mitigated = {P: None for P in Ps} converged_flags = {P: False for P in Ps} # Flags to indicate if E(w) has converged for each P # Generate the constant value and risk vectors v = generate_value_vector(T+1, v_type=v_type) r_original = generate_risk_vector(T+1,r_type=r_type) for t in range(1, T + 1): # Update original E(w) values Ew_original, prod_term_original = calculate_Ew_incremental(Ew_original, prod_term_original, v[t-1:t], r_original[t-1:t]) # Update mitigated E(w) values for each P for P in Ps: r_mitigated = apply_mitigation(r_original, f, P) Ew_mitigated[P], prod_term_mitigated[P] = calculate_Ew_incremental(Ew_mitigated[P], prod_term_mitigated[P], v[t-1:t], r_mitigated[t-1:t]) # Store and check the E(w) Ew_list_original.append(Ew_original) for P in Ps: Ew_list_mitigated[P].append(Ew_mitigated[P]) if prev_Ew_mitigated[P] is not None and not converged_flags[P]: if has_converged(Ew_mitigated[P], prev_Ew_mitigated[P], tolerance): print(f"Converged at period {t}, E(w) with P={P} is {Ew_mitigated[P]:.6f}") converged_flags[P] = True prev_Ew_original = Ew_original for P in Ps: prev_Ew_mitigated[P] = Ew_mitigated[P] # Plot plt.figure(figsize=(14, 6)) plt.plot(range(1, len(Ew_list_original) + 1), Ew_list_original, label="E(w) without mitigation", color='b') for P in Ps: plt.plot(range(1, len(Ew_list_mitigated[P]) + 1), Ew_list_mitigated[P], linestyle="--", label=f"E(w) with P={P}") plt.title(f"E(w) for {r_type} Risk & {v_type} Value") plt.xlabel("Period") plt.ylabel("E(w)") plt.legend() plt.show() # Example usage T=11000 plot_mitigated_vs_unmitigated_Ew_convergence(T) # This graph seems to suggest that halving the risk globally doubles total E(w). Is there a rule of this? cf with closed form solution in thorstad # Let us do the same exercise with ToP risk vector and constant value vector # # In[39]: # Let us do the same exercise with Top risk vector and constant value vector plot_mitigated_vs_unmitigated_Ew_convergence(T=50000, r_type="top", v_type="constant") #runtime is about 1min # The relative difference with respect to $E(w)$ in top & constnat value is much smaller, but the absolute difference is much larger. This is because the value of the world is much larger in the ToP case. # Let us plot the expected value of $M$ for different values of $P$. We will use the constant value vector and the constant risk vector. # In[33]: # Use expected_value_vector_of_M_up_to_t to calculate the expected value of M up to period T for each P then plot the results in one graph def plot_expected_value_of_M_up_to_t(T, Ps=[1, 5, 50, 500, T], r_type="constant", v_type="constant", f=0.5): # Initialize plot fig, axes = plt.subplots(1, 1, figsize=(14, 6)) # Generate original risk vector r_original = generate_risk_vector(T, r_type=r_type) # Generate original value vector v = generate_value_vector(T, v_type=v_type) # Calculate and plot E(w) without mitigation Ew_original_vector = calculate_Ew_vector_up_to_t(T, v, r_original) axes.plot(Ew_original_vector, label="E(w) without Mitigation", color="blue") # Plot expected valye of M up to period T for each P for P in Ps: Em_vector = expected_value_vector_of_M_up_to_t(T, r_type, v_type, f=f, P=P) axes.plot(Em_vector, linestyle="--", label=f"E(M) (with P={P})") axes.set_title(f"Expected Value of M up to the Period ({r_type} r, {v_type} v, f = {f}, T = {T})") axes.set_xlabel("Period") axes.set_ylabel("E(M)") axes.legend() plt.tight_layout() plt.show() # Example usage T=1000 plot_expected_value_of_M_up_to_t(T, Ps=[1, 5, 50, 500, T], r_type="constant", v_type="constant") # This example bears highlighting. In a sense, we can already see that persistence will be a major factor in determining how valuable a mitigation action is. If the mitigation action is not persistent, then it is not very valuable. This is because the risk of extinction will return to its original value after the mitigation action is over. # # In the plot above we can take $E(w)$ as the benchmark amount of value available in expectation given our present circumstances. With that in mind, when $P=1,5,50$ the value of $M$ is simply not on the scale of $E(w)$, let alone on an _astronomical_ scale. However, when $P=500, T$ the value of $M$ is on the order of magnitude of of $E(w)$. Next, let us examine convergence and then test a different case: Great Filters, with $F=2$. # In[34]: T=10000 plot_expected_value_of_M_up_to_t(T, Ps=[1, 5, 50, 500, T], r_type="constant", v_type="constant") # The graph above would suggest that the only reason global risk reduction isn't astronomically valuable is because it E(w) wasn't astronomically valuable in the first place. Otherwise, at least in this case, it seems like $M$ with permanent effects would deserve as much counterfactual credit as the entire expected value of the world itself. # Now let us fix the persistence $P$ and try with different values of $f$. # In[180]: #Try again with differnt values of f # We plot the expected value of M up to period T for each f # This time we use the same P=T for all f def plot_expected_value_of_M_up_to_t_different_fs(T, fs=[0.1, 0.3, 0.5, 0.7, 0.9], r_type="constant", v_type="constant", r_high=0.0022289477, P=T, hide_benchmark=False, hide_ratios=False): # Initialize plot fig, axes = plt.subplots(1, 1, figsize=(14, 6)) # Empty list for storing final E(M) values for each f final_Em = [] # Generate original risk vector r_original = generate_risk_vector(T, r_type=r_type) # Generate original value vector v = generate_value_vector(T, v_type=v_type) # Calculate and plot E(w) without mitigation Ew_original_vector = calculate_Ew_vector_up_to_t(T, v, r_original) if not hide_benchmark: # If hide_benchmark is False, then plot the benchmark E(w) too axes.plot(Ew_original_vector, label="E(w) without Mitigation", color="blue") # Plot expected valye of M up to period T for each f for f in fs: Em_vector = expected_value_vector_of_M_up_to_t(T, r_type, v_type, f=f, P=P) axes.plot(Em_vector, linestyle="--", label=f"E(M) (with f={f})") final_Em.append(Em_vector[-1]) axes.set_title(f"Expected Value of M up to the Period ({r_type} r, {v_type} v and P={P})") axes.set_xlabel("Period") axes.set_ylabel("E(M)") axes.legend() plt.tight_layout() plt.show() # It's interesting to calculate some ratios of E(M) wrt E(w) for each f print(f"Final E(M) values for each f: {final_Em}") if not hide_ratios: for i, f in enumerate(fs): print(f"Ratio of E(M) to E(w) for f={f}: {final_Em[i]/Ew_original_vector[-1]}") # Example usage T=10000 plot_expected_value_of_M_up_to_t_different_fs(T, fs=[0.1, 0.25, 0.5, 0.75, 0.9], r_type="constant", v_type="constant") # And with really high f values # T=5000000 #30s to run T=10000 plot_expected_value_of_M_up_to_t_different_fs(T, fs=[0.9, 0.99, 0.999, 0.9999, 0.99999], r_type="constant", v_type="constant") # It's interesting to see that in this scenario the $f=0.5$ case converges to the benchmark. That is, the value of the mitigation action with $P=T$ is the same as $E(w)$ the value of the future without mitigation. # # - To do: Let us quickly check whether that depends on the risk level. # - This requires altering functions above so they can take customised r_high, which would be good. They should have all the options for full customisability. # From Thorstad, we know that $E(M)$ in the $P=1$ case when risk and value are constant is given by $E(M) = fv$. That is, value should be capped at $v$ as $f \to 1$. The plots below confirm this. # In[37]: plot_expected_value_of_M_up_to_t_different_fs(10000, fs=[0.9, 0.99, 0.999, 0.9999, 0.99999], r_type="constant", v_type="constant", P=1, hide_benchmark=True) # In terms of the interpretation, in the constant risk and value scenario. If persistence is of one year $P = 1$, even an action that essentially totally erradicates risk, is worth one year of value, as would be intuitively expected. # ## Grid: Scenarios of E(M) # First, based on ```plot_mitigated_vs_unmitigated_Ew_convergence```, we define a new function that plots the convergence of $E(M)$ for different values of $P$ and can be used in our grid. # In[192]: def middle_plot_mitigated_vs_unmitigated_Em_convergence(T, f=0.5, Ps=[1, 5, 50, 500, 2000], r_type="constant", v_type="constant", tolerance=1e-5, eras_periods=[100, 500, 100, T - 700], eras_risks=[0.0022289477, 0.0001, 0.0022289477, 0.0001], axes=None): if axes is None: fig, axes = plt.subplots() # Initialize variables Ew_original = 0 prod_term_original = 1 Ew_list_original = [] prev_Ew_original = None Ew_mitigated = {P: 0 for P in Ps} prod_term_mitigated = {P: 1 for P in Ps} Ew_list_mitigated = {P: [] for P in Ps} prev_Ew_mitigated = {P: None for P in Ps} converged_flags = {P: False for P in Ps} # Flags to indicate if E(w) has converged for each P # Generate the constant value and risk vectors v = generate_value_vector(T+1, v_type=v_type) r_original = generate_risk_vector(T+1,r_type=r_type, eras_periods=eras_periods, eras_risks=eras_risks) for t in range(1, T + 1): # Update original E(w) values Ew_original, prod_term_original = calculate_Ew_incremental(Ew_original, prod_term_original, v[t-1:t], r_original[t-1:t]) # Update mitigated E(w) values for each P for P in Ps: r_mitigated = apply_mitigation(r_original, f, P) Ew_mitigated[P], prod_term_mitigated[P] = calculate_Ew_incremental(Ew_mitigated[P], prod_term_mitigated[P], v[t-1:t], r_mitigated[t-1:t]) # Store and check the E(w) Ew_list_original.append(Ew_original) for P in Ps: Ew_list_mitigated[P].append(Ew_mitigated[P]) if prev_Ew_mitigated[P] is not None and not converged_flags[P]: if has_converged(Ew_mitigated[P], prev_Ew_mitigated[P], tolerance): print(f"{r_type} r and {v_type} v converged at period {t}, E(w) with P={P} is {Ew_mitigated[P]:.6f}") converged_flags[P] = True prev_Ew_original = Ew_original for P in Ps: prev_Ew_mitigated[P] = Ew_mitigated[P] # Plot axes.plot(Ew_list_original, label="E(w) without mitigation", color='b') for P in Ps: axes.plot(Ew_list_mitigated[P], linestyle="--", label=f"E(w) with P={P}") axes.set_title(f"E(w) for {r_type} r & {v_type} v") if r_type == "eras": # axes.axvspan(eras_periods[0], eras_periods[1], color='red', alpha=0.5) # axes.axvspan(eras_periods[3], eras_periods[3], color='blue', alpha=0.5) axes.set_title(f"Great Filters, F={int(len(eras_periods)/2)} & {v_type} v") axes.set_xlabel("Period") axes.set_ylabel("E(w)") axes.legend() # Test usage T=100 middle_plot_mitigated_vs_unmitigated_Em_convergence(T, f=0.5, r_type="constant", v_type="constant") plt.show() # and under eras T=2000 middle_plot_mitigated_vs_unmitigated_Em_convergence(T, f=0.5, r_type="eras", v_type="constant") plt.show() # We are now ready to evaluate $E(M)$ under the different cases for both risk and value. We display these various scenarios in a grid. For efficiency, we will specify manual Ts for each middle subplot. # In[28]: def plot_grid_Em_manual(Ts=[1000], f = 0.5, tolerance=1e-5, Ps=[1, 5, 50, 500, T]): # Have the option to set different values of T for each sub-plot # Ts is a list of T values for each sub-plot, in the order of the middle sub-plots in the grid # If Ts is shorter than the number of sub-plots, the last T value will be used for the remaining sub-plots Ts = Ts + [Ts[-1]] * (20 - len(Ts)) # Pad Ts with the last value if the list is shorter than 20 T = Ts[0] # We'll use this T value for the first row and column # Eras parameters eras_periods_here=[100, 500, 100, T - 700] eras_risks_here=[0.0022289477, 0.0001, 0.0022289477, 0.0001] # Initialize values, types, Ps and colors value_types = ["constant", "linear", "quadratic", "cubic", "logistic"] r_types = ["constant", "top", "eras", "exponential_decay"] Ps = Ps # We'll calculate E(M) for each of these values of P colors_value = ["b", "g", "r", "c", "m"] colors_risk = ["b", "g", "r", "m"] fig, all_axes = plt.subplots(5, 6, figsize=(30, 25)) # Turn off the axis for the top-left subplot. all_axes[0, 0].axis('off') # First row: different value vectors for i, v_type in enumerate(value_types): axes = all_axes[0, i + 1] v = generate_value_vector(T, v_type) axes.plot(v, color=colors_value[i]) axes.set_title(f"{v_type} value") axes.set_xlabel("years") axes.set_ylabel("value") # First column: different risk vectors for i, r_type in enumerate(r_types): axes = all_axes[i + 1, 0] r = generate_risk_vector(T, r_type, eras_periods=eras_periods_here, eras_risks=eras_risks_here) axes.plot(r, color=colors_risk[i]) # previously: plt.plot(range(1, T + 1), r, color=colors_risk[i]) for plotting against period earlier (shifted by 1) if r_type == "eras": axes.set_title(f"Great Filters, F={int(len(eras_periods_here)/2)}") else: axes.set_title(f"{r_type} risk") axes.set_xlabel("years") axes.set_ylabel("risk") # Middle plots: E(M) for each combination of value and risk for i, v_type in enumerate(value_types): for j, r_type in enumerate(r_types): # For the middle sub-plots, we'll use the T value from the Ts list T = Ts[j + 4 * i] # Update Ps and eras_periods_here according to the new T Ps = Ps eras_periods_here=[100, 500, 100, T - 700] # For debugging print(f"Plotting {v_type} v and {r_type} r for T={T}") axes = all_axes[j + 1, i + 1] middle_plot_mitigated_vs_unmitigated_Em_convergence(T, f=f, Ps=Ps, r_type=r_type, v_type=v_type, tolerance=tolerance, eras_periods=eras_periods_here, eras_risks=eras_risks_here, axes=axes) plt.tight_layout() plt.show() # In[45]: # For internal experiments only # skip this cell in general # if not skip_execution: def properties_Ts(Ts): length = len(Ts) print("Length of the list:", length) print("Total periods evaluated:", sum(Ts)) print("\nTable of periods used in each case:") row_labels = ['Constant r', 'ToP r', '2 Great Filters', 'Decaying r'] col_labels = ['Constant v', 'Linear v', 'Quadratic v', 'Cubic v', 'Logistic v'] rows, cols = 4, 5 array = [["" for _ in range(cols + 1)] for _ in range(rows + 1)] array[0][1:] = col_labels for r in range(1, rows + 1): array[r][0] = row_labels[r - 1] for r in range(1, rows + 1): for c in range(1, cols + 1): index = (c - 1) * rows + (r - 1) if index < length: array[r][c] = str(Ts[index]) col_widths = [max(len(str(value)) for value in column) for column in zip(*array)] for row in array: for value, width in zip(row, col_widths): print(f"{value:<{width}}", end=" ") print() # In[46]: # generate list of Ts from 100 onwards, increasing by a factor of 100 each time≥, 20 times Ts_eg = [1000 +100 * i for i in range(20)] # 1000, 1100, 1200, ..., 2900, 3000 This takes about 11 seconds to run properties_Ts(Ts_eg) # In[49]: # plot_grid_Em_manual(Ts=Ts_eg) # This takes about 10s to run # In[29]: Ts_experiment = [5000, 40000, 40000, 10000, 5000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000] # total periods considered in Ts_experiment is sum of elements in Ts_experiment # print(f"Total periods considered in Ts_experiment: {sum(Ts_experiment)}") # 1,130,000 plot_grid_Em_manual(Ts=Ts_experiment) # This assumes f = 0.5 # # Experiment with other Ps # plot_grid_Em_manual(Ts=Ts_experiment, Ps=[1, 50, 500, 10000]) # for some reason largest P was older one we passed of 2000, not 10000, this took 45m to run # Some headline results from the grid: # - The value of $M$ is, in expectation, consistently of the same magnitude as $E(w)$ for all $f\leq0.5$. [confirm] # - The value of $M$ (and of $E(w)$) explodes if the risk decays exponentially. # - As a corollary, an alternative version of ToP that had expontentially decaying risk after the high risk period would behave similarly. # # P makes a big difference in pretty much all the cases, think about why in exp decay difference between 50 and 500 is much bigger than 500 and 2000 # # Emphasise scale (e^7) orders of magnitude difference depending on growth paths # # Do comparison between base case within each row, how many orders of magnitude moving to a graph to the right buy. eg TOP 5 to 8 to 12 to 16 to 12 # # fixing col: those comparisons are within one order of magnitude difference, in a sense ToP isn't buying that much more value in constant [contrast to T?], the impact of how many orders of magnitude Top buys you depends on assumptions about growth. 4 orders of m in cubic # # Adding another filter keeps you in the same order of magnitude, only reduces you by 20% # # Discussion: one graph to another # # fix y axis if pulling out for discussion # # could show constant as good pedagogical example # # quadratic as more realistic alternative # # given these values # Risk: constant vs top # Risk: different filters # # Risk Exp decay to top # # 5 to 50 are most interesting persistences # 5 feels particularly realistic # # pick and choose your one... # # stretch exponential (Economically relevant?) # # Intuition: The more valueable we expect the world to be, the more it is worth saving it. There are no departures from that intuition. # # Organise these int 'lessons' wo explicitly saying lessons with bold titles. # # Do a table/grid but for E(M) # Lastly, for these grids, let us try with a very large persistence of $P=100,000$. This time, we will ommit the other $P$ values. After experimenting, we try with custom Ts for each subplot that let us observe convergence. # In[68]: Ts_largeP = [5000, 50000, 50000, 10000, 6000, 90000, 90000, 10000, 7000, 120000, 120000, 10000, 10000, 130000, 130000, 10000, 7000, 70000, 70000, 10000] properties_Ts(Ts_largeP) # In[70]: Ps_large = [100000] # plot_grid_Em_manual(Ts=Ts_largeP, Ps=Ps_large) # 1h 32m to run # Display the saved plot image display(Image(filename='E(M) large P.png')) # In[ ]: # %%skip # # This cell will be skipped during execution # print("This won't be executed") # To do in the future: It would be interesting to do a grid where we vary the number of filters. 1,2 we already have, it'd be good to see 3 and 5. # # Appendix # In[24]: # Grid of $E(M)$ but for different values of $f$ Ts_experiment = [5000, 40000, 40000, 10000, 5000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000] plot_grid_Em_manual(Ts=Ts_experiment, f = 0.0001) #101m to run # In[198]: # Zoom test eras vs top T=100000 middle_plot_mitigated_vs_unmitigated_Em_convergence(T, f=0.0001, r_type="top", v_type="cubic") plt.ylim([4.84e10 , 4.849e10]) # plt.xlim([90000, 100000]) plt.show() # # and under eras # T=100000 # middle_plot_mitigated_vs_unmitigated_Em_convergence(T, f=0.0001, r_type="eras", v_type="cubic") # plt.ylim([3.6e10, 4e10]) # plt.show() # ## Quick Check, script based on the concrete example from report # In[155]: # Write a script that evaluates the following equation \left( \sum ^{100}_{i=1} \left( 1-0.002229\right)^i + \left( 1-0.002229\right)^{100}\sum ^{9900}_{i=1}\left( 1-0.0001\right)^i \right) def evaluate_equation(r1,r2): sum1 = 0 for i in range(1, 101): sum1 += (1 - r1) ** i sum2 = 0 for i in range(1, 9901): sum2 += (1 - r2) ** i result = sum1 + (1 - r1) ** 100 * sum2 return result # Write a script that evaluates the following equation \sum ^{5}_{i=1} \beta ^i + \beta ^{5} \sum ^{95}_{i=1} \left( 1-0.002229\right)^i + \beta ^{5}\left( 1-0.002229\right)^{95}\sum ^{9900}_{i=1}\left( 1-0.0001\right)^i def evaluate_equation_M(r1,r2): sum1 = 0 beta=(1-r1/2) for i in range(1, 6): sum1 += beta ** i sum2 = 0 for i in range(1, 96): sum2 += (1 - r1) ** i sum3 = 0 for i in range(1, 9901): sum3 += (1 - r2) ** i result = sum1 + beta ** 5 * sum2 + beta ** 5 * (1 - r1) ** 95 * sum3 return result # In[163]: def example_check(): general = expected_value_vector_of_M_up_to_t(10000, r_type="eras", v_type="constant",f = 0.5, P = 5, eras_periods = [100, 10000 - 100], eras_risks = [0.002229, 0.0001])[-1] eval = evaluate_equation(0.002229, 0.0001) evalm = evaluate_equation_M(0.002229, 0.0001) direct = evalm - eval # Print the example with and without M print(f"When calculated directly E(w) = ",eval) print(f"When calculated directly E(w') = ",evalm) print(f"When calculated directly E(M) = ",direct) print("\n") # With our general method print(f"When calculated generally, E(M) = ",general) print("\n") # print difference equation of the two approaches print(f"The difference between in previous estimate is:" , general - direct) example_check() # tiny difference: 0.0000000000000004440892098500627 # ## Example constant value, persistence 5 # In[146]: def general_method(T, v_c, r_first_100, r_after_100): sum_r = 0 product_r = 1 for i in range(1, T + 1): sum_r += product_r * v_c if i <= 100: product_r *= (1 - r_first_100) else: product_r *= (1 - r_after_100) return sum_r def geometric_series_method(T, v_c, r_first_100, r_after_100): # First 100 years geometric series S_100 = (1 / (1 - (1 - r_first_100))) * (1 - (1 - r_first_100)**100) # After 100 years geometric series S_10000 = ((1 - r_first_100)**100 / (1 - (1 - r_after_100))) * (1 - (1 - r_after_100)**(T-100)) return v_c * (S_100 + S_10000) T = 10000 v_c = 1 r_first_100 = 0.00222894771 r_after_100 = 0.0001 print("General method:", general_method(T, v_c, r_first_100, r_after_100)) print("Geometric series method:", geometric_series_method(T, v_c, r_first_100, r_after_100)) # ## Table of $E(M)$ for different values and $P$ # In[169]: # A function just like middle_plot_mitigated_vs_unmitigated_Em_convergence but that outputs the final E(M) values for each scenario and for all values of P, instead of plotting them def calculate_final_Em(T, f=0.5, Ps=[1, 5, 50, 500, 2000], r_type="constant", v_type="constant", tolerance=1e-5, eras_risks=[0.0022289477, 0.0001, 0.0022289477, 0.0001]): eras_periods=[100, 500, 100, T - 700] # Initialize variables Ew_original = 0 prod_term_original = 1 Ew_list_original = [] prev_Ew_original = None Ew_mitigated = {P: 0 for P in Ps} Em = {P: 0 for P in Ps} prod_term_mitigated = {P: 1 for P in Ps} Ew_list_mitigated = {P: [] for P in Ps} prev_Ew_mitigated = {P: None for P in Ps} converged_flags = {P: False for P in Ps} # Flags to indicate if E(w) has converged for each P # Generate the value and risk vectors v = generate_value_vector(T+1, v_type=v_type) r_original = generate_risk_vector(T+1,r_type=r_type, eras_periods=eras_periods, eras_risks=eras_risks) for t in range(1, T + 1): # Update original E(w) values Ew_original, prod_term_original = calculate_Ew_incremental(Ew_original, prod_term_original, v[t-1:t], r_original[t-1:t]) # Update mitigated E(w) values for each P for P in Ps: r_mitigated = apply_mitigation(r_original, f, P) Ew_mitigated[P], prod_term_mitigated[P] = calculate_Ew_incremental(Ew_mitigated[P], prod_term_mitigated[P], v[t-1:t], r_mitigated[t-1:t]) # Store and check the E(w) Ew_list_original.append(Ew_original) for P in Ps: Ew_list_mitigated[P].append(Ew_mitigated[P]) if prev_Ew_mitigated[P] is not None and not converged_flags[P]: if has_converged(Ew_mitigated[P], prev_Ew_mitigated[P], tolerance): print(f"{r_type} r and {v_type} v converged at period {t}, E(w) with P={P} is {Ew_mitigated[P]:.6f}") converged_flags[P] = True prev_Ew_original = Ew_original for P in Ps: prev_Ew_mitigated[P] = Ew_mitigated[P] # calculate E(M) for each P for P in Ps: Em[P] = Ew_list_mitigated[P][-1] -Ew_list_original[-1] # Return the final E(M) values for each P return Em # In[170]: # Example usage T=1000 # calculate_final_Em(T, f=0.5, Ps=[1, 5, 50, 500, T], r_type="constant", v_type="constant") calculate_final_Em(T, f=0.5, Ps=[1, 5, 50, 500, 2000], r_type="constant", v_type="constant") # This example is interesting, quintupling persistence roughly increases value by 5 times. A 50-fold increase ($P$ of 1 vs 50) by 40 times and $P= 1$ to $P=500$ is 300 times more value. # In[196]: print(calculate_final_Em(10000, f=0.5, Ps=[1, 5, 50, 500, 1000, 2000, 5000], r_type="top", v_type="cubic")) print(calculate_final_Em(10000, f=0.5, Ps=[1, 5, 50, 500, 1000, 2000, 5000], r_type="eras", v_type="cubic")) # # print a dictionary with the difference between the two dictionaries # print({key: calculate_final_Em(10000, f=0.5, Ps=[1, 5, 50, 500, 2000], r_type="top", v_type="cubic")[key] - calculate_final_Em(10000, f=0.5, Ps=[1, 5, 50, 500, 2000], r_type="eras", v_type="cubic")[key] for key in calculate_final_Em(10000, f=0.5, Ps=[1, 5, 50, 500, 2000], r_type="top", v_type="cubic")}) # In[173]: 285267231.9386312-220245554.1461116 # In[26]: # Let us create a function that works exactly like plot_grid_Em_manual but it outputs the final E(M) values for each scenario and for all values of P, instead of plotting them def calculate_final_Ems(Ts=[1000], f = 0.5, tolerance=1e-5, Ps=[1, 5, 50, 500, 2000]): #2000 used to be T for Ps (which led to issues in loops) Ts = Ts + [Ts[-1]] * (20 - len(Ts)) # Pad Ts with the last value if the list is shorter than 20 # T = Ts[0] # We'll use this T value for the first row and column # # Eras parameters # eras_periods_here=[100, 500, 100, T - 700] eras_risks_here=[0.0022289477, 0.0001, 0.0022289477, 0.0001] # Initialize values, types, Ps and colors value_types = ["constant", "linear", "quadratic", "cubic", "logistic"] r_types = ["constant", "top", "eras", "exponential_decay"] Ps = Ps # We'll calculate E(M) for each of these values of P # Create a dictionary to store the final E(M) values for each scenario and for all values of P final_Ems = {v_type: {r_type: {P: None for P in Ps} for r_type in ["constant", "top", "eras", "exponential_decay"]} for v_type in ["constant", "linear", "quadratic", "cubic", "logistic"]} # print(final_Ems) for i, v_type in enumerate(value_types): for j, r_type in enumerate(r_types): # For the middle sub-plots, we'll use the T value from the Ts list T = Ts[j + 4 * i] # Update Ps and eras_periods_here according to the new T Ps = Ps eras_periods_here=[100, 500, 100, T - 700] # Add the final E(M) values for each P to the dictionary final_Ems[v_type][r_type] = calculate_final_Em(T, f=f, Ps=Ps, r_type=r_type, v_type=v_type, tolerance=tolerance, eras_periods=eras_periods_here, eras_risks=eras_risks_here) return final_Ems # In[80]: # Use it to calculate the final E(M) values for each scenario and for all values of P, as we saw in the grid Ts_experiment = [5000, 40000, 40000, 10000, 5000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000] # T=1e5 # later this is used for the grid with large P T=2000 calculate_final_Ems(Ts=Ts_experiment, tolerance=1e-5, Ps=[1, 5, 50, 500, T]) # as expected runtime is independednt of the T value in Ps # T=2000 and Ts_experiment had a runtime of 46m 41s # In[27]: # same again for f=0.0001 # Use it to calculate the final E(M) values for each scenario and for all values of P, as we saw in the appendix grid Ts_experiment = [5000, 40000, 40000, 10000, 5000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000, 10000, 120000, 120000, 10000] # T=1e5 # later this is used for the grid with large P T=2000 calculate_final_Ems(Ts=Ts_experiment, f=0.0001, tolerance=1e-5, Ps=[1, 5, 50, 500, T]) # as expected runtime is independednt of the T value in Ps # T=2000 and Ts_experiment with f =0.0001 had a runtime of 46m 41s # ## Varying Persistence: Plot value of mitigation for different values of P, constant value, constant risk # In[219]: # Plot value of mitigation for different values of P, constant value, constant risk def plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=[1, 5, 50, 500, T], r_type="constant", v_type="constant", tolerance=1e-5, eras_risks=[0.0022289477, 0.0001, 0.0022289477, 0.0001], dont_show=False): # use calculate_final_T and plot those values against P Em = calculate_final_Em(T, f, Ps, r_type, v_type, tolerance, eras_risks) #used to have eras_periods plt.plot(Ps, Em.values()) plt.title(f"Value of Mitigation for {r_type} r & {v_type} v") plt.xlabel("P") plt.ylabel("E(M)") if not dont_show: plt.show() # In[28]: # Example usage T=1000 plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=[1, 5, 10, 50, 100, 200, 400, 500, 600, 800, 900, T], r_type="constant", v_type="constant") # In[29]: # And for f= 1 bp T=1000 plot_value_of_mitigation_different_Ps(T, f=0.0001, Ps=[1, 5, 10, 50, 100, 200, 400, 500, 600, 800, 900, T], r_type="constant", v_type="constant") # Increasing persistence is important but it exhibits decreasing marginal returns in the concave fashion illustrated above. # # This result matches our intuitions. Because of its cumulative nature, the probability of avoiding extinction in the near-term is much higher than avoiding it long-term. That means that the contributions of $E(w)$, and thus of $E(M)$, are much higher in the short term than in the long term. So the marginal gains from increasing persistence are much higher in the short term than in the long term. In other words, for example, adding 1 year of persistence to a mitigation action that lasts 1 year is much more valuable than adding 1 year of persistence to a mitigation action that lasts 100 years. # # A general lesson follows: performing actions that have larger persistence is key, but increasing persistence is particularly valuable for low persistence values. # ### Investigate eras vs top. Given unexpected values in main results table. # In[220]: # compare top and eras under cubic T=30000 # set a Ps list with equally spaced values between 1 and T Ps_eg = np.linspace(1, T, 20) Ps_eg = [int(i) for i in Ps_eg] plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="top", v_type="cubic", dont_show=True) plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="eras", v_type="cubic", dont_show=True) plt.legend(["ToP", "Eras"]) plt.show() # until second filter arrives, it behaves as expectedly, then, for high enough P, it starts to behave differently, where Eras is reducing a higher risk by f than in ToP (where it stayed constant low and M is proportionally reducing a lower risk). Why is risk pessimism about second filter behaving differently, for high enough P, than risk pessimism about first filter? # In[223]: T=2000 # set a Ps list with equally spaced values between 1 and T Ps_eg = np.linspace(1, T, 20) Ps_eg = [int(i) for i in Ps_eg] plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="top", v_type="cubic", dont_show=True) plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="eras", v_type="cubic", dont_show=True) plt.legend(["ToP", "Eras"]) plt.show() # # # Until second filter arrives, it behaves as expectedly, then, for high enough P, it starts to behave differently, where Eras is reducing a higher risk by f than in ToP (where it stayed constant low and M is proportionally reducing a lower risk). Why is risk pessimism about second filter behaving differently, for high enough P, than risk pessimism about first filter? # # There are two effects: # # 1) higher risk means pr of survival longterm is lower so M's value integrated over time is lower # # 2) higher risk means f shaves off more when performed, since it's a % of the risk. # # On 2. Dave's work on discounting should then tell us that pessimism is slightly less bad for retaining astronomical value. # # Lesson if true: if there are interventions that could help with more than one filter (eg improving institutional decision making, or indeed if aligned AI is able to help with future filters), those interventions are particularly valueable, and they are a domain where _pessimism exhibits the opposite dynamic to what Thorstad observed: the more pessimism the larger the value in expectation of a mitigation effort._ # # To take stock, there's a sense of pessimism, the level of risk during the filter, where higher pessimism means mitigation actions have lower value in expectation. There we see no departure with respect to OAT. But there's another, where higher pessimism entails expecting more filters in humanity's future, and there pessimism exhibits the opposite dynamic to what Thorstad observed: the more pessimism the larger the value in expectation of a mitigation effort. # # # In[222]: T=100000 # set a Ps list with equally spaced values between 1 and T Ps_eg = np.linspace(1, T, 20) Ps_eg = [int(i) for i in Ps_eg] plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="top", v_type="cubic", dont_show=True) plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="eras", v_type="cubic", dont_show=True) plt.legend(["ToP", "Eras"]) plt.show() # 236m (3h 56m) to run # Asymptotically, we get the more familiar behaviour: ToP is more valuable than Eras, and, under that logic, the more pessimistic we are about the future, the less valuable mitigation is. The scope of the previous observaitons is a finite universe of specific duration (e.g. maximum of around 15,000 years above). # In[227]: T=18000 # set a Ps list with equally spaced values between 1 and T Ps_eg = np.linspace(1, T, 10) Ps_eg = [int(i) for i in Ps_eg] plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="top", v_type="cubic", dont_show=True) plot_value_of_mitigation_different_Ps(T, f=0.5, Ps=Ps_eg, r_type="eras", v_type="cubic", dont_show=True) plt.legend(["ToP", "Eras"]) plt.show() # ## Convergence in computational settings: nuances under exponential decay risk # ### The phenomenon # Let us return to `Ew_grid`, and focus on the exponential decay case. # In[35]: # Only do exponential decay row, with a 10 million cap. # Ew_grid(T_middle=10000000, r_types = ["exponential_decay"]) # runtime is about 40s # Ew_grid(T_middle=10000000, r_types = ["exponential_decay"]) # runtime is about 40s # Ew_grid(T_middle=100000, r_types = ["exponential_decay"]) #, r_infty=0.001) # runtime is about 18.6s but with settings overriding linear quadratic cubic Ts Ew_grid(T_middle=165000, r_types = ["exponential_decay"]) #, r_infty=0.001) # runtime is about 1.2s # Observe how, under cubic value, for a high enough $T$, we appear to lack convergence. Let us try a much larger $T$, where this effect can be seen for linear and quadratic value as well. # In[70]: Ew_grid(T_middle=10000000, r_types = ["exponential_decay"]) # (we are trying T=10 million) runtime is 49.2s # ### The first factor # Why is this happening? To explore this next, we need to look at the probability of extinction, on its own, without thinking about value growth. We will do this by plotting the probability of survival until time $t$. # In[47]: # write a function that for each r_type plots the cummulative probability of extinction against T without mitigation def plot_probability_of_survival_without_mitigation(T, r_type="constant", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-9): # generate the risk vector r = generate_risk_vector(T+1, r_type=r_type, r_high=r_high, r_low=r_low, eras_periods=eras_periods, eras_risks=eras_risks, r_0=r_0, lambda_val=lambda_val, r_infty=r_infty) #generate value vector under cubic for comparison v = generate_value_vector(T+1, v_type="cubic") # initialize a list to store the cummulative probability of survival prob_list = [] prob_times_v_list = [] prob = 1 for i in range(T+1): # print prob value every 0.1*T of the way through if i % (T/10) == 0: print(f"Probability of survival until t = {i} is {prob}. Here r_t = {r[i]}, v_t = {v[i]} and prob*v_t={prob*v[i]}.") # store each value of prob in a list prob_list.append(prob) # store rv prob_times_v_list.append(prob*v[i]) # update prob prob *= (1 - r[i]) # plot the list against T plt.plot(prob_list) plt.title(f"Probability of survival until t ('prob') under {r_type} r") plt.xlabel("t") plt.ylabel("Probability") plt.show() # plot rv against T plt.plot(prob_times_v_list) plt.title(f"prob*v_t under {r_type} r") plt.xlabel("t") plt.ylabel("prob*v_t ") plt.show() # In[48]: # Example with r_infty = 1e-9 T=300000 plot_probability_of_survival_without_mitigation(T, r_type="exponential_decay", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-9) # **Graphs**: # - The first graph represents the probability of survival over time. As expected, due to the exponential decay in risk, the survival probability drops sharply initially and then plateaus as time goes on. It never reaches zero, but it gets very close to it. # - The second graph is a plot of the product [probability of survival] times value for each $t$. This product represents the expected contribution to the series $E(w)$ at each time $t$. We see that this value spikes initially (indicating the high initial risk and high value) and then gradually diminishes over time. Eventually it starts slowly increasing again, just like the graphs below 'The phenomenon' section. # # In[120]: # Example for larger T T=int(1e7) plot_probability_of_survival_without_mitigation(T, r_type="exponential_decay", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-9) # In[136]: # Example with r_inf = 1e-3 T=50000000 plot_probability_of_survival_without_mitigation(T, r_type="exponential_decay", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-3) # It looks like we're hitting the smallest non-zero number representable by python. Given enough time, the value of the world -- when it isn't capped like with constant and logistic -- will be large enough so that the it outweighs this tiny number and $E(w)$ starts to explode. With usual 64-bit floating-point numbers, the normalized min is approximately 2.2e-308. We can locally check in the system which one it is. See below. # In[53]: import sys sys.float_info # This phenomena doesn't just happen with exponential decay, it also occurs with the others, eg constant below. But there, the value never outweights this probabilty of survival. # In[39]: # Example with constant T=500000 plot_probability_of_survival_without_mitigation(T, r_type="constant", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, # plot_probability_of_survival_without_mitigation(T, r_type="constant", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-9) # observe how at T = 350000, the probability stays at 1.107e-321 and never goes lower than this, as it should do # In[40]: # Example with constant risk=r_high=1e-6 T=50000000 plot_probability_of_survival_without_mitigation(T, r_type="constant", r_high=1e-6, lambda_val=-0.0001, r_infty=1e-9) # 20s runtime # observe how a lower constant risk (r_high) makes an important difference on this, the convergence is much slower and we haven't hit the smallest representable number yet # ### The second factor # The second factor is that, asymptotically, exponentially decreasing probability (which is a constant case that bounds the exponential decay case, see report for more detials) beats polynomial value growth every time. To develop an intuition for this, we should think about how in polynomial growth, eg cubic, while the percentage increase in value from, say, $t=2$ to $t=3$ (i.e. $2^3$ to $3^3$) is massive, the percentage increase from $1,000,000^3$ to $1,000,001^3$ is tiny. Overall, the effect is that the probability of survival is asymptotically shrinking by a constant proportional decrease (given by `r_infty`), whereas the polynomial value is asymptotically growing by a shrinking proportion. We can see this illustrated in the plots below. # What is the marginal percentage increase for each value of $t$ for constant, linear, quadratic, cubic and logistic vlaue? Let us show this in a grid of plots. # In[51]: # first something to control the number of significant figures import matplotlib.ticker as ticker # supress some limit warnings below import warnings def format_ticks(val, pos): return f'{val:.3g}' # Change '.3g' to control the number of significant figures # In[64]: def plot_grid_mar_value_increase(T, v_types=["constant", "linear", "quadratic", "cubic", "logistic"], last_1000=False, manual_ylim=False): # initialize the grid of plots fig, axes = plt.subplots(1, len(v_types), figsize=(20, 4)) # for each v_type, plot the marginal value increase for i, v_type in enumerate(v_types): # generate the value vector v = generate_value_vector(T+2, v_type=v_type) # initialize a list to store the marginal value increase mar_value_increase_list = [] for t in range(T+1): # store each value of in a list mar_value_increase_list.append((100*(v[t+1]-v[t]))/v[t]) # plot the list against T axes[i].plot(mar_value_increase_list) axes[i].set_title(f"Marginal value % increase for {v_type} v") axes[i].set_xlabel("t") axes[i].set_ylabel("100 (v_{t+1} - v_t) / v_t") axes[i].set_ylabel("new value - old over old") # for i, ax in enumerate(axes): # ax.yaxis.set_major_formatter(ticker.FuncFormatter(format_ticks)) if manual_ylim: axes[i].set_ylim(-0.001, 0.02) # supress some limit warnings below with warnings.catch_warnings(): warnings.simplefilter("ignore") # if options are set to True, only display the last 1000 periods if last_1000: # now set the range to display only last 1000 ts axes[i].set_xlim(T-1000, T) # Adjusted to right range # and adjust the ylim to be so that these are legible axes[i].set_ylim(min(mar_value_increase_list[T-1000:T]), max(mar_value_increase_list[T-1000:T])) # Specify ticks manually # axes[i].set_xticks([T-1000, T-800, T-600, T-400, T-200, T]) #overlaps for large T # axes[i].set_xticklabels([T-1000, T-800, T-600, T-400, T-200, T]) #overlaps for large T axes[i].set_xticks([T-1000, T-500, T]) axes[i].set_xticklabels([T-1000, T-500, T]) print(f"T = {T}") plt.tight_layout() plt.show() # In[65]: # Examples with increasing T plot_grid_mar_value_increase(1000, v_types=["constant", "linear", "quadratic", "cubic", "logistic"]) plot_grid_mar_value_increase(2000, v_types=["constant", "linear", "quadratic", "cubic", "logistic"]) plot_grid_mar_value_increase(50000, v_types=["constant", "linear", "quadratic", "cubic", "logistic"], manual_ylim=True) plot_grid_mar_value_increase(100000, v_types=["constant", "linear", "quadratic", "cubic", "logistic"], last_1000=True) # 2s runtime plot_grid_mar_value_increase(int(1e+6), v_types=["constant", "linear", "quadratic", "cubic", "logistic"], last_1000=True) # In[66]: # Just to emphasise our point we can do a dramatic example plot_grid_mar_value_increase(100000000, v_types=["constant", "linear", "quadratic", "cubic", "logistic"], last_1000=True) # 3m runtime # From the above we can observe that the marginal value increase for cubic has reached the $3\times10^-6$ mark after 100 million years. Eventually it'll go well below `r_infty` even when it is a tiny number like $1\times10^-9$. This is why the probability of survival will eventually be asymptotically decreasing faster than the value is asymptotically increasing, so $E(w)$ will eventually converge. # ## Closed Form Expressions # ### Constant Risk under Polynomial Value # Suppose that the risk is constant. Then, the expected value of the world is given by the following formula. # # --- # **Case: Constant** # For $ n = 0 $: # $ v_c \times \frac{1-r_c}{r_c} $ # # --- # # **Case: Linear** # For $ n = 1 $: # $ v_c \times \frac{1-r_c}{r_c^2} $ # # --- # # **Case: Quadratic** # For $ n = 2 $: # $ v_c \times \frac{(1-r_c)(2-r_c)}{r_c^3} $ # # --- # # **Case: Cubic** # For $ n = 3 $: # $ v_c \times \frac{(1-r_c)(1+4(1-r_c)+(1-r_c)^2)}{r_c^4} $ # # --- # We now define `asymptotic_Ew_constant_risk` function to compute the value for different cases. # In[51]: def asymptotic_Ew_constant_risk(v_type, v_c=1, r_c=0.0022289477): """ Computes the value based on n, v_c, and r_c. Parameters: - n (int): The order of the polynomial. (0, 1, 2, or 3) - v_c (float): Constant multiplier. - r_c (float): Risk factor, should be between 0 and 1. Returns: - float: The computed value. """ if v_type == "constant": return v_c * (1-r_c) / r_c elif v_type == "linear": return v_c * (1-r_c) / (r_c**2) elif v_type == "quadratic": return v_c * (1-r_c) * (2-r_c) / (r_c**3) elif v_type == "cubic": return v_c * (1-r_c) * (1 + 4*(1-r_c) + (1-r_c)**2) / (r_c**4) else: raise ValueError("Unsupported value for n. Value should be polynomial.") # In[86]: # what is the asymptotic value of E(w) for each v_type when r_c = 0.0022289477? print(f"{'v_type':<10} {'E(w)':<10}") # :<10 means left align with 10 spaces for v_type in ["constant", "linear", "quadratic", "cubic"]: print(f"{v_type:<10} {asymptotic_Ew_constant_risk(v_type, v_c=1, r_c=0.0022289477):.3e}") # 3 significant figures # All but constant are all considerably higher than in the main $E(w)$ grid because, here, we have not made any year adjustements for the closed form (i.e. `alpha = 1` implicitly and, for example, under cubic, the value in 3 years is $3^3$ here). Recall that in the grid we used `v = [(time_adjustment(i+1,alpha))**n for i in range(T)]` instead, where # ``` # def time_adjustment(input, alpha): # return alpha * input + (1 - alpha) # #alpha is simply 1/units_in_century # ``` # In[75]: # using this we plot the asymptotic E(w) for each v_type, for a range of r_c values def plot_asymptotic_Ew_constant_risk(v_types=["constant", "linear", "quadratic", "cubic", "logistic"], r_c_range=[0.0001, 0.0022289477], num_points=1000): v_c = 1 # we investigate r_c in the range of values between r_c_range[0] and r_c_range[1] r_c_values = np.linspace(r_c_range[0], r_c_range[1], num_points) # initialize the grid of plots fig, axes = plt.subplots(1, len(v_types), figsize=(20, 4)) # for each v_type, plot the asymptotic E(w) for i, v_type in enumerate(v_types): # initialize a list to store the asymptotic E(w) asymptotic_Ew_list = [] for r_c in r_c_values: # store each value of in a list asymptotic_Ew_list.append(asymptotic_Ew_constant_risk(v_type, v_c, r_c)) # plot the list against r_c axes[i].plot(r_c_values, asymptotic_Ew_list) axes[i].set_title(f"Asymptotic E(w) for {v_type} v") axes[i].set_xlabel("r_c") axes[i].set_ylabel("E(w)") plt.tight_layout() plt.show() # In[58]: # Useful to have the E(w) under constant risk and value when risk is tiny r_infty = 1e-9 print(f"{'v_type':<10} {'E(w)':<10}") # :<10 means left align with 10 spaces for v_type in ["constant"]: print(f"{v_type:<10} {asymptotic_Ew_constant_risk(v_type, v_c=1, r_c=1e-9):.3e}") # 3 significant figures # In[79]: plot_asymptotic_Ew_constant_risk(v_types=["constant", "linear", "quadratic", "cubic"]) plot_asymptotic_Ew_constant_risk(v_types=["constant", "linear", "quadratic", "cubic"], r_c_range=[0.0022289477+0.0005, 0.0022289477-0.0005]) plot_asymptotic_Ew_constant_risk(v_types=["constant", "linear", "quadratic", "cubic"], r_c_range=[0.0001, 0.0004]) plot_asymptotic_Ew_constant_risk(v_types=["constant", "linear", "quadratic", "cubic"], r_c_range=[1e-9, 1e-8]) # ### With time adjustments # # Given a constant risk $r_c$ and a multiplicative factor $\alpha$, we explore different value functions $v_i$ and compute the expected value $\mathbb{E}[w]$ under these. # # ##### Constant Value Function # For the constant value function: # $$ # \mathbb{E}(w) = v_c \frac{(1-r_c)}{r_c} # $$ # # ##### Linear Value Function # For the linear value function: # $$ # \mathbb{E}(w) = v_c \left[ \alpha \frac{(1-r_c)}{{r_c}^2} + (1-\alpha) \frac{(1-r_c)}{r_c} \right] # $$ # # ##### Quadratic Value Function # For the quadratic value function: # $$ # \mathbb{E}(w) = v_c \left[ \alpha^2 \frac{(1-r_c)(2-r_c)}{r_c^3} + 2\alpha(1-\alpha) \frac{(1-r_c)}{r_c^2} + (1-\alpha)^2 \frac{(1-r_c)}{r_c} \right] # $$ # # ##### Cubic Value Function # For the cubic value function: # $$ # \mathbb{E}(w) = v_c \left[ \alpha^3 \frac{(1-r_c)(1+4(1-r_c)+(1-r_c)^2)}{r_c^4} + 3\alpha^2(1-\alpha) \frac{(1-r_c)(2-r_c)}{r_c^3} + 3\alpha(1-\alpha)^2 \frac{(1-r_c)}{r_c^2} + (1-\alpha)^3 \frac{(1-r_c)}{r_c} \right] # $$ # # In[111]: def Ew_constant_risk_adjusted(v_c=1, r_c=0.0022289477, alpha=0.01, v_type="constant"): """Calculate expected value based on the type of value function.""" if v_type == "constant": return v_c * (1 - r_c) / r_c elif v_type == "linear": return v_c * (alpha * (1 - r_c) / r_c**2 + (1 - alpha) * (1 - r_c) / r_c) elif v_type == "quadratic": term1 = alpha**2 * (1 - r_c) * (2 - r_c) / r_c**3 term2 = 2 * alpha * (1 - alpha) * (1 - r_c) / r_c**2 term3 = (1 - alpha)**2 * (1 - r_c) / r_c return v_c * (term1 + term2 + term3) elif v_type == "cubic": term1 = alpha**3 * (1 - r_c) * (1 + 4 * (1 - r_c) + (1 - r_c)**2) / r_c**4 term2 = 3 * alpha**2 * (1 - alpha) * (1 - r_c) * (2 - r_c) / r_c**3 term3 = 3 * alpha * (1 - alpha)**2 * (1 - r_c) / r_c**2 term4 = (1 - alpha)**3 * (1 - r_c) / r_c return v_c * (term1 + term2 + term3 + term4) else: raise ValueError("Unsupported value type provided. Choose from: constant, linear, quadratic, cubic.") # In[201]: # make a table with all the values of E(w) for each v_type print(f"{'v_type':<10} {'E(w)':<10}") # :<10 means left align with 10 spaces for v_type in ["constant", "linear", "quadratic", "cubic"]: print(f"{v_type:<10} {Ew_constant_risk_adjusted(v_type=v_type):.3e}") # 3 significant figures # ## More cases of long-run behaviour under exponentail decay risk # In[23]: def eg_quicker_ew_large_Ts(T, vtype="cubic", rtype="exponential_decay", r_infty=1e-9): # Initial setup step = 1 # what we're recording for our graph v_constant = generate_value_vector(T+1, v_type=vtype) r_top = generate_risk_vector(T+1, r_type=rtype, r_infty=r_infty) # Initialize variables Ew = 0 prod_term = 1 Ew_list = [] # Start the timer start_time = time.time() # Loop through each period, but only store the E(w) every 'step' periods for t in range(1, T + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v_constant[t-1:t], r_top[t-1:t]) # If the current period is a multiple of 'step', then store the E(w) if t % step == 0: Ew_list.append(Ew) # Print the run time up until this point # elapsed_time = time.time() - start_time # print(f"Elapsed time after {t} periods: {elapsed_time:.2f} seconds") # Plot plt.figure(figsize=(10, 6)) plt.plot(range(step, T + 1, step), Ew_list, color='b') plt.title(f"E(w) for Exponential Decay Risk & {vtype} Value") # fix title if rtype isnt exponential decay if rtype != "exponential_decay": plt.title(f"E(w) for {rtype} Risk & {vtype} Value") plt.xlabel("Year") plt.ylabel("E(w)") plt.show() # In[24]: eg_quicker_ew_large_Ts(T=int(1e5)) # 0.1s runtime # eg_quicker_ew_large_Ts(T=int(1e6)) # 0.9s runtime # eg_quicker_ew_large_Ts(T=int(1e7)) # 9s runtime # eg_quicker_ew_large_Ts(T=int(1e8)) # 2m runtime # final test with T=1e9 and smaller r_infty eg_quicker_ew_large_Ts(T=int(0.5e9), r_infty=1e-7) # 17/2 m runtime approx # In[29]: eg_quicker_ew_large_Ts(T=int(1e5), r_infty=1e-7) # 17/2 m runtime approx eg_quicker_ew_large_Ts(T=int(5e5), r_infty=1e-7) # 17/2 m runtime approx # In[63]: # and same tests again but value is linear # run times are same as above, depedn only on T # eg_quicker_ew_large_Ts(T=int(1e5), vtype="linear") # eg_quicker_ew_large_Ts(T=int(1e6), vtype="linear") # eg_quicker_ew_large_Ts(T=int(1e6), vtype="linear", r_infty=1e-7) # eg_quicker_ew_large_Ts(T=int(1e7), vtype="linear") # eg_quicker_ew_large_Ts(T=int(1e8), vtype="linear") # again but with a bigger r_infty eg_quicker_ew_large_Ts(T=int(1e6), vtype="linear", r_infty=1e-7) eg_quicker_ew_large_Ts(T=int(0.2e8), vtype="linear", r_infty=1e-7) eg_quicker_ew_large_Ts(T=int(0.5e8), vtype="linear", r_infty=1e-7) eg_quicker_ew_large_Ts(T=int(1e8), vtype="linear", r_infty=1e-7) # This perfectly illustrates, using a higher `r_infty`, how $E(w)$ momentarily converges, then shoots up, only to eventually converge again when the proportionally shrinking survival probability beats the proportionaly increase in value. # One meaningful thing here is that, all of a sudden, the **choice of exactly how big $T$ is, really matters**. # - Above, if $T \in (10^4, 10^6)$, then $E(w) \approx 2750$, whereas if $T>0.6\times 10^8$, then $E(w) \approx 3000$. # - Suppose, for exmaple, that we believed that our dying sun was an inescapable filter. It's estimated that the Earth will become uninhabitable due to the increasing luminosity of the Sun long before it becomes a red giant - possibly in about 1 to 1.5 billion years from now. With this in mind, we might think that $T=1,000,000,000$ is a reasonable choice. In this casee this would provide a value cap for low enough `r_infty` under exponential decay. # Lastly, we look at ther role of `r_infty` for exponential decay risk under cubic value, having fixed lambda at -0.0001. # In[32]: # write a function that plots Ew versus r_infty for a given T under exponential decay for some value trajectory def plot_Ew_vs_r_infty(T, v_type="cubic", r_type="exponential_decay", r_infty_range=[1e-9, 1e-3], num_points=10): # create a list of r_infty values r_infty_values = np.linspace(r_infty_range[0], r_infty_range[1], num_points) print(r_infty_values) # initialise v v = generate_value_vector(T+1, v_type=v_type) # initialize a list to store the E(w) values Ew_list = [] # for each r_infty value, calculate the E(w) and store it in the list for r_infty in r_infty_values: r = generate_risk_vector(T+1, r_type=r_type, r_infty=r_infty) Ew = 0 prod_term = 1 for t in range(1, T + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v[t-1:t], r[t-1:t]) Ew_list.append(Ew) # plot the list against r_infty plt.plot(r_infty_values, Ew_list) plt.title(f"E(w) for {v_type} v & {r_type} r") plt.xlabel("r_infty") plt.ylabel("E(w)") plt.show() # In[33]: plot_Ew_vs_r_infty(int(1e4), v_type="cubic", r_type="exponential_decay", r_infty_range=[1e-9, 1e-3], num_points=10) plot_Ew_vs_r_infty(int(1e5), v_type="cubic", r_type="exponential_decay", r_infty_range=[1e-9, 1e-3], num_points=10) plot_Ew_vs_r_infty(int(1e6), v_type="cubic", r_type="exponential_decay", r_infty_range=[1e-9, 1e-3], num_points=10) # ## Many filters # Recall # # ``` # def generate_risk_vector_more_filters(T, r_type="constant", r_high=0.0022289477, r_low=0.0001, eras_periods=None, eras_risks=None, # r_0=0.0022289477, lambda_val=-0.0001, r_infty=1e-9, custom_r=None): # ... # elif r_type == "eras": # if eras_periods is None or eras_risks is None: # raise ValueError("For 'eras' type, both eras_periods and eras_risks must be provided.") # r = [] # for i, r_val in zip(eras_periods, eras_risks): # r.extend([r_val] * i) # ... # return r # ``` # # so far we've been working with at most 2 filters, modelled by # # ``` # eras_periods = [100, 500, 100, T - 700] # # eras_risks = [0.0022289477, 0.0001, 0.0022289477, 0.0001] # ``` # In[36]: # Next, create a function that takes in the number of filters F and outputs eras_periods and eras_risks for the eras risk vector. # For the eras_risks it alternates between the two values of r_high and r_low, and # for the eras_periods it alternates between the two values of ell_danger and ell_safety (for in high risk era duration and low risk era duration respectively). # The last era's duration should be T - (F)*ell_danger - (F-1)*ell_safety def generate_filter_eras(T, F, r_high, r_low, ell_danger, ell_safety): # initialize lists to store the eras periods and risks eras_periods = [] eras_risks = [] # for each filter, append the risks levels and era durations to the lists for i in range(F): eras_risks.append(r_high) eras_risks.append(r_low) for i in range(F-1): eras_periods.append(ell_danger) eras_periods.append(ell_safety) # append the last two eras' duration to the periods list eras_periods.append(ell_danger) eras_periods.append(T - (F)*ell_danger - (F-1)*ell_safety) # return the lists return eras_periods, eras_risks # In[48]: # Example usage generate_filter_eras(5000, 2, 0.0022289477, 0.0001, 100, 500) # In[49]: # one more example with 5 filters print(generate_filter_eras(10000, 5, 0.0022289477, 0.0001, 100, 500)) # some tests print("sum duration of all eras should be T") print(sum(generate_filter_eras(10000, 5, 0.0022289477, 0.0001, 100, 500)[0])) #== 10000 print("there should be 2*F eras") print(len(generate_filter_eras(10000, 5, 0.0022289477, 0.0001, 100, 500)[1])) #== 10 print("there should be F eras with risk r_high and F eras with risk r_low") print(sum(generate_filter_eras(10000, 5, 0.0022289477, 0.0001, 100, 500)[1])) print(5*0.0022289477 + 5*0.0001) # In[137]: # write the exact same function as before but plot alongside it how risk varies over time. the risk is different for each value, plotted in a different colour dotted line def plot_Ew_vs_F_and_risk(T, v_type="cubic", r_high=0.0022289477, r_low=0.0001, ell_danger=100, ell_safety=500, F_min=1, F_max=10, num_points=10, zoomed=False, zoom_small_value=False): # put the plots side by side if zoomed: fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 8)) else: fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) # create a list of F values F_values = np.linspace(F_min, F_max, num_points) # initialise v v = generate_value_vector(T+1, v_type=v_type) # initialize a list to store the E(w) values Ew_list = [] # for each F value, calculate the E(w) and store it in the list for F in F_values: eras_periods, eras_risks = generate_filter_eras(T, int(F), r_high, r_low, ell_danger, ell_safety) r = generate_risk_vector(T+1, r_type="eras", eras_periods=eras_periods, eras_risks=eras_risks) Ew = 0 prod_term = 1 for t in range(1, T + 1): Ew, prod_term = calculate_Ew_incremental(Ew, prod_term, v[t-1:t], r[t-1:t]) Ew_list.append(Ew) # plot the list against F ax1.plot(F_values, Ew_list) ax1.set_title(f"E(w) for {v_type} value with varying filters F") ax1.set_xlabel("F") ax1.set_ylabel("E(w)") if zoom_small_value: ax1.set_ylim(0, (1e11)/F_max) # plot the risk vector against T ax2.plot(r, color="green") ax2.set_title(f" Risk profile under {F_max} Great Filters") ax2.set_xlabel("t") ax2.set_ylabel("r") ax2.set_xlim(0, F_max*600+4000) if zoomed: # plot the risk vector again but zoomed in ax3.plot(r, color="green") ax3.set_title(f"Zoomed out: Risk profile under {F_max} Great Filters") ax3.set_xlabel("t") ax3.set_ylabel("r") plt.show() # In[132]: # Example usage plot_Ew_vs_F_and_risk(100000, v_type="cubic", r_high=0.0022289477, r_low=0.0001, ell_danger=100, ell_safety=500, F_min=1, F_max=10, num_points=10) # In[135]: plot_Ew_vs_F_and_risk(100000, v_type="cubic", r_high=0.0022289477, r_low=0.0001, ell_danger=100, ell_safety=500, F_min=1, F_max=100, num_points=10) # In[139]: plot_Ew_vs_F_and_risk(100000, v_type="cubic", r_high=0.0022289477, r_low=0.0001, ell_danger=100, ell_safety=500, F_min=1, F_max=100, num_points=50, zoom_small_value=True) # In[140]: # If we compare the value with 1 vs 100 filters, roughly, their ratio is: 5e10/0.1e9 # The first plot exhibits convex behaviour: as number of filters increases, $E(w)$ decreases by less and less. In particular $E(w)$ appears to converge in the example above (to 500 times less value than under Time of Perils where $F=1$). Filters in the future matter less and less since it is more and more unlikely we'll reach them.