#!/usr/bin/env python
# coding: utf-8

# Open In Colab

# # Value at Risk, VaR

# In this code, using Geometric Brownian Motion, we simulate returns, build a
# return distribution, and then find the VaR.

# ### VaR
#
# Value at risk (VaR): the loss that will be incurred in the event of an extreme
# adverse price change with some given, usually low, probability — i.e., a
# near-worst-case outcome. For example, 1% VaR means that 99% of returns will
# exceed the VaR and 1% of returns will be worse.
#
# Assuming that portfolio returns are normally distributed, the VaR is fully
# determined by the mean and standard deviation of the distribution.
#
# For example, VaR(1%, normal) = Mean - 2.33*SD
#
# To obtain a sample estimate of 1% VaR, we sort the observations from high to
# low. The VaR is the return at the 1st percentile of the sample distribution.
#
# For example, with 95% confidence, we expect that our worst daily loss will not
# exceed 4%. If we invest 100 dollars, we are 95% confident that our worst daily
# loss will not exceed 4 dollars (100 dollars x -4%).
# |Confidence Level |Two sided CV |One sided CV |
# |---|---|---|
# | 90% |1.64 |1.28 |
# | 95% |1.96 |1.64 |
# | 99% |2.58 |2.33 |

# In[49]:

get_ipython().system('pip install pandas-datareader')
get_ipython().system('pip install --upgrade pandas-datareader')
get_ipython().system('pip install yfinance')

# Disable the warnings
# import warnings
# warnings.filterwarnings('ignore')

# In[4]:

import math
import numpy as np
import numpy.random as npr
import pandas as pd
import pandas_datareader as pdr
import yfinance as yf
import scipy as sp
from scipy import stats
from pylab import plt, mpl

plt.style.use('fivethirtyeight')
# plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'DejaVu Sans'
# BUG FIX: the bare 'precision' alias was removed in pandas >= 1.4; the
# namespaced 'display.precision' option works on both old and new versions.
pd.set_option('display.precision', 3)
pd.set_option('display.max_colwidth', 100)
get_ipython().run_line_magic('matplotlib', 'inline')

# In[5]:

# One-sided critical values of the standard normal distribution (see the
# table above; note 1 - .10/2 = 0.95, so the two-sided 90% CV equals the
# one-sided 95% CV).
print("At 95% confidence level {:.2f}".format(sp.stats.norm.ppf(1-.10/2)))
print("At 97.5% confidence level {:.2f}".format(sp.stats.norm.ppf(1-.05/2)))
print("At 90% confidence level {:.2f}".format(sp.stats.norm.ppf(1-.10)))
print("At 99% confidence level {:.2f}".format(sp.stats.norm.ppf(1-.01)))

# In[6]:

def get_prices(tickers, freq_p, st_day, end_day):
    """Download adjusted closing prices for several tickers into one DataFrame.

    Parameters
    ----------
    tickers : list of str
        Ticker symbols understood by Yahoo Finance.
    freq_p : str
        Sampling interval accepted by yfinance ('1d', '1wk', '1mo', ...).
    st_day, end_day : str
        Start and end dates, 'YYYY-MM-DD'.

    Returns
    -------
    pd.DataFrame
        One column of adjusted closes per ticker, indexed by date.
    """
    mystock = pd.DataFrame()
    for t in tickers:
        # BUG FIX: recent yfinance versions default to auto_adjust=True and
        # drop the 'Adj Close' column; request it explicitly.
        mystock[t] = yf.download(t, start=st_day, end=end_day,
                                 interval=freq_p, auto_adjust=False)['Adj Close']
    return mystock

# In[16]:

# For short time horizons the expected return (drift) is small, and therefore
# VaR estimations will not be much influenced by it.
# The portfolio is assumed to be normally distributed.
def var_calc(CL, days, p_val, vol, t):
    """Print the parametric (normal) VaR of a position.

    Parameters
    ----------
    CL : float
        Confidence level, e.g. .95.
    days : float
        Trading periods per year over which `vol` is annualized (e.g. 252).
    p_val : float
        Current value of the position.
    vol : float
        Annualized volatility of returns.
    t : float
        Holding period, in the same units as `days` (e.g. 21 trading days).
    """
    # Scale annual volatility down to the holding period via sqrt(t/days).
    VaR = p_val * vol * np.sqrt(t/days) * sp.stats.norm.ppf(CL)
    # BUG FIX: the original negated this ratio and then printed
    # "a loss of -X%"; a loss is reported as a positive magnitude.
    percent_loss = VaR/p_val * 100
    # BUG FIX: the original formatted `days` (252) here, but the VaR above is
    # computed over the holding period `t` (21 trading days).
    print("Assuming that we invest {:.2f}, for the next {:.1f} trading days".format(p_val, t))
    print("At {:.3f} confidence level, loss will not exceed {:,.2f}".format(CL, VaR))
    print("This represents a move of {:.2f} standard deviations below the expected return,\
 or a loss of {:.2f}%.".format(sp.stats.norm.ppf(CL), percent_loss))
    return

# In[14]:

tic = ['SPY', 'TLT', 'TSLA', 'AAPL', 'VNQ', 'BAC',
       'WMT', 'AMD', 'JNJ', 'GM', 'CMG', 'SHV']
prices = get_prices(tic, freq_p='1wk', st_day="2011-01-01", end_day="2022-05-31")  # 1d, 1wk, 1mo
prices.info()
prices.tail(3)

# In[17]:

# Prices are WEEKLY (freq_p='1wk' above), i.e. 52 observations per year.
# BUG FIX: the original annualized weekly returns with sqrt(252) (the daily
# factor), overstating volatility by a factor of sqrt(252/52) ~ 2.2.
PERIODS_PER_YEAR = 52

for t in tic:
    confidence_l = .95
    annual_volatility = np.std(prices[t].pct_change()) * np.sqrt(PERIODS_PER_YEAR)
    # BUG FIX: positional `prices[t][-1]` indexing was removed in pandas 2.x.
    most_recent_p = prices[t].iloc[-1]
    holding_period = 252
    frequency = 21
    print("Historical annual volatility of {} = {:.4f}".format(t, annual_volatility))
    var_calc(confidence_l, holding_period, most_recent_p, annual_volatility, frequency)
    print(50 * "-")

# Monte Carlo approach using stochastic geometric Brownian motion.

# In[18]:

S0 = prices['AAPL'].iloc[-1]
print(S0)
r = 0.05
sigma = np.std(prices['AAPL'].pct_change()) * np.sqrt(PERIODS_PER_YEAR)
# BUG FIX: T is measured in YEARS in the GBM formula below; the original used
# T = 21.0 (i.e. 21 years) for a 21-trading-day horizon.  21/252 of a year
# matches the t = 21/252 used in the position-level simulation further down.
T = 21 / 252
I = 10000
# Terminal prices via the exact GBM solution (standard normal draws)...
ST1 = S0 * np.exp((r - 0.5 * sigma ** 2) * T
                  + sigma * math.sqrt(T) * np.random.standard_normal(I))
# ...and equivalently via draws from the lognormal distribution directly.
ST2 = S0 * np.random.lognormal((r - 0.5 * sigma ** 2) * T,
                               sigma * math.sqrt(T), size=I)

# In[19]:

def print_statistics(a1, a2):
    '''Print a side-by-side summary (size, min, max, mean, std, skew,
    kurtosis) of two simulated data sets.

    Parameters
    ==========
    a1, a2: ndarray objects
        results objects from simulation
    '''
    sta1 = sp.stats.describe(a1)
    sta2 = sp.stats.describe(a2)
    print('%14s %14s %14s' % ('statistic', 'data set 1', 'data set 2'))
    print(45 * "-")
    print('%14s %14.3f %14.3f' % ('size', sta1[0], sta2[0]))
    print('%14s %14.3f %14.3f' % ('min', sta1[1][0], sta2[1][0]))
    print('%14s %14.3f %14.3f' % ('max', sta1[1][1], sta2[1][1]))
    print('%14s %14.3f %14.3f' % ('mean', sta1[2], sta2[2]))
    # describe() returns the variance; take the square root for the std.
    print('%14s %14.3f %14.3f' % ('std', np.sqrt(sta1[3]), np.sqrt(sta2[3])))
    print('%14s %14.3f %14.3f' % ('skew', sta1[4], sta2[4]))
    print('%14s %14.3f %14.3f' % ('kurtosis', sta1[5], sta2[5]))

# In[20]:

print_statistics(ST1, ST2)

# In[ ]:

# ### Let's assume that we buy 5000 shares of AAPL. And using historical
# returns, let's estimate mean and standard deviation of returns on AAPL.
# In order to save time, let's assume they are 19% and 30.7%, respectively.
# In[21]:

# Number of shares held. NOTE(review): the narrative above assumes 5000
# shares, but the original code used 1 — kept at 1 (per-share VaR); scale
# by the position size as needed.
aapl = 1
# BUG FIX: pdr.get_quote_yahoo() has been broken by Yahoo API changes;
# fetch the most recent price with yfinance instead.
aapl_price = yf.Ticker('AAPL').history(period='1d')['Close']  # the most recent price
aapl_value = aapl * aapl_price
# Reduce the one-row Series to a scalar position value (dollars).
aapl_value = aapl_value.iloc[-1]
t = 21/252          # holding period: 21 trading days, expressed in years
mu = .05            # assumed annual drift of returns
volatility = .307   # assumed annual volatility (30.7%, per the note above)
iterations = 10000  # number of Monte Carlo draws

# In[22]:

type(aapl_price)

# In[23]:

aapl_price.describe()

# In[24]:

# checking to see what the price request is retrieving
yf.Ticker('AAPL').history(period='1d')

# In[25]:

type(aapl_price)

# In[26]:

type(aapl_value)

# In[27]:

aapl_price

# In[28]:

# aapl_value = # of shares times price
aapl_value

# Below, let's simulate future value from a Geometric Brownian Motion series.

# In[29]:

def VaR(pv, mu, vol, T, iterations):
    """Simulate the profit/loss of a position over horizon T under GBM.

    Parameters
    ----------
    pv : float
        Present value of the position.
    mu : float
        Annualized drift of returns.
    vol : float
        Annualized volatility of returns.
    T : float
        Horizon in years (e.g. 21/252 for 21 trading days).
    iterations : int
        Number of simulated terminal values.

    Returns
    -------
    np.ndarray
        `iterations` simulated P&L values (terminal value minus pv);
        negative entries are losses.
    """
    end = pv * np.exp((mu - .5 * vol ** 2) * T
                      + vol * np.sqrt(T) * np.random.standard_normal(iterations))
    ending_values = end - pv
    return ending_values

# In[30]:

at_risk = VaR(aapl_value, mu, volatility, t, iterations)
at_risk

# In[31]:

type(at_risk)

# In[32]:

np.shape(at_risk)

# In[33]:

np.ndim(at_risk)

# In[34]:

at_risk.mean()

# In[35]:

at_risk.std()

# In[36]:

plt.hist(at_risk, bins=100)

# In[37]:

# Sample VaR: the 1st/5th/10th percentiles of the simulated P&L distribution.
percentiles = [1, 5, 10]
np.percentile(at_risk, percentiles)

# In[ ]: