import numpy as np
import matplotlib.pyplot as plt
# lets first of all create some Classes to make our lives a bit easier
class Coin:
    """A fair two-sided coin that lands heads ('H') or tails ('T')."""

    # The two equally likely faces.
    FACES = ('H', 'T')

    def __init__(self):
        pass

    def toss(self):
        """Flip the coin once; return 'H' or 'T' with equal probability."""
        return np.random.choice(self.FACES)
class Dice:
    """A fair six-sided die."""

    # All faces of a standard die.
    FACES = (1, 2, 3, 4, 5, 6)

    def __init__(self):
        pass

    def roll(self):
        """Roll the die once; return a face value from 1 through 6."""
        return np.random.choice(self.FACES)
Most of us have solved this problem before.
It is most likely that we solved it using the following steps:
sub_space = { (Head, Head), (Head, Tail), (Tail,Head), (Tail,Tail) }
total number of outcomes = 4
favourable number of outcomes = 2 ,ie, {(Head, Head), (Tail,Tail) }
prob = 2 / 4 = 0.5
def problem_1(n_itr):
    """Monte Carlo estimate of P(two fair coins show the same face).

    Args:
        n_itr: number of simulated double-tosses to run.

    Returns:
        Fraction of trials where both coins matched (analytic value: 0.5).
    """
    first_coin = Coin()
    second_coin = Coin()
    # Count trials where the two tosses agree; the first coin is always
    # tossed before the second, as in a physical experiment.
    matches = sum(
        1 for _ in range(n_itr) if first_coin.toss() == second_coin.toss()
    )
    return matches / n_itr
# Simulation sizes to sweep: powers of ten from 10 up to 1,000,000.
iterations = np.array([10 ** exponent for exponent in range(1, 7)])
iterations
array([ 10, 100, 1000, 10000, 100000, 1000000])
from tqdm import tqdm

# Estimate the probability once per sweep size, keyed by run count.
score_dict = {run_count: problem_1(run_count) for run_count in tqdm(iterations)}
100%|██████████| 6/6 [00:13<00:00, 2.21s/it]
The probability would be 0.49909
# The estimate from the largest run — very close to the analytic 0.5.
final_estimate = list(score_dict.values())[-1]
print('The probability would be ', final_estimate)
The probability would be 0.49909
# Show how the estimate behaves as the number of simulation runs grows.
fig = plt.figure(figsize=(12, 8))
ax = fig.gca()
ax.plot(list(score_dict.keys()), list(score_dict.values()), label='Prob calculated')
# Dashed reference line at the analytic probability.
ax.axhline(y=0.5, linestyle='--', c='red', label='0.5')
ax.set_xscale('log')
ax.legend()
ax.set_xlabel('Number of Iterations')
ax.set_ylabel('Probability')
Text(0, 0.5, 'Probability')
We can see from the above graph that the probability of getting the same outcome on both coins converges to 0.5 (the analytic value) as the number of simulations is increased.
In this case we would get a sample space of 8 equally likely outcomes, of which 3 are desired (exactly two heads). Hence the probability in this case would be 3/8 = 0.375.
def problem_2(n_itr):
    """Monte Carlo estimate of P(exactly two heads in three coin tosses).

    Args:
        n_itr: number of simulated triple-tosses to run.

    Returns:
        Fraction of trials with exactly two heads (analytic value: 3/8).
    """
    coins = [Coin(), Coin(), Coin()]
    hits = 0
    for _ in range(n_itr):
        # Toss the coins in order and collect the three outcomes.
        outcomes = [coin.toss() for coin in coins]
        # A trial succeeds when exactly two of the three faces are heads.
        if outcomes.count('H') == 2:
            hits += 1
    return hits / n_itr
# Same sweep of simulation sizes: powers of ten from 10 to 1,000,000.
iterations = np.array([10 ** exponent for exponent in range(1, 7)])
iterations
array([ 10, 100, 1000, 10000, 100000, 1000000])
# Run the three-coin simulation once per sweep size.
score_dict = {run_count: problem_2(run_count) for run_count in tqdm(iterations)}
100%|██████████| 6/6 [00:20<00:00, 3.38s/it]
# The estimate from the largest run — close to the analytic 3/8 = 0.375.
final_estimate = list(score_dict.values())[-1]
print('The probability would be ', final_estimate)
The probability would be 0.374621
# Show how the estimate behaves as the number of simulation runs grows.
fig = plt.figure(figsize=(12, 8))
ax = fig.gca()
ax.plot(list(score_dict.keys()), list(score_dict.values()), label='Prob calculated')
# Dashed reference line at the analytic probability.
ax.axhline(y=0.375, linestyle='--', c='red', label='0.375')
ax.set_xscale('log')
ax.legend()
ax.set_xlabel('Number of Iterations')
ax.set_ylabel('Probability')
Text(0, 0.5, 'Probability')
Again we observe the same trend: as the number of iterations increases, the estimated probability converges to the analytic value.
In this case the sample space has 36 equally likely outcomes, of which 6 are doubles, so the probability is 6/36 = 1/6 ≈ 0.1667.
def problem_3(n_itr):
    """Monte Carlo estimate of P(two fair dice show the same face).

    Args:
        n_itr: number of simulated double-rolls to run.

    Returns:
        Fraction of trials where both dice matched (analytic value: 1/6).
    """
    first_die = Dice()
    second_die = Dice()
    # Count doubles; the first die is always rolled before the second.
    doubles = sum(
        1 for _ in range(n_itr) if first_die.roll() == second_die.roll()
    )
    return doubles / n_itr
# Run the two-dice simulation once per sweep size.
score_dict = {run_count: problem_3(run_count) for run_count in tqdm(iterations)}
100%|██████████| 6/6 [00:13<00:00, 2.18s/it]
# The estimate from the largest run — close to the analytic 1/6 ≈ 0.1667.
final_estimate = list(score_dict.values())[-1]
print('The probability would be ', final_estimate)
The probability would be 0.167223
# Show how the estimate behaves as the number of simulation runs grows.
fig = plt.figure(figsize=(12, 8))
ax = fig.gca()
ax.plot(list(score_dict.keys()), list(score_dict.values()), label='Prob calculated')
# Dashed reference line at the analytic probability.
ax.axhline(y=0.1667, linestyle='--', c='red', label='0.1667')
ax.set_xscale('log')
ax.legend()
ax.set_xlabel('Number of Iterations')
ax.set_ylabel('Probability')
Text(0, 0.5, 'Probability')