# Agent-based diffusion ("Bass-style") simulation on a population of disjoint
# strong-tie cliques, with random weak-tie meetings across cliques.
#
# NOTE(review): the original file started with the Pkg-REPL command
# `] add Graphs Distributions DataFrames GLM ProgressMeter`, which is not valid
# Julia source and breaks `include`-ing this file. Install dependencies once,
# separately, e.g.:
#     import Pkg; Pkg.add(["Graphs", "Distributions", "DataFrames", "GLM", "ProgressMeter"])

using Graphs
using Distributions, DataFrames, GLM, ProgressMeter
using Dates
using Random: shuffle, seed!

# Seed the global RNG so a full run is reproducible end-to-end.
seed!(20130810);

"""
    initialize_network(n_nodes::Int, n_strong_ties::Int)

Build the population as a vector of disjoint complete sub-networks (cliques),
each with `n_strong_ties` nodes. Any remainder of `n_nodes` that does not fill
a whole clique is dropped (`floor` division), so the actual population size is
`floor(n_nodes / n_strong_ties) * n_strong_ties`.
"""
function initialize_network(n_nodes::Int, n_strong_ties::Int)
    return [complete_graph(n_strong_ties) for _ in 1:floor(Int, n_nodes / n_strong_ties)]
end

"""
    reset_node_status(G)

Return a fresh activation-status structure for `G`: one all-`false` `BitVector`
per sub-network (`true` = node has engaged/adopted).
"""
function reset_node_status(G::Vector{Graphs.SimpleGraphs.SimpleGraph{Int}})
    return [falses(nv(g)) for g in G]
end

"""
    count_active_str_ties(G, node_network_id, node, node_status)

Count how many of `node`'s strong ties (its neighbors inside its own clique
`G[node_network_id]`) are currently active.
"""
function count_active_str_ties(G::Vector{Graphs.SimpleGraphs.SimpleGraph{Int}},
                               node_network_id::Int,
                               node::Int,
                               node_status::Vector{BitVector})
    # Generator instead of a temporary array: this runs once per node per step.
    # `init=0` keeps the sum well-defined even for an isolated node.
    return sum(node_status[node_network_id][nbr]
               for nbr in neighbors(G[node_network_id], node); init=0)
end

"""
    random_meetings(G, node_network_id, node, node_status, n_weak_ties)

Sample `n_weak_ties` distinct weak ties for `node` — random nodes drawn from
sub-networks other than its own — and return how many of them are active.

BUGFIX(review): the original loop (`nsamples = 1; while nsamples < n_weak_ties`)
collected only `n_weak_ties - 1` ties, contradicting its own comment.

NOTE(review): this loops forever if the total number of nodes outside
`node_network_id` is smaller than `n_weak_ties`; with the parameter ranges used
below (≥ 3000-node populations, w ≤ 29) that cannot happen — confirm if reused.
"""
function random_meetings(G::Vector{Graphs.SimpleGraphs.SimpleGraph{Int}},
                         node_network_id::Int,
                         node::Int,
                         node_status::Vector{BitVector},
                         n_weak_ties::Int)
    all_network_ids = 1:length(G)
    other_network_ids = all_network_ids[all_network_ids .!= node_network_id]
    # Typed container (the original used `[]`, i.e. Vector{Any}, in a hot loop).
    possible_weak_ties = Tuple{Int,Int}[]
    # Rejection-sample (network, node) pairs without replacement.
    while length(possible_weak_ties) < n_weak_ties
        rand_network_id = rand(other_network_ids)
        rand_nbr = rand(vertices(G[rand_network_id]))
        tie = (rand_network_id, rand_nbr)
        if !(tie in possible_weak_ties)
            push!(possible_weak_ties, tie)
        end
    end
    return sum(node_status[network_id][weak_tie]
               for (network_id, weak_tie) in possible_weak_ties; init=0)
end

"""
    update_status!(G, node_status, n_weak_ties, alpha, beta_w, beta_s)

Run one simulation step, updating `node_status` in place. Every node is visited
once in random order; an inactive node activates with probability

    1 - (1 - α) (1 - β_w)^(# active weak ties) (1 - β_s)^(# active strong ties)

i.e. advertising (α), weak ties (β_w) and strong ties (β_s) are independent
activation channels. Activation is an absorbing state (nodes never deactivate).
"""
function update_status!(G::Vector{Graphs.SimpleGraphs.SimpleGraph{Int}},
                        node_status::Vector{BitVector},
                        n_weak_ties::Int,
                        alpha::Float64,
                        beta_w::Float64,
                        beta_s::Float64)
    # Nodes update in random order (both across and within cliques).
    for node_network_id in shuffle(1:length(G))
        for node in shuffle(vertices(G[node_network_id]))
            n_active_str_ties = count_active_str_ties(G, node_network_id, node, node_status)
            n_active_wk_ties = random_meetings(G, node_network_id, node, node_status, n_weak_ties)
            activation_prob = 1 - (1 - alpha) *
                              (1 - beta_w)^n_active_wk_ties *
                              (1 - beta_s)^n_active_str_ties
            # rand() is Uniform(0,1); avoids allocating a Uniform() per draw.
            if rand() < activation_prob
                node_status[node_network_id][node] = true
            end
        end
    end
    return nothing
end

# --- Parameter grid: 3 levels per factor, full factorial (3^5 = 243 settings) ---
s_levels      = floor.(Int, range(5, stop=29, length=3))      # strong ties per node
w_levels      = floor.(Int, range(5, stop=29, length=3))      # weak ties per node
alpha_levels  = range(0.0005, stop=0.01, length=3)            # advertising effect
beta_w_levels = range(0.005, stop=0.015, length=3)            # weak-tie effect
beta_s_levels = range(0.01, stop=0.07, length=3)              # strong-tie effect

println("Number of strong ties per node (s): ", s_levels)
println("Number of weak ties per node (w): ", collect(w_levels))
println("Effect of advertising (α): ", collect(alpha_levels))
println("Effect of weak ties (β_w): ", collect(beta_w_levels))
println("Effect of strong ties (β_s): ", collect(beta_s_levels))

parameter_space = [(s, w, alpha, beta_w, beta_s)
                   for s in s_levels,
                       w in w_levels,
                       alpha in alpha_levels,
                       beta_w in beta_w_levels,
                       beta_s in beta_s_levels]

size(parameter_space), length(parameter_space)

"""
    execute_simulation(parameter_space, n_nodes::Int)

Run the diffusion simulation for every `(s, w, α, β_w, β_s)` setting in
`parameter_space`, each on a fresh `n_nodes`-node network, stepping until 95 %
of the network has engaged. Returns a long-format `DataFrame` with one row per
(setting, time step): columns `s, w, alpha, beta_w, beta_s, t, num_engaged`.

The output cannot be pre-allocated because the number of steps to reach 95 %
engagement differs per setting.
"""
function execute_simulation(parameter_space, n_nodes::Int)
    output = DataFrame(s = Int[], w = Int[], alpha = Float64[], beta_w = Float64[],
                       beta_s = Float64[], t = Int[], num_engaged = Int[])
    println("Beginning simulation at : ", Dates.format(now(), "HH:MM"))
    println("You might want to grab a cup of coffee while Julia brews the simulation...")
    # Iterate the array directly — the original's `parameter_space[1:end]`
    # made a needless full copy.
    @showprogress 1 "Crunching numbers while you munch..." for (s, w, alpha, beta_w, beta_s) in parameter_space
        G = initialize_network(n_nodes, s)
        node_status = reset_node_status(G)
        num_engaged = sum(count, node_status)
        # Continue updates at this setting until 95 % of the network engages.
        # α > 0 for every grid point, so activation always terminates.
        t = 1
        while num_engaged < floor(Int, 0.95 * n_nodes)
            update_status!(G, node_status, w, alpha, beta_w, beta_s)
            num_engaged = sum(count, node_status)
            push!(output, [s, w, alpha, beta_w, beta_s, t, num_engaged])
            t += 1
        end
    end
    return output
end

results = execute_simulation(parameter_space, 3000)
first(results, 10)

# T95 = last recorded step per setting = time to reach 95 % engagement.
all_engaged = combine(groupby(results, [:s, :w, :alpha, :beta_w, :beta_s]),
                      df -> DataFrame(T95 = maximum(df[!, :t])));
first(all_engaged, 10)

# Linear response-surface fit of T95 on the five factors.
ols = lm(@formula(T95 ~ s + w + alpha + beta_s + beta_w), all_engaged)
r2(ols)