!pip install numpy numba --upgrade
!pip install lmfit
assert np.isin
# (pasted pip/conda console output — not executable code; kept for reference)
# Requirement already up-to-date: numpy in /opt/conda/lib/python3.5/site-packages (1.15.4) Requirement already up-to-date: numba in /opt/conda/lib/python3.5/site-packages (0.41.0) Requirement not upgraded as not directly required: llvmlite>=0.26.0dev0 in /opt/conda/lib/python3.5/site-packages (from numba) (0.26.0) cryptography 2.2.1 requires asn1crypto>=0.21.0, which is not installed. cffi 1.11.5 requires pycparser, which is not installed. allensdk 0.14.2 has requirement pandas<0.20.0,>=0.16.2, but you'll have pandas 0.23.1 which is incompatible. You are using pip version 10.0.1, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command. Requirement already satisfied: lmfit in /opt/conda/lib/python3.5/site-packages (0.9.12) Requirement already satisfied: numpy>=1.10 in /opt/conda/lib/python3.5/site-packages (from lmfit) (1.15.4) Requirement already satisfied: six>1.10 in /opt/conda/lib/python3.5/site-packages (from lmfit) (1.11.0) Requirement already satisfied: asteval>=0.9.12 in /opt/conda/lib/python3.5/site-packages (from lmfit) (0.9.13) Requirement already satisfied: uncertainties>=3.0 in /opt/conda/lib/python3.5/site-packages (from lmfit) (3.0.3) Requirement already satisfied: scipy>=0.17 in /opt/conda/lib/python3.5/site-packages (from lmfit) (1.1.0) cffi 1.11.5 requires pycparser, which is not installed. cryptography 2.2.1 requires asn1crypto>=0.21.0, which is not installed. allensdk 0.14.2 has requirement pandas<0.20.0,>=0.16.2, but you'll have pandas 0.23.1 which is incompatible. You are using pip version 10.0.1, however version 18.1 is available. You should consider upgrading via the 'pip install --upgrade pip' command.
# Load the cached (observations, tests) frames from 'all_tests.p' if present;
# otherwise rebuild them from the NeuroElectro criteria and cache them.
electro_tests = []
obs_frame = {}
test_frame = {}
import os
import pickle
try:
    # NOTE(review): os.getcwd() has no trailing separator, so this resolves
    # to e.g. '/home/userall_tests.p'. Kept as-is because the same spelling
    # is used when the cache is written below and when it is reloaded later.
    electro_path = str(os.getcwd())+'all_tests.p'
    # BUG FIX: dropped the redundant `== True` from the assert.
    assert os.path.isfile(electro_path)
    with open(electro_path,'rb') as f:
        (obs_frame,test_frame) = pickle.load(f)
# BUG FIX: was a bare `except:`, which also swallows KeyboardInterrupt and
# SystemExit; narrowed to Exception (AssertionError/IOError are the expected
# cache-miss signals here).
except Exception:
    # Cache miss: rebuild per cell type. `pipe` and `get_neab` are not
    # defined in this file — they must come from an earlier notebook cell.
    for p in pipe:
        p_tests, p_observations = get_neab.get_neuron_criteria(p)
        obs_frame[p["name"]] = p_observations
        test_frame[p["name"]] = p_tests
    electro_path = str(os.getcwd())+'all_tests.p'
    with open(electro_path,'wb') as f:
        pickle.dump((obs_frame,test_frame),f)
import sciunit, neuronunit, quantities
from neuronunit.tests.dm import *
import time
from neuronunit.tests import dm #this is me importing the Druckman tests
from neuronunit.tests import RheobaseTestP, fi, RheobaseTest
import matplotlib.pyplot as plt
from neuronunit.models.reduced import ReducedModel
from neuronunit.optimization.data_transport_container import DataTC
from neuronunit.optimization.model_parameters import model_params, path_params, transcribe_units
from neuronunit.optimization.optimization_management import mint_generic_model
# Path to the vanilla LEMS model shipped with neuronunit.
LEMS_MODEL_PATH = path_params['model_path']
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('RAW'))
# Data-transport container used to ferry model attributes between processes.
dtc = DataTC()
dtc.attrs = model.attrs
dtc.ampl=0
# Izhikevich-style raw parameters; vPeak is expressed relative to vr.
# (Assumed unitless here — transcribe_units attaches quantities below;
# TODO confirm against neuronunit.optimization.model_parameters.)
dtc.attrs_raw = {'C':89.7960714285714, 'a':0.01, 'b':15, 'c':-60, 'd':10,\
'k':1.6, 'vPeak':(86.364525297619-65.2261863636364),\
'vr':-65.2261863636364, 'vt':-50}
dtc.attrs = transcribe_units(dtc.attrs_raw)
# Time a parallel rheobase search (RheobaseTestP) on the NEURON backend,
# then a serial one (fi.RheobaseTest), and report both wall-clock times.
start0 = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('NEURON', {'DTC':dtc}))
rt = RheobaseTestP(obs_frame['Dentate gyrus basket cell']['Rheobase'])
pred0 = rt.generate_prediction(model)
end0 = time.time()
print(model.attrs)
# Fresh model instance for the serial run so the two searches share no state.
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('NEURON', {'DTC':dtc}))
start1 = time.time()
rt = fi.RheobaseTest(obs_frame['Dentate gyrus basket cell']['Rheobase'])
pred1 = rt.generate_prediction(model)
end1 = time.time()
print('parallel Rhsearch time NEURON', end0-start0)
print('serial Rhsearch time NEURON',end1-start1)
# Hodgkin-Huxley backend: set baseline conductances/reversal potentials and
# inject a fixed square current, timing the simulation.
explore_ranges = {'E_Na' : (40,70), 'E_K': (-90.0,-75.0)}
attrs = { 'g_K' : 36.0, 'g_Na' : 120.0, 'g_L' : 0.3,
          'C_m' : 1.0, 'E_L' : -54.387, 'E_K' : -77.0, 'E_Na' : 50.0, 'vr':-65.0 }
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('HH'))
model.attrs = attrs
dtc.attrs = attrs
# BUG FIX: `pq` was used here but `import quantities as pq` only appeared much
# later in the file; import it before first use so this cell runs standalone.
import quantities as pq
iparams = {}
iparams['injected_square_current'] = {}
# BUG FIX: the amplitude was assigned twice back-to-back (1.98156805 pA then
# 2.98156805 pA); the first assignment was dead code and has been removed.
iparams['injected_square_current']['amplitude'] = 2.98156805*pq.pA
DELAY = 100.0*pq.ms
DURATION = 1000.0*pq.ms
iparams['injected_square_current']['delay'] = DELAY
# NOTE(review): int(DURATION) strips the ms units; kept because every other
# injection in this file does the same — presumably the backend expects ms.
iparams['injected_square_current']['duration'] = int(DURATION)
bf = time.time()
model.inject_square_current(iparams)
vm = model.get_membrane_potential()
af = time.time()
#volts = [v[0] for v in vm ]
print(len(vm[0]),len(vm.times))
# Parallel rheobase search on the HH backend, timed.
bfp = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('HH', {'DTC':dtc}))
model._backend.cell_name = str('vanilla')
rt = RheobaseTestP(obs_frame['Dentate gyrus basket cell']['Rheobase'])
pred0 = rt.generate_prediction(model)
afp = time.time()
print(model.attrs)
# NOTE(review): `vm` is the trace from the earlier fixed-amplitude injection,
# not from the rheobase search above — confirm this plot is intentional.
plt.plot(vm.times,vm)
plt.show()
print('elapsed parallel: ',afp-bfp)
# Serial rheobase search on the HH backend, timed, for comparison.
bfs = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('HH', {'DTC':dtc}))
model._backend.cell_name = str('vanilla')
rt = RheobaseTest(obs_frame['Dentate gyrus basket cell']['Rheobase'])
pred0 = rt.generate_prediction(model)
afs = time.time()
print(model.attrs)
plt.plot(vm.times,vm)
plt.show()
print('elapsed serial: ',afs-bfs)
import pdb
# Reload the cached observation/test frames and swap the serial rheobase test
# for its parallel counterpart in the pyramidal-cell suite.
with open(electro_path,'rb') as f:
    (obs_frame,test_frame) = pickle.load(f)
use_test = test_frame["Neocortex pyramidal cell layer 5-6"]
# Bare expression: displays the observation when run as a notebook cell.
use_test[0].observation
#from neuronunit.tests import RheobaseP
from neuronunit.tests.fi import RheobaseTestP# as discovery
rtp = RheobaseTestP(use_test[0].observation)
# Replace the serial rheobase test (index 0) with the parallel version.
use_test[0] = rtp
# Parameter ranges the genetic algorithm is free to explore for the HH model.
explore_ranges = {'E_Na' : (40.0,70.0), 'E_K': (-90.0,-75.0), 'g_K':(30,40), 'g_Na':(100.0,140.0), 'g_L':(0.1,0.5), 'E_L':(-60.0,-45.0)}
attrs = { 'g_K' : 36.0, 'g_Na' : 120.0, 'g_L' : 0.3, \
'C_m' : 1.0, 'E_L' : -54.387, 'E_K' : -77.0, 'E_Na' : 50.0, 'vr':-65.0 }
from neuronunit.optimization import optimization_management as om
print(test_frame)
# GA settings: population size MU, NGEN generations.
MU = 12
NGEN = 25
cnt = 1
#hc = { 'g_L' : 0.3, 'E_L' : -54.387,
# Held-constant parameters excluded from the search.
hc = {'vr':-65.0, 'C_m':1.0 }
# NSGA-II multi-objective optimisation over the HH model against use_test.
npcl, DO = om.run_ga(explore_ranges,NGEN,use_test,free_params=explore_ranges.keys(), hc = hc, NSGA = True, MU = MU,model_type='HH')
# pyNN-style HH parameter set for the HHpyNN backend runs below.
# (Units presumably follow pyNN conventions: cm in nF, conductances in uS,
# potentials in mV — TODO confirm.)
attrs = {
    'cm': 0.2,            # same double as the original 0.20000000000000001
    'e_rev_E': 0.0,
    'e_rev_I': -80.0,
    'e_rev_K': -90.0,
    'e_rev_Na': 50.0,
    'e_rev_leak': -65.0,
    'g_leak': 0.01,
    'gbar_K': 6.0,
    'gbar_Na': 20.0,
    'i_offset': 0.0,
    'tau_syn_E': 0.2,     # same double as the original 0.20000000000000001
    'tau_syn_I': 2.0,
    'v_offset': -63.0,
}
# Hand the parameter set to the data-transport container.
dtc.attrs = attrs
# Time parallel vs serial rheobase searches on the HHpyNN backend.
bfp = time.time()
#model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('HH', {'DTC':dtc}))
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('HHpyNN', {'DTC':dtc}))
rt0 = RheobaseTestP(obs_frame['Dentate gyrus basket cell']['Rheobase'])
# BUG FIX: originally called `rt.generate_prediction(...)`, reusing the stale
# serial test object bound many cells earlier instead of the rt0 just built,
# so the "parallel" timing actually measured the wrong test.
pred0 = rt0.generate_prediction(model)
afp = time.time()
print('elapsed parallel: ',afp-bfp)
bfs = time.time()
#model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('HH', {'DTC':dtc}))
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('HHpyNN', {'DTC':dtc}))
rt1 = RheobaseTest(obs_frame['Dentate gyrus basket cell']['Rheobase'])
# BUG FIX: same stale-`rt` problem for the serial timing — use rt1.
pred1 = rt1.generate_prediction(model)
afs = time.time()
print(model.attrs)
# NOTE(review): `vm` is left over from the earlier HH injection; the HHpyNN
# runs above do not refresh it — confirm this plot is intentional.
plt.plot(vm.times,vm)
plt.show()
print('elapsed Serial: ',afs-bfs)
print(rt0,rt1)
# Classic (Hodgkin & Huxley 1952) parameterisation — note the sign convention
# differs from the modern sets above (VNa negative, VK positive here);
# TODO confirm the HH backend expects this convention.
attrs = { 'gK' : 36.0, 'gNa' : 120.0, 'gL' : 0.3, 'Cm' : 1.0, 'Vl' : -10.402, 'VK' : -12.0, 'VNa' : -115, 'vr':-58.402 }
# The scalars below mirror attrs but are never read again in this file.
C_m = 1
V_Na = -115
V_K = 12
V_l = -10.613
g_Na = 120
g_K = 36
g_l = 0.3
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('HH'))
model.attrs = attrs
# Re-uses the iparams dict built earlier; only the amplitude changes.
iparams['injected_square_current']['amplitude'] = 15.6805*pq.pA
model.inject_square_current(iparams)
vm = model.get_membrane_potential()
# NOTE(review): `af` is recorded without a matching start time here —
# leftover from the earlier timing cell.
af = time.time()
#volts = [v[0] for v in vm ]
print(len(vm[0]),len(vm.times))
plt.plot(vm.times,vm)
# Bare expressions / prints: notebook-style inspection of the trace.
len(vm)
len(vm.times)
np.shape(vm)
print(vm[6000])
print(vm[5000])
print(vm[0])
# Time parallel vs serial rheobase searches on the numba/RAW backend, then
# derive the stimulus amplitudes used by the Druckmann test battery below.
start4 = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('RAW'))
rt = fi.RheobaseTestP(obs_frame['Dentate gyrus basket cell']['Rheobase'])
pred1 = rt.generate_prediction(model)
end4 = time.time()
start3 = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('RAW'))
rt = fi.RheobaseTest(obs_frame['Dentate gyrus basket cell']['Rheobase'])
pred1 = rt.generate_prediction(model)
end3 = time.time()
print(pred1)
# CLEANUP: the original initialised ir_currents to {} and immediately
# overwrote it; the dead assignment has been removed.
ir_currents = pred1['value']
# 'standard' = 1.5 x rheobase, scaled again by 1.5 (net 2.25 x rheobase);
# 'strong' = 3 x rheobase.
standard = 1.5*ir_currents
standard *= 1.5
strong = 3*ir_currents
print(standard,strong,ir_currents)
print('parallel Rhsearch time RAW', end4-start4)
print('serial Rhsearch time RAW',end3-start3)
# Inject a square current at the predicted rheobase on the RAW backend and
# plot the membrane response.
# BUG FIX: the original printed `pred`, which is not defined at this point
# (the rheobase result above is bound to `pred1`) and raised NameError.
print(pred1)
#for i in npcl['pf'][0:2]:
iparams = {}
iparams['injected_square_current'] = {}
iparams['injected_square_current']['amplitude'] = pred1['value']
model = None
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('RAW'))
#model.set_attrs(i.dtc.attrs)
#['amplitude'] = dtc.vtest[k]['injected_square_current']['amplitude']
DELAY = 100.0*pq.ms
DURATION = 1000.0*pq.ms
iparams['injected_square_current']['delay'] = DELAY
# int(DURATION) strips the ms units — matches the rest of the file.
iparams['injected_square_current']['duration'] = int(DURATION)
model.inject_square_current(iparams)
n_spikes = len(model.get_spike_train())
if n_spikes:
    print(n_spikes)
    #print(i[0].scores['RheobaseTestP']*pq.pA)
plt.plot(model.get_membrane_potential().times,model.get_membrane_potential())#,label='ground truth')
plt.legend()
print(obs_frame['Dentate gyrus basket cell']['Rheobase'])
#speed_up= (end1-start1)/(end0-start0)
#print(speed_up, 'speed up for NEURON')
# Ratio of serial to parallel RAW-backend search time (>1 means parallel won).
speed_up= (end3-start3)/(end4-start4)
print(speed_up, 'speed up (slow down) for rawpy')
# These results show that parallel rheobase search is ~3.5-7x faster for NEURON,
# but slower for numba-jit backends, depending on the model.
# This makes sense: numba-jit evaluations finish so quickly that a single
# evaluation rivals the interprocess-communication time, which is not the case
# for NEURON simulations, where each simulation takes a long time.
# Parallel search wins whenever interprocess-communication time is less than
# simulation time, because of how binary search narrows the interval:
# each serial evaluation narrows the search interval by only 50%,
# whereas 8 simultaneous evaluations narrow the interval by 7/8ths.
# This faster narrowing is what makes the parallel case beat serial binary search.
# Druckmann et al. 2013 test battery: most tests run at the 'standard'
# stimulus amplitude (2.25 x rheobase, computed above); the last six use the
# 'strong' stimulus (3 x rheobase).
tests = [AP12AmplitudeDropTest(standard),
AP1SSAmplitudeChangeTest(standard),
AP1AmplitudeTest(standard),
AP1WidthHalfHeightTest(standard),
AP1WidthPeakToTroughTest(standard),
AP1RateOfChangePeakToTroughTest(standard),
AP1AHPDepthTest(standard),
AP2AmplitudeTest(standard),
AP2WidthHalfHeightTest(standard),
AP2WidthPeakToTroughTest(standard),
AP2RateOfChangePeakToTroughTest(standard),
AP2AHPDepthTest(standard),
AP12AmplitudeChangePercentTest(standard),
AP12HalfWidthChangePercentTest(standard),
AP12RateOfChangePeakToTroughPercentChangeTest(standard),
AP12AHPDepthPercentChangeTest(standard),
AP1DelayMeanTest(standard),
AP1DelaySDTest(standard),
AP2DelayMeanTest(standard),
AP2DelaySDTest(standard),
Burst1ISIMeanTest(standard),
Burst1ISISDTest(standard),
InitialAccommodationMeanTest(standard),
SSAccommodationMeanTest(standard),
AccommodationRateToSSTest(standard),
AccommodationAtSSMeanTest(standard),
AccommodationRateMeanAtSSTest(standard),
ISICVTest(standard),
ISIMedianTest(standard),
ISIBurstMeanChangeTest(standard),
SpikeRateStrongStimTest(strong),
AP1DelayMeanStrongStimTest(strong),
AP1DelaySDStrongStimTest(strong),
AP2DelayMeanStrongStimTest(strong),
AP2DelaySDStrongStimTest(strong),
Burst1ISISDStrongStimTest(strong),
Burst1ISIMeanStrongStimTest(strong)]
# Subset of the battery: after-hyperpolarisation depth tests only.
AHP_list = [AP1AHPDepthTest(standard),
AP2AHPDepthTest(standard),
AP12AHPDepthPercentChangeTest(standard) ]
print(ir_currents)
print(standard)
print(strong)
# Run the whole Druckmann battery serially on the RAW backend and time it.
start2 = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('RAW'))
for i, test in enumerate(tests):
    mean = test.generate_prediction(model)['mean']
    #print(mean,test)
stop2 = time.time()
delta2 = stop2-start2
print('serial time: ',stop2-start2)
'''
USING NEURON WOULD TAKE HALF AN HOUR
start3 = time.time()
model = ReducedModel(LEMS_MODEL_PATH, name= str('vanilla'), backend=('NEURON', {'DTC':dtc}))
model.atts = dtc.attrs
for i, test in enumerate(tests):
mean = test.generate_prediction(model)['mean']
#print(mean, tests)
stop3 = time.time()
print(stop3-start3)
'''
# can do these tests in parallel:
import dask.bag as db
import multiprocessing
# One dask partition per CPU core.
npart = multiprocessing.cpu_count()
start5 = time.time()
bag = db.from_sequence(tests, npartitions = npart)
# NOTE(review): `takes_tests` is not defined anywhere in this file — it must
# come from an earlier notebook cell; confirm before running.
means = list(bag.map(takes_tests).compute())
end5 = time.time()
#print(end5-start5)
print(means)
print('parallel time: ',end5-start5)
print('speed up:',delta2/(end5-start5))
# Collect every Druckmann-2013 test class exported by neuronunit.tests.dm
# by name inspection.
dmtests = dm.Druckmann2013Test
d_tests = []
for d in dir(dm):
    if "Test" in d:
        # IMPROVEMENT: originally built and exec'd a code string
        # ('d_tests.append(dm.'+d+')'); getattr is the safe, idiomatic
        # equivalent with identical behavior.
        d_tests.append(getattr(dm, d))
# Drop the first and last entries — presumably base/helper classes whose
# names merely contain 'Test'; TODO confirm which classes these are.
dt = d_tests[1:-1]
print(dt)
import quantities as pq
# Example observation for a single Druckmann test.
current_amplitude = {'mean': 106.7 * pq.pA, 'n': 1, 'std': 0.0 * pq.pA}
test = dm.AP12AmplitudeChangePercentTest(current_amplitude)
# Reload the cached frames (fresh imports so this cell is standalone) and
# predict the rheobase of the dentate gyrus basket cell with the parallel test.
import os
import pickle
import matplotlib.pyplot as plt
# Same path spelling as the cache-writing cell (cwd + 'all_tests.p', no
# separator) so the file written earlier is found.
electro_path = str(os.getcwd())+'all_tests.p'
# BUG FIX: dropped the redundant `== True` from the assert.
assert os.path.isfile(electro_path)
with open(electro_path,'rb') as f:
    (obs_frame,test_frame) = pickle.load(f)
rt = RheobaseTestP(obs_frame['Dentate gyrus basket cell']['Rheobase'])
# `model` is whatever backend was built in the previous cells.
pred = rt.generate_prediction(model)
#for i in npcl['pf'][0:2]:
# Inject a square current at the predicted rheobase (RAW backend) and plot
# the membrane response.
iparams = {}
iparams['injected_square_current'] = {}
iparams['injected_square_current']['amplitude'] = pred['value']
model = None
model = ReducedModel(LEMS_MODEL_PATH,name = str('vanilla'),backend = ('RAW'))
#model.set_attrs(i.dtc.attrs)
#['amplitude'] = dtc.vtest[k]['injected_square_current']['amplitude']
DELAY = 100.0*pq.ms
DURATION = 1000.0*pq.ms
iparams['injected_square_current']['delay'] = DELAY
# int(DURATION) strips the ms units — matches the rest of the file.
iparams['injected_square_current']['duration'] = int(DURATION)
model.inject_square_current(iparams)
n_spikes = len(model.get_spike_train())
if n_spikes:
    print(n_spikes)
    #print(i[0].scores['RheobaseTestP']*pq.pA)
plt.plot(model.get_membrane_potential().times,model.get_membrane_potential())#,label='ground truth')
plt.legend()
print(obs_frame['Dentate gyrus basket cell']['Rheobase'])
print(model)
# Inspect the first collected Druckmann test class, instantiate it, and run
# its prediction against the current model.
help(dt[0])
# BUG FIX: the original rebound `dt` to an instance (`dt = dt[0]()`) and then
# indexed the instance again (`dt[0].generate_prediction(...)`), which raises
# TypeError. Bind the instance to its own name and call the method on it;
# `dt` stays a list of classes for any later use.
dm_test_instance = dt[0]()
dm_test_instance.generate_prediction(model)
# Dead debug scratch kept as a string literal so it never executes. If revived,
# note the typo inside it: it would define `dmtO` (letter O) but print `dmt0`
# (zero), raising NameError.
'''
import pdb
for d in dt:
pdb.set_trace()
#dmtO = d(pred['value'])#obs_frame['Dentate gyrus basket cell']['Rheobase'])
print(dmt0)
'''