import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sqlalchemy import create_engine
engine = create_engine('postgresql://[email protected]:5432/stuartlynn')
%matplotlib inline
The Census produces summary tables for a large number of variables, such as the number of individuals in a given age bracket, rent range, etc. While these are often sufficient for producing segments, joint probabilities, for example vision problems by household income, are usually not available.
Public Use Microdata, however, contains the individual responses of survey respondents at the PUMA (Public Use Microdata Area) level, which allows the joint or conditional probability distribution of any set of variables to be computed.
This document is a test to see if we can train a machine learning model to predict custom probability distributions for a given set of microdata variables and then map these to other census geometries.
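As a quick illustration of what the microdata make possible (a minimal sketch, not part of the pipeline below; it assumes a DataFrame pums of raw PUMS records with the DEYE and DEAR columns introduced in the next section), a joint distribution over any pair of variables can be tabulated directly:
# Sketch: joint probability table of vision (DEYE) by hearing (DEAR) responses
# (crosstab's normalize argument requires pandas >= 0.18.1)
joint = pd.crosstab(pums['DEYE'], pums['DEAR'], normalize=True)
joint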
To get a given breakdown of variables we use the DataFerrett tool provided by the Census Bureau. This is not ideal for general use, but it allows us to perform some easy tests.
From there we extract the variables $DEYE$, $DEAR$ and $AGEP$ (age).
Value | DEYE | DEAR
---|---|---
1 | Has vision problem | Has hearing problem
2 | No vision problem | No hearing problem
The data file pums_age_vision_hearing.csv extracted from DataFerrett has one entry per survey respondent with their age and their responses to the vision and hearing questions.
data = pd.read_csv("data/pums_age_vision_hearing.csv")
data.head()
fig = plt.figure(figsize=(20, 10))
plt.subplot(121)
# groupby on the boolean (AGEP < 50) orders the groups as False (50 and over) then True (under 50);
# within each age group the response 1 (problem) comes before 2 (no problem)
labels = ['>= 50 problem', '>= 50 no problem', '< 50 problem', '< 50 no problem']
ax = data.groupby([data['AGEP'] < 50, 'DEAR']).count()['AGEP'].plot(kind='bar')
ax.set_xticklabels(labels)
ax.set_title("EAR")
plt.subplot(122)
ax = data.groupby([data['AGEP'] < 50, 'DEYE']).count()['AGEP'].plot(kind='bar')
ax.set_xticklabels(labels)
ax.set_title("EYE")
We are also going to read in the PUMA summary table, which contains the same columns as the other census geometries. This will give us the training data we need for the model.
pumas = pd.read_sql_query('select * from pumas_joined', engine)
Our goal is to calculate the joint distribution of a person having vision and hearing problems per census geometry. To do this we group by PUMA and count the four possible combinations.
# Count respondents in each PUMA for every combination of vision/hearing responses
eye_ear = data[(data['DEYE'] == 1) & (data['DEAR'] == 1)].groupby("PUMA10").count()['AGEP']
eye_noear = data[(data['DEYE'] == 1) & (data['DEAR'] == 2)].groupby("PUMA10").count()['AGEP']
noeye_ear = data[(data['DEYE'] == 2) & (data['DEAR'] == 1)].groupby("PUMA10").count()['AGEP']
noeye_noear = data[(data['DEYE'] == 2) & (data['DEAR'] == 2)].groupby("PUMA10").count()['AGEP']
summary = pd.DataFrame({"eye_ear": eye_ear,
                        "eye_noear": eye_noear,
                        "noeye_ear": noeye_ear,
                        "noeye_noear": noeye_noear})
summary.head()
We can plot the breakdown of our counts across a few PUMAs.
summary.head().T.plot(kind='pie', subplots=True, figsize=(80,15))
The next step is to generate the model features. We take the PUMA dataset and discard IDs, geometries, margin-of-error columns, etc.
a = pumas.set_index(pumas['pumas_10'])
moe_cols = [ f for f in a.columns if 'moe' in f ]
data = a[a.columns.difference(['the_geom',
'ogc_fid',
'wkb_geometry',
'cartodb_id',
'geoid',
'pumas_10' ] + moe_cols)].join(summary)
data.head()
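Not every PUMA necessarily has respondents for every combination, so as a quick sanity check (a small sketch, not in the original notebook) we can look for missing values introduced by the join:
# Count PUMAs with no respondents for a given vision/hearing combination
data[summary.columns].isnull().sum()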
We next normalize the target counts to get a probability matrix across the different variables we are interested in.
target= data[summary.columns].apply(lambda x: x/data[summary.columns].sum(axis=1), axis=0)
features = data[data.columns.difference(summary.columns)]
print ' target : ', np.shape(target.as_matrix()), " features", np.shape(features.as_matrix())
target.head()
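Since each row of the target matrix is a set of probabilities for one PUMA, a quick check (a small sketch, not in the original notebook) is that the rows sum to one:
# Each PUMA's four probabilities should sum to (approximately) 1
target.sum(axis=1).describe()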
For the machine learning algorithm to work well we need to make sure that our features are centred around 0 and are all scaled to roughly the same range. To do this we first normalize any columns that are counts by their universe total (e.g. total population), and then we subtract the mean and divide by the standard deviation:
$$ \frac{x - \bar{x}}{\sigma(x)} $$
def scale_features(features):
    # Bracket counts measured 'in_households' (excluding gini) are divided by their combined total
    housing_brackets = [f for f in features if 'in_households' in f and 'gini' not in f]
    housing_features = features[housing_brackets].apply(lambda x: x / features[housing_brackets].sum(axis=1))
    # Other housing counts are divided by their combined total
    other_housing = [a for a in features.columns if 'housing' in a and 'moe' not in a]
    other_housing_features = features[other_housing].apply(lambda x: x / features[other_housing].sum(axis=1), axis=0)
    # Population brackets are divided by the total population
    pop_brackets = [f for f in features if 'pop' in f and 'not_a_us_citizen_pop' not in f]
    ignore = ['rn', 'not_a_us_citizen_pop', 'not_a_us_citizen_pop_moe', 'total_pop'] + [r for r in features.columns if 'moe' in r]
    scaled_features = features[pop_brackets].apply(lambda x: x / features['total_pop'])
    scaled_features = scaled_features.join(housing_features).join(other_housing_features)
    # Keep the remaining (non-count) columns unscaled and drop the ignored ones
    normed_features = scaled_features.join(features[features.columns.difference(ignore + housing_brackets + pop_brackets + other_housing)])
    normed_features = normed_features[normed_features.columns.difference(['total_pop'])]
    return normed_features
def norm_features(features, means, stds):
    # Normalize with the supplied (training-set) means and standard deviations so that
    # new data is scaled consistently with the data the model was trained on
    normed_features = (features - means) / stds
    return normed_features
scaled_features = scale_features(features)
means = scaled_features.apply(lambda x: np.mean(x), axis=0)
stds = scaled_features.apply(lambda x: np.std(x), axis=0)
normed_features = norm_features(scaled_features,means,stds)
normed_features.head()
normed_features[['median_age_female',
'median_age',
'percent_household_income_spent_on_rent',
'10_to_14_years_female_pop']].hist( figsize=(20,10))
scaled_features.to_csv("/Users/stuartlynn/scaled_pumas.csv")
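The target probabilities themselves are rescaled to the unit interval with min-max normalization,
$$ \frac{x - \min(x)}{\max(x) - \min(x)} $$
keeping the per-column minima and maxima so that predictions can be mapped back to probabilities later.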
target_means = target.apply(lambda x: np.mean(x), axis=0)
target_std = target.apply(lambda x: np.std(x), axis=0)
target_max = target.apply(lambda x: np.max(x), axis=0)
target_min = target.apply(lambda x: np.min(x), axis=0)
normed_target = target.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)), axis=0)
normed_target.hist(figsize=(20,10))
Now that the data is in good shape we can train our model. We split the data into training and test sets.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from sklearn.cross_validation import train_test_split
x_train, x_test, y_train, y_test = train_test_split(normed_features.as_matrix(),
normed_target.as_matrix(),
test_size=0.30,
random_state=42)
def invert_target(t):
    # Invert the min-max normalization applied to the targets above
    return t * (target_max - target_min) + target_min
Our model is a multi-layer neural network with $tanh$ activations between the layers and on the final output. We define the loss as the mean squared error and use stochastic gradient descent to train the network.
The input dimension is the number of features we are using, and as we are predicting the probability of 4 classes, the output layer is a vector of 4 values.
model = Sequential()
# Single hidden layer of 70 units with dropout and tanh activation
model.add(Dense(output_dim=70, input_dim=np.shape(x_train)[1]))
model.add(Dropout(0.2))
model.add(Activation('tanh'))
# model.add(Dense(20))
# model.add(Dropout(0.2))
# model.add(Activation('tanh'))
# Output layer: one value per probability class
model.add(Dense(output_dim=4))
model.add(Activation('tanh'))
model.compile(loss='mean_squared_error', optimizer='sgd')
With the model compiled we can fit it to our training set.
history = model.fit(x_train, y_train,
verbose=0,
nb_epoch=3000,
batch_size=600,
show_accuracy=True,
validation_data=(x_test, y_test) )
def plot_model_performance(history):
    fig = plt.figure(figsize=(20, 10))
    plt.subplot(121)
    plt.title('Loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    train = plt.plot(history.epoch, history.history['loss'], 'g-', label='train')
    val = plt.plot(history.epoch, history.history['val_loss'], 'r-', label='validation')
    plt.legend(loc='upper left')
    plt.subplot(122)
    plt.title('Accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.plot(history.epoch, history.history['acc'], 'g-', label='train')
    plt.plot(history.epoch, history.history['val_acc'], 'r-', label='validation')
    plt.legend(loc='upper left')
plot_model_performance(history)
score = model.evaluate(x_test, y_test, batch_size=2000)
print 'error %f, error in units of sd %f, accuracy %f ' %(score, score/np.std(y_test), 1-score)
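It can also be useful to look at the error per output class rather than a single aggregate score; the following is a minimal sketch (not part of the original analysis) computing the mean absolute error on the normalized scale:
# Sketch: per-class mean absolute error on the normalized (0-1) scale
test_prediction = model.predict(x_test, batch_size=16)
print 'per-class MAE: ', np.mean(np.abs(test_prediction - y_test), axis=0)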
With the model trained we can predict on our test set and compare with the actual data.
result = model.predict_proba(x_test, batch_size=16)
result
bins = 20
range = [0,1]
alpha = 0.7
fig = plt.figure(figsize=(20,10))
plt.subplot(141)
labels = target.columns
plt.xlabel('Probability')
plt.title("Both vision and hearing disability")
truth = [r[0] for r in y_test]
prediction = [r[0] for r in result]
range = [np.min(truth + prediction), np.max(truth + prediction)]
plt.hist(truth, color='red', bins=bins ,alpha =alpha, range=range )
plt.hist(prediction, color='blue', bins=bins, alpha =alpha, range=range )
plt.subplot(142)
plt.title("Vision disability only")
plt.xlabel('Probability')
truth = [r[1] for r in y_test]
prediction = [r[1] for r in result]
range = [np.min(truth + prediction), np.max(truth + prediction)]
plt.hist(truth, color='red', bins=bins, alpha=alpha, range=range)
plt.hist(prediction, color='blue', bins=bins, alpha=alpha, range=range)
plt.subplot(143)
plt.title("Hearing disability only")
plt.xlabel('Probability')
truth = [r[2] for r in y_test]
prediction = [r[2] for r in result]
range = [np.min(truth + prediction), np.max(truth + prediction)]
plt.hist(truth, color='red', bins=bins, alpha=alpha, range=range)
plt.hist(prediction, color='blue', bins=bins, alpha=alpha, range=range)
plt.subplot(144)
plt.title("No disability")
plt.xlabel('Probability')
truth = [r[3] for r in y_test]
prediction = [r[3] for r in result]
range = [np.min(truth + prediction), np.max(truth + prediction)]
plt.hist(truth, color='red', bins=bins, alpha=alpha, range=range)
plt.hist(prediction, color='blue', bins=bins, alpha=alpha, range=range)
fig = plt.figure(figsize=(20,10))
plt.title('Errors')
def invert(x, xmin, xmax):
    # Undo the min-max normalization for a single target column
    return x * (xmax - xmin) + xmin
plt.subplot(231)
plt.title("Both vision and hearing disability")
plt.xlabel('Truth')
plt.ylabel('Prediction')
truth = [invert(r[0],target_min[0], target_max[0]) for r in y_test]
prediction = [invert(r[0],target_min[0], target_max[0]) for r in result]
plt.scatter(truth , prediction)
plt.plot( [np.min(truth), np.max(truth)], [np.min(truth),np.max(truth)], 'r' )
plt.subplot(232)
plt.title("Vision and hearing disability")
plt.xlabel('Truth')
plt.ylabel('Predction')
truth = [invert(r[1],target_min[1], target_max[1]) for r in y_test]
prediction = [invert(r[1],target_min[1], target_max[1]) for r in result]
plt.scatter(truth , prediction)
plt.plot( [np.min(truth), np.max(truth)], [np.min(truth),np.max(truth)], 'r' )
plt.subplot(233)
plt.title("Hearing disability only")
plt.xlabel('Truth')
plt.ylabel('Prediction')
truth = [invert(r[2],target_min[2], target_max[2]) for r in y_test]
prediction = [invert(r[2],target_min[2], target_max[2]) for r in result]
plt.scatter(truth , prediction)
plt.plot( [np.min(truth), np.max(truth)], [np.min(truth),np.max(truth)], 'r' )
plt.subplot(234)
plt.title("No disability")
plt.xlabel('Truth')
plt.ylabel('Prediction')
truth = [invert(r[3],target_min[3], target_max[3]) for r in y_test]
prediction = [invert(r[3],target_min[3], target_max[3]) for r in result]
plt.scatter(truth , prediction)
plt.plot( [np.min(truth), np.max(truth)], [np.min(truth),np.max(truth)], 'r' )
feature_list = ','.join(normed_features.columns)
reader = pd.read_sql_query('select * from census_tacts_human' % locals(),
engine,
chunksize=10000)
results = []
geoms = []
for chunk in reader:
    # Keep only the model's feature columns plus total_pop and the geoid identifier
    data = chunk[normed_features.columns | ['total_pop', 'geoid']]
    data = data.convert_objects(convert_numeric=True)
    data.index = data['geoid']
    new_features = data[data.columns.difference(['geoid'])]
    # Apply the same scaling and normalization (with the training means/stds) as the training data
    scaled_data = scale_features(new_features)
    normed_data = norm_features(scaled_data, means, stds)
    prediction = model.predict_proba(normed_data.as_matrix(), batch_size=16)
    results.append(pd.DataFrame(prediction,
                                columns=['eye_ear', 'eye_noear', 'noeye_ear', 'noeye_noear'],
                                index=chunk['geoid']))
result = pd.concat(results)
result['eye_ear']= invert(result['eye_ear'], target_min[0], target_max[0])
result['eye_noear'] = invert(result['eye_noear'], target_min[1], target_max[1])
result['noeye_ear'] = invert(result['noeye_ear'], target_min[2], target_max[2])
result['noeye_noear'] = invert(result['noeye_noear'],target_min[3], target_max[3])
geom_lookup = pd.read_sql_query('select geoid, the_geom, total_pop from census_tacts_human', engine)
geom_lookup.index= geom_lookup['geoid']
result.join(geom_lookup).to_csv("eye_ear_result.csv")
pumas.index = pumas['pumas_10']
target_with_geom = target.join(pumas[['pumas_10', 'total_pop', 'geoid']])
target_with_geom.to_csv("pumas_eye_ear.csv")
from IPython.display import IFrame
IFrame('https://team.cartodb.com/u/stuartlynn/viz/e02906b6-e0a8-11e5-b257-0e787de82d45/embed_map',
width=800, height=700)
IFrame('https://team.cartodb.com/u/stuartlynn/viz/355e76f6-e0aa-11e5-bf37-0e5db1731f59/embed_map',
width=800, height=700)