#!/usr/bin/env python
# coding: utf-8

# # Data Aggregation and Group Operations

# In[ ]:

from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)


# In[ ]:

pd.options.display.notebook_repr_html = False


# In[ ]:

get_ipython().run_line_magic('matplotlib', 'inline')


# ## GroupBy mechanics

# In[ ]:

df = DataFrame({'key1' : ['a', 'a', 'b', 'b', 'a'],
                'key2' : ['one', 'two', 'one', 'two', 'one'],
                'data1' : np.random.randn(5),
                'data2' : np.random.randn(5)})
df


# In[ ]:

grouped = df['data1'].groupby(df['key1'])
grouped


# In[ ]:

grouped.mean()


# In[ ]:

means = df['data1'].groupby([df['key1'], df['key2']]).mean()
means


# In[ ]:

means.unstack()


# In[ ]:

states = np.array(['Ohio', 'California', 'California', 'Ohio', 'Ohio'])
years = np.array([2005, 2005, 2006, 2005, 2006])
df['data1'].groupby([states, years]).mean()


# In[ ]:

df.groupby('key1').mean()


# In[ ]:

df.groupby(['key1', 'key2']).mean()


# In[ ]:

df.groupby(['key1', 'key2']).size()


# ### Iterating over groups

# In[ ]:

for name, group in df.groupby('key1'):
    print(name)
    print(group)


# In[ ]:

for (k1, k2), group in df.groupby(['key1', 'key2']):
    print((k1, k2))
    print(group)


# In[ ]:

pieces = dict(list(df.groupby('key1')))
pieces['b']


# In[ ]:

df.dtypes


# In[ ]:

grouped = df.groupby(df.dtypes, axis=1)
dict(list(grouped))


# ### Selecting a column or subset of columns

# Indexing a GroupBy object with a column name or list of column names is
# syntactic sugar for grouping only that selection of the DataFrame:

# In[ ]:

df.groupby('key1')['data1']      # syntactic sugar for df['data1'].groupby(df['key1'])
df.groupby('key1')[['data2']]    # syntactic sugar for df[['data2']].groupby(df['key1'])


# In[ ]:

df.groupby(['key1', 'key2'])[['data2']].mean()


# In[ ]:

s_grouped = df.groupby(['key1', 'key2'])['data2']
s_grouped


# In[ ]:

s_grouped.mean()


# ### Grouping with dicts and Series

# In[ ]:

people = DataFrame(np.random.randn(5, 5),
                   columns=['a', 'b', 'c', 'd', 'e'],
                   index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
people.loc['Wes', ['b', 'c']] = np.nan  # Add a few NA values (.ix was removed from pandas; .loc selects the same row by label)
people


# In[ ]:

mapping = {'a': 'red', 'b': 'red', 'c': 'blue',
           'd': 'blue', 'e': 'red', 'f': 'orange'}


# In[ ]:

by_column = people.groupby(mapping, axis=1)
by_column.sum()


# In[ ]:

map_series = Series(mapping)
map_series


# In[ ]:

people.groupby(map_series, axis=1).count()


# ### Grouping with functions

# In[ ]:

people.groupby(len).sum()


# In[ ]:

key_list = ['one', 'one', 'one', 'two', 'two']
people.groupby([len, key_list]).min()


# ### Grouping by index levels

# In[ ]:

columns = pd.MultiIndex.from_arrays([['US', 'US', 'US', 'JP', 'JP'],
                                     [1, 3, 5, 1, 3]],
                                    names=['cty', 'tenor'])
hier_df = DataFrame(np.random.randn(4, 5), columns=columns)
hier_df


# In[ ]:

hier_df.groupby(level='cty', axis=1).count()


# ## Data aggregation

# In[ ]:

df


# In[ ]:

grouped = df.groupby('key1')
grouped['data1'].quantile(0.9)


# In[ ]:

def peak_to_peak(arr):
    return arr.max() - arr.min()
grouped.agg(peak_to_peak)


# In[ ]:

grouped.describe()


# In[ ]:

tips = pd.read_csv('ch08/tips.csv')
# Add tip percentage of total bill
tips['tip_pct'] = tips['tip'] / tips['total_bill']
tips[:6]


# ### Column-wise and multiple function application

# In[ ]:

grouped = tips.groupby(['sex', 'smoker'])


# In[ ]:

grouped_pct = grouped['tip_pct']
grouped_pct.agg('mean')


# In[ ]:

grouped_pct.agg(['mean', 'std', peak_to_peak])


# In[ ]:

grouped_pct.agg([('foo', 'mean'), ('bar', np.std)])


# In[ ]:

functions = ['count', 'mean', 'max']
result = grouped[['tip_pct', 'total_bill']].agg(functions)
result


# In[ ]:

result['tip_pct']


# In[ ]:

ftuples = [('Durchschnitt', 'mean'), ('Abweichung', np.var)]
grouped[['tip_pct', 'total_bill']].agg(ftuples)
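
# (Added illustration, not from the book.) Passing a list of functions to
# agg is essentially a column-wise concatenation of the individual
# aggregations; a minimal sketch of that equivalence, reusing `grouped`
# and `functions` from above:

# In[ ]:

parts = [grouped['tip_pct'].agg(f) for f in functions]
pd.concat(parts, axis=1, keys=functions)  # should match result['tip_pct']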

# In[ ]:

grouped.agg({'tip' : np.max, 'size' : 'sum'})


# In[ ]:

grouped.agg({'tip_pct' : ['min', 'max', 'mean', 'std'],
             'size' : 'sum'})


# ### Returning aggregated data in "unindexed" form

# In[ ]:

tips.groupby(['sex', 'smoker'], as_index=False).mean()


# ## Group-wise operations and transformations

# In[ ]:

df


# In[ ]:

k1_means = df.groupby('key1').mean().add_prefix('mean_')
k1_means


# In[ ]:

pd.merge(df, k1_means, left_on='key1', right_index=True)


# In[ ]:

key = ['one', 'two', 'one', 'two', 'one']
people.groupby(key).mean()


# In[ ]:

people.groupby(key).transform(np.mean)


# In[ ]:

def demean(arr):
    return arr - arr.mean()
demeaned = people.groupby(key).transform(demean)
demeaned


# In[ ]:

demeaned.groupby(key).mean()


# ### Apply: General split-apply-combine

# In[ ]:

def top(df, n=5, column='tip_pct'):
    # sort_index(by=...) was removed from pandas; sort_values replaces it
    return df.sort_values(by=column)[-n:]
top(tips, n=6)


# In[ ]:

tips.groupby('smoker').apply(top)


# In[ ]:

tips.groupby(['smoker', 'day']).apply(top, n=1, column='total_bill')


# In[ ]:

result = tips.groupby('smoker')['tip_pct'].describe()
result


# In[ ]:

result.unstack('smoker')


# Inside GroupBy, a method like describe is effectively shorthand for:

# In[ ]:

f = lambda x: x.describe()
grouped.apply(f)


# #### Suppressing the group keys

# In[ ]:

tips.groupby('smoker', group_keys=False).apply(top)


# ### Quantile and bucket analysis

# In[ ]:

frame = DataFrame({'data1': np.random.randn(1000),
                   'data2': np.random.randn(1000)})
factor = pd.cut(frame.data1, 4)
factor[:10]


# In[ ]:

def get_stats(group):
    return {'min': group.min(), 'max': group.max(),
            'count': group.count(), 'mean': group.mean()}
grouped = frame.data2.groupby(factor)
grouped.apply(get_stats).unstack()


# In[ ]:

# Return quantile numbers
grouping = pd.qcut(frame.data1, 10, labels=False)
grouped = frame.data2.groupby(grouping)
grouped.apply(get_stats).unstack()


# ### Example: Filling missing values with group-specific values

# In[ ]:

s = Series(np.random.randn(6))
s[::2] = np.nan
s


# In[ ]:

s.fillna(s.mean())


# In[ ]:

states = ['Ohio', 'New York', 'Vermont', 'Florida',
          'Oregon', 'Nevada', 'California', 'Idaho']
group_key = ['East'] * 4 + ['West'] * 4
data = Series(np.random.randn(8), index=states)
data[['Vermont', 'Nevada', 'Idaho']] = np.nan
data


# In[ ]:

data.groupby(group_key).mean()


# In[ ]:

fill_mean = lambda g: g.fillna(g.mean())
data.groupby(group_key).apply(fill_mean)


# In[ ]:

fill_values = {'East': 0.5, 'West': -1}
fill_func = lambda g: g.fillna(fill_values[g.name])
data.groupby(group_key).apply(fill_func)


# ### Example: Random sampling and permutation

# In[ ]:

# Hearts, Spades, Clubs, Diamonds
suits = ['H', 'S', 'C', 'D']
# range objects must be converted to lists before concatenation in Python 3
card_val = (list(range(1, 11)) + [10] * 3) * 4
base_names = ['A'] + list(range(2, 11)) + ['J', 'K', 'Q']
cards = []
for suit in suits:
    cards.extend(str(num) + suit for num in base_names)
deck = Series(card_val, index=cards)


# In[ ]:

deck[:13]


# In[ ]:

def draw(deck, n=5):
    return deck.take(np.random.permutation(len(deck))[:n])
draw(deck)


# In[ ]:

get_suit = lambda card: card[-1]  # last letter is suit
deck.groupby(get_suit).apply(draw, n=2)


# In[ ]:

# alternatively
deck.groupby(get_suit, group_keys=False).apply(draw, n=2)


# ### Example: Group weighted average and correlation

# In[ ]:

df = DataFrame({'category': ['a', 'a', 'a', 'a',
                             'b', 'b', 'b', 'b'],
                'data': np.random.randn(8),
                'weights': np.random.rand(8)})
df


# In[ ]:

grouped = df.groupby('category')
get_wavg = lambda g: np.average(g['data'], weights=g['weights'])
grouped.apply(get_wavg)
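
# (Added check, not from the book.) The same weighted average computed by
# hand for group 'a' should match the value apply returned for that group:

# In[ ]:

sub = df[df.category == 'a']
(sub['data'] * sub['weights']).sum() / sub['weights'].sum()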

# In[ ]:

close_px = pd.read_csv('ch09/stock_px.csv', parse_dates=True, index_col=0)
close_px.info()


# In[ ]:

close_px[-4:]


# In[ ]:

rets = close_px.pct_change().dropna()
spx_corr = lambda x: x.corrwith(x['SPX'])
by_year = rets.groupby(lambda x: x.year)
by_year.apply(spx_corr)


# In[ ]:

# Annual correlation of Apple with Microsoft
by_year.apply(lambda g: g['AAPL'].corr(g['MSFT']))


# ### Example: Group-wise linear regression

# In[ ]:

import statsmodels.api as sm
def regress(data, yvar, xvars):
    Y = data[yvar]
    X = data[xvars].copy()  # copy so adding the intercept doesn't mutate `data`
    X['intercept'] = 1.
    result = sm.OLS(Y, X).fit()
    return result.params


# In[ ]:

by_year.apply(regress, 'AAPL', ['SPX'])


# ## Pivot tables and Cross-tabulation

# In[ ]:

tips.pivot_table(index=['sex', 'smoker'])


# In[ ]:

tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
                 columns='smoker')


# In[ ]:

tips.pivot_table(['tip_pct', 'size'], index=['sex', 'day'],
                 columns='smoker', margins=True)


# In[ ]:

tips.pivot_table('tip_pct', index=['sex', 'smoker'], columns='day',
                 aggfunc=len, margins=True)


# In[ ]:

tips.pivot_table('size', index=['time', 'sex', 'smoker'],
                 columns='day', aggfunc='sum', fill_value=0)


# ### Cross-tabulations: crosstab

# In[ ]:

from io import StringIO  # Python 3 (was `from StringIO import StringIO` in Python 2)
data = """\
Sample  Gender  Handedness
1   Female  Right-handed
2   Male    Left-handed
3   Female  Right-handed
4   Male    Right-handed
5   Male    Left-handed
6   Male    Right-handed
7   Female  Right-handed
8   Female  Left-handed
9   Male    Right-handed
10  Female  Right-handed"""
data = pd.read_table(StringIO(data), sep=r'\s+')


# In[ ]:

data


# In[ ]:

pd.crosstab(data.Gender, data.Handedness, margins=True)


# In[ ]:

pd.crosstab([tips.time, tips.day], tips.smoker, margins=True)


# ## Example: 2012 Federal Election Commission Database

# In[ ]:

fec = pd.read_csv('ch09/P00000001-ALL.csv')


# In[ ]:

fec.info()


# In[ ]:

fec.iloc[123456]  # .ix was removed from pandas; .iloc selects by position


# In[ ]:

unique_cands = fec.cand_nm.unique()
unique_cands


# In[ ]:

unique_cands[2]


# In[ ]:

parties = {'Bachmann, Michelle': 'Republican',
           'Cain, Herman': 'Republican',
           'Gingrich, Newt': 'Republican',
           'Huntsman, Jon': 'Republican',
           'Johnson, Gary Earl': 'Republican',
           'McCotter, Thaddeus G': 'Republican',
           'Obama, Barack': 'Democrat',
           'Paul, Ron': 'Republican',
           'Pawlenty, Timothy': 'Republican',
           'Perry, Rick': 'Republican',
           "Roemer, Charles E. 'Buddy' III": 'Republican',
           'Romney, Mitt': 'Republican',
           'Santorum, Rick': 'Republican'}
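
# (Added sanity check, not from the book.) Before mapping, it's worth
# confirming the dict covers every candidate name in the file; any name
# missing here would silently map to NaN below:

# In[ ]:

set(fec.cand_nm.unique()) - set(parties)  # should be an empty set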
'Buddy' III": 'Republican', 'Romney, Mitt': 'Republican', 'Santorum, Rick': 'Republican'} # In[ ]: fec.cand_nm[123456:123461] # In[ ]: fec.cand_nm[123456:123461].map(parties) # In[ ]: # Add it as a column fec['party'] = fec.cand_nm.map(parties) # In[ ]: fec['party'].value_counts() # In[ ]: (fec.contb_receipt_amt > 0).value_counts() # In[ ]: fec = fec[fec.contb_receipt_amt > 0] # In[ ]: fec_mrbo = fec[fec.cand_nm.isin(['Obama, Barack', 'Romney, Mitt'])] # ### Donation statistics by occupation and employer # In[ ]: fec.contbr_occupation.value_counts()[:10] # In[ ]: occ_mapping = { 'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED', 'INFORMATION REQUESTED' : 'NOT PROVIDED', 'INFORMATION REQUESTED (BEST EFFORTS)' : 'NOT PROVIDED', 'C.E.O.': 'CEO' } # If no mapping provided, return x f = lambda x: occ_mapping.get(x, x) fec.contbr_occupation = fec.contbr_occupation.map(f) # In[ ]: emp_mapping = { 'INFORMATION REQUESTED PER BEST EFFORTS' : 'NOT PROVIDED', 'INFORMATION REQUESTED' : 'NOT PROVIDED', 'SELF' : 'SELF-EMPLOYED', 'SELF EMPLOYED' : 'SELF-EMPLOYED', } # If no mapping provided, return x f = lambda x: emp_mapping.get(x, x) fec.contbr_employer = fec.contbr_employer.map(f) # In[ ]: by_occupation = fec.pivot_table('contb_receipt_amt', index='contbr_occupation', columns='party', aggfunc='sum') # In[ ]: over_2mm = by_occupation[by_occupation.sum(1) > 2000000] over_2mm # In[ ]: over_2mm.plot(kind='barh') # In[ ]: def get_top_amounts(group, key, n=5): totals = group.groupby(key)['contb_receipt_amt'].sum() # Order totals by key in descending order return totals.order(ascending=False)[-n:] # In[ ]: grouped = fec_mrbo.groupby('cand_nm') grouped.apply(get_top_amounts, 'contbr_occupation', n=7) # In[ ]: grouped.apply(get_top_amounts, 'contbr_employer', n=10) # ### Bucketing donation amounts # In[ ]: bins = np.array([0, 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000]) labels = pd.cut(fec_mrbo.contb_receipt_amt, bins) labels # In[ ]: grouped = fec_mrbo.groupby(['cand_nm', labels]) grouped.size().unstack(0) # In[ ]: bucket_sums = grouped.contb_receipt_amt.sum().unstack(0) bucket_sums # In[ ]: normed_sums = bucket_sums.div(bucket_sums.sum(axis=1), axis=0) normed_sums # In[ ]: normed_sums[:-2].plot(kind='barh', stacked=True) # ### Donation statistics by state # In[ ]: grouped = fec_mrbo.groupby(['cand_nm', 'contbr_st']) totals = grouped.contb_receipt_amt.sum().unstack(0).fillna(0) totals = totals[totals.sum(1) > 100000] totals[:10] # In[ ]: percent = totals.div(totals.sum(1), axis=0) percent[:10]