from __future__ import division
from numpy.random import randn
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
np.random.seed(12345)
plt.rc('figure', figsize=(10, 6))
from pandas import Series, DataFrame
import pandas as pd
np.set_printoptions(precision=4)
%pwd
!cat ch06/ex1.csv
df = pd.read_csv('ch06/ex1.csv')
df
pd.read_table('ch06/ex1.csv', sep=',')
!cat ch06/ex2.csv
pd.read_csv('ch06/ex2.csv', header=None)
pd.read_csv('ch06/ex2.csv', names=['a', 'b', 'c', 'd', 'message'])
names = ['a', 'b', 'c', 'd', 'message']
pd.read_csv('ch06/ex2.csv', names=names, index_col='message')
!cat ch06/csv_mindex.csv
parsed = pd.read_csv('ch06/csv_mindex.csv', index_col=['key1', 'key2'])
parsed
list(open('ch06/ex3.txt'))
result = pd.read_table('ch06/ex3.txt', sep=r'\s+')
result
!cat ch06/ex4.csv
pd.read_csv('ch06/ex4.csv', skiprows=[0, 2, 3])
!cat ch06/ex5.csv
result = pd.read_csv('ch06/ex5.csv')
result
pd.isnull(result)
result = pd.read_csv('ch06/ex5.csv', na_values=['NULL'])
result
sentinels = {'message': ['foo', 'NA'], 'something': ['two']}
pd.read_csv('ch06/ex5.csv', na_values=sentinels)
result = pd.read_csv('ch06/ex6.csv')
result
pd.read_csv('ch06/ex6.csv', nrows=5)
chunker = pd.read_csv('ch06/ex6.csv', chunksize=1000)
chunker
chunker = pd.read_csv('ch06/ex6.csv', chunksize=1000)
tot = Series([])
for piece in chunker:
    tot = tot.add(piece['key'].value_counts(), fill_value=0)
tot = tot.order(ascending=False)
tot[:10]
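NB. Series.order was removed in later pandas releases; on a recent pandas the same ordering can be written with sort_values:
tot = tot.sort_values(ascending=False)
tot[:10]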
data = pd.read_csv('ch06/ex5.csv')
data
data.to_csv('ch06/out.csv')
!cat ch06/out.csv
data.to_csv(sys.stdout, sep='|')
data.to_csv(sys.stdout, na_rep='NULL')
data.to_csv(sys.stdout, index=False, header=False)
data.to_csv(sys.stdout, index=False, columns=['a', 'b', 'c'])
dates = pd.date_range('1/1/2000', periods=7)
ts = Series(np.arange(7), index=dates)
ts.to_csv('ch06/tseries.csv')
!cat ch06/tseries.csv
Series.from_csv('ch06/tseries.csv', parse_dates=True)
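NB. Series.from_csv was removed in later pandas releases; a rough equivalent with read_csv (assuming the file has no header row, as written above) is:
pd.read_csv('ch06/tseries.csv', parse_dates=True, index_col=0, header=None).squeeze('columns')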
!cat ch06/ex7.csv
import csv
f = open('ch06/ex7.csv')
reader = csv.reader(f)
for line in reader:
    print(line)
lines = list(csv.reader(open('ch06/ex7.csv')))
header, values = lines[0], lines[1:]
data_dict = {h: v for h, v in zip(header, zip(*values))}
data_dict
class my_dialect(csv.Dialect):
    lineterminator = '\n'
    delimiter = ';'
    quotechar = '"'
    quoting = csv.QUOTE_MINIMAL
with open('mydata.csv', 'w') as f:
    writer = csv.writer(f, dialect=my_dialect)
    writer.writerow(('one', 'two', 'three'))
    writer.writerow(('1', '2', '3'))
    writer.writerow(('4', '5', '6'))
    writer.writerow(('7', '8', '9'))
%cat mydata.csv
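NB. The same dialect class can be passed to csv.reader to read the file back, as a quick round-trip check:
with open('mydata.csv') as f:
    for line in csv.reader(f, dialect=my_dialect):
        print(line)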
obj = """
{"name": "Wes",
"places_lived": ["United States", "Spain", "Germany"],
"pet": null,
"siblings": [{"name": "Scott", "age": 25, "pet": "Zuko"},
{"name": "Katie", "age": 33, "pet": "Cisco"}]
}
"""
import json
result = json.loads(obj)
result
asjson = json.dumps(result)
siblings = DataFrame(result['siblings'], columns=['name', 'age'])
siblings
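NB. pandas can also serialize directly to JSON; for example, orient='records' emits one JSON object per row:
siblings.to_json(orient='records')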
NB. The Yahoo! Finance API has changed and this example no longer works
from lxml.html import parse
from urllib2 import urlopen
parsed = parse(urlopen('http://finance.yahoo.com/q/op?s=AAPL+Options'))
doc = parsed.getroot()
links = doc.findall('.//a')
links[15:20]
lnk = links[28]
lnk
lnk.get('href')
lnk.text_content()
urls = [lnk.get('href') for lnk in doc.findall('.//a')]
urls[-10:]
tables = doc.findall('.//table')
calls = tables[9]
puts = tables[13]
rows = calls.findall('.//tr')
def _unpack(row, kind='td'):
    elts = row.findall('.//%s' % kind)
    return [val.text_content() for val in elts]
_unpack(rows[0], kind='th')
_unpack(rows[1], kind='td')
from pandas.io.parsers import TextParser
def parse_options_data(table):
    rows = table.findall('.//tr')
    header = _unpack(rows[0], kind='th')
    data = [_unpack(r) for r in rows[1:]]
    return TextParser(data, names=header).get_chunk()
call_data = parse_options_data(calls)
put_data = parse_options_data(puts)
call_data[:10]
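NB. Since the Yahoo! page above is gone, here is a self-contained sketch of the same table-parsing workflow on an inline HTML fragment (the strike/price values below are made up for illustration):
from lxml.html import fromstring
sample_html = """
<html><body><table>
<tr><th>Strike</th><th>Last</th></tr>
<tr><td>100</td><td>1.25</td></tr>
<tr><td>105</td><td>0.70</td></tr>
</table></body></html>
"""
sample_table = fromstring(sample_html).findall('.//table')[0]
parse_options_data(sample_table)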
%cd ch06/mta_perf/Performance_XML_Data
!head -21 Performance_MNR.xml
from lxml import objectify
path = 'Performance_MNR.xml'
parsed = objectify.parse(open(path))
root = parsed.getroot()
data = []
skip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ',
               'DESIRED_CHANGE', 'DECIMAL_PLACES']
for elt in root.INDICATOR:
    el_data = {}
    for child in elt.getchildren():
        if child.tag in skip_fields:
            continue
        el_data[child.tag] = child.pyval
    data.append(el_data)
perf = DataFrame(data)
perf
root
root.get('href')
root.text
%cd ../..
frame = pd.read_csv('ch06/ex1.csv')
frame
frame.to_pickle('ch06/frame_pickle')
pd.read_pickle('ch06/frame_pickle')
store = pd.HDFStore('mydata.h5')
store['obj1'] = frame
store['obj1_col'] = frame['a']
store
store['obj1']
store.close()
os.remove('mydata.h5')
import requests
url = 'https://api.github.com/repos/pydata/pandas/milestones/28/labels'
resp = requests.get(url)
resp
data = json.loads(resp.text)  # decode the JSON response body into Python objects
data[:5]
issue_labels = DataFrame(data)
issue_labels
import sqlite3
query = """
CREATE TABLE test
(a VARCHAR(20), b VARCHAR(20),
c REAL, d INTEGER
);"""
con = sqlite3.connect(':memory:')
con.execute(query)
con.commit()
data = [('Atlanta', 'Georgia', 1.25, 6),
        ('Tallahassee', 'Florida', 2.6, 3),
        ('Sacramento', 'California', 1.7, 5)]
stmt = "INSERT INTO test VALUES(?, ?, ?, ?)"
con.executemany(stmt, data)
con.commit()
cursor = con.execute('select * from test')
rows = cursor.fetchall()
rows
cursor.description
DataFrame(rows, columns=[x[0] for x in cursor.description])
import pandas.io.sql as sql
sql.read_sql('select * from test', con)
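NB. With a recent pandas, a frame can also be written back to the database with to_sql (the table name 'test_copy' here is arbitrary):
test_data = sql.read_sql('select * from test', con)
test_data.to_sql('test_copy', con, index=False)
sql.read_sql('select * from test_copy', con)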