#!/usr/bin/env python
# coding: utf-8

# *Python Machine Learning 3rd Edition* by [Sebastian Raschka](https://sebastianraschka.com), Packt Publishing Ltd. 2019
#
# Code Repository: https://github.com/rasbt/python-machine-learning-book-3rd-edition
#
# Code License: [MIT License](https://github.com/rasbt/python-machine-learning-book-3rd-edition/blob/master/LICENSE.txt)

# # Python Machine Learning - Code Examples

# # Chapter 10 - Predicting Continuous Target Variables with Regression Analysis

# Note that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).

# In[1]:


get_ipython().run_line_magic('load_ext', 'watermark')
get_ipython().run_line_magic('watermark', '-a "Sebastian Raschka" -u -d -v -p numpy,pandas,matplotlib,sklearn,mlxtend')


# *The use of `watermark` is optional. You can install this IPython extension via "`pip install watermark`". For more information, please see: https://github.com/rasbt/watermark.*

# The mlxtend package (http://rasbt.github.io/mlxtend/), which contains a few useful functions on top of scikit-learn and matplotlib, can be installed via
#
#     conda install mlxtend
#
# or
#
#     pip install mlxtend
#
# ### Overview

# - [Introducing linear regression](#Introducing-linear-regression)
#   - [Simple linear regression](#Simple-linear-regression)
# - [Exploring the Housing Dataset](#Exploring-the-Housing-Dataset)
#   - [Loading the Housing dataset into a data frame](#Loading-the-Housing-dataset-into-a-data-frame)
#   - [Visualizing the important characteristics of a dataset](#Visualizing-the-important-characteristics-of-a-dataset)
# - [Implementing an ordinary least squares linear regression model](#Implementing-an-ordinary-least-squares-linear-regression-model)
#   - [Solving regression for regression parameters with gradient descent](#Solving-regression-for-regression-parameters-with-gradient-descent)
#   - [Estimating the coefficient of a regression model via scikit-learn](#Estimating-the-coefficient-of-a-regression-model-via-scikit-learn)
# - [Fitting a robust regression model using RANSAC](#Fitting-a-robust-regression-model-using-RANSAC)
# - [Evaluating the performance of linear regression models](#Evaluating-the-performance-of-linear-regression-models)
# - [Using regularized methods for regression](#Using-regularized-methods-for-regression)
# - [Turning a linear regression model into a curve - polynomial regression](#Turning-a-linear-regression-model-into-a-curve---polynomial-regression)
#   - [Modeling nonlinear relationships in the Housing Dataset](#Modeling-nonlinear-relationships-in-the-Housing-Dataset)
# - [Dealing with nonlinear relationships using random forests](#Dealing-with-nonlinear-relationships-using-random-forests)
#   - [Decision tree regression](#Decision-tree-regression)
#   - [Random forest regression](#Random-forest-regression)
# - [Summary](#Summary)
#
# In[2]:


from IPython.display import Image
get_ipython().run_line_magic('matplotlib', 'inline')


# # Introducing linear regression

# ## Simple linear regression

# In[3]:


Image(filename='images/10_01.png', width=500)


# ## Multiple linear regression

# In[4]:


Image(filename='images/10_15.png', width=500)
#
# # Exploring the Housing dataset

# ## Loading the Housing dataset into a data frame

# Description, which was previously available at: [https://archive.ics.uci.edu/ml/datasets/Housing](https://archive.ics.uci.edu/ml/datasets/Housing)
#
# Attributes:
#
# 1. CRIM      per capita crime rate by town
# 2. ZN        proportion of residential land zoned for lots over 
#                  25,000 sq.ft.
# 3. INDUS     proportion of non-retail business acres per town
# 4. CHAS      Charles River dummy variable (= 1 if tract bounds 
#                  river; 0 otherwise)
# 5. NOX       nitric oxides concentration (parts per 10 million)
# 6. RM        average number of rooms per dwelling
# 7. AGE       proportion of owner-occupied units built prior to 1940
# 8. DIS       weighted distances to five Boston employment centres
# 9. RAD       index of accessibility to radial highways
# 10. TAX      full-value property-tax rate per $10,000
# 11. PTRATIO  pupil-teacher ratio by town
# 12. B        1000(Bk - 0.63)^2 where Bk is the proportion of blacks 
#                  by town
# 13. LSTAT    % lower status of the population
# 14. MEDV     Median value of owner-occupied homes in $1000s
# 
# In[5]:


import pandas as pd

df = pd.read_csv('https://raw.githubusercontent.com/rasbt/'
                 'python-machine-learning-book-3rd-edition/'
                 'master/ch10/housing.data.txt',
                 header=None,
                 sep=r'\s+')

df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
              'NOX', 'RM', 'AGE', 'DIS', 'RAD',
              'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
df.head()
# ### Note:
#
# You can find a copy of the Housing dataset (and all other datasets used in this book) in the code bundle of this book, which you can use if you are working offline or if the UCI server at https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data is temporarily unavailable. For instance, to load the Housing dataset from a local directory, you can replace the lines
#
#     df = pd.read_csv('https://raw.githubusercontent.com/rasbt/'
#                      'python-machine-learning-book-3rd-edition/'
#                      'master/ch10/housing.data.txt',
#                      header=None,
#                      sep=r'\s+')
#
# in the code example above with
#
#     df = pd.read_csv('./housing.data',
#                      header=None,
#                      sep=r'\s+')
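# If you want the notebook to work in both settings, a simple fallback is to try the download first and read a local copy otherwise. This is only a sketch, and it assumes a local file named `housing.data` (from the code bundle) in the current working directory:
#
#     import pandas as pd
#
#     columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE',
#                'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
#     try:
#         # try the remote copy first
#         df = pd.read_csv('https://raw.githubusercontent.com/rasbt/'
#                          'python-machine-learning-book-3rd-edition/'
#                          'master/ch10/housing.data.txt',
#                          header=None, sep=r'\s+')
#     except Exception:
#         # fall back to the local copy shipped with the code bundle
#         df = pd.read_csv('./housing.data', header=None, sep=r'\s+')
#     df.columns = columns
#     df.head()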
#
# ## Visualizing the important characteristics of a dataset

# In[6]:


import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix


# In[7]:


cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']

scatterplotmatrix(df[cols].values, figsize=(10, 8),
                  names=cols, alpha=0.5)
plt.tight_layout()
#plt.savefig('images/10_03.png', dpi=300)
plt.show()


# In[8]:


import numpy as np
from mlxtend.plotting import heatmap

cm = np.corrcoef(df[cols].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols)

#plt.savefig('images/10_04.png', dpi=300)
plt.show()
#
# # Implementing an ordinary least squares linear regression model

# ...

# ## Solving regression for regression parameters with gradient descent

# In[9]:


class LinearRegressionGD(object):

    def __init__(self, eta=0.001, n_iter=20):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])
        self.cost_ = []

        for i in range(self.n_iter):
            output = self.net_input(X)
            errors = (y - output)
            # full-batch gradient descent update of the weights and the bias unit
            self.w_[1:] += self.eta * X.T.dot(errors)
            self.w_[0] += self.eta * errors.sum()
            # sum-of-squared-errors cost
            cost = (errors**2).sum() / 2.0
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return self.net_input(X)


# In[10]:


X = df[['RM']].values
y = df['MEDV'].values


# In[11]:


from sklearn.preprocessing import StandardScaler

sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
y_std = sc_y.fit_transform(y[:, np.newaxis]).flatten()


# In[12]:


lr = LinearRegressionGD()
lr.fit(X_std, y_std)


# In[13]:


plt.plot(range(1, lr.n_iter+1), lr.cost_)
plt.ylabel('SSE')
plt.xlabel('Epoch')
#plt.tight_layout()
#plt.savefig('images/10_05.png', dpi=300)
plt.show()


# In[14]:


def lin_regplot(X, y, model):
    plt.scatter(X, y, c='steelblue', edgecolor='white', s=70)
    plt.plot(X, model.predict(X), color='black', lw=2)
    return


# In[15]:


lin_regplot(X_std, y_std, lr)
plt.xlabel('Average number of rooms [RM] (standardized)')
plt.ylabel('Price in $1000s [MEDV] (standardized)')

#plt.savefig('images/10_06.png', dpi=300)
plt.show()


# In[16]:


print('Slope: %.3f' % lr.w_[1])
print('Intercept: %.3f' % lr.w_[0])


# In[17]:


num_rooms_std = sc_x.transform(np.array([[5.0]]))
price_std = lr.predict(num_rooms_std)
print("Price in $1000s: %.3f" % sc_y.inverse_transform(price_std))
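# Depending on your scikit-learn version, `StandardScaler.inverse_transform` may expect a 2D array and raise an error for the 1D `price_std` above. A version-agnostic variant of the last cell (a sketch, not part of the original code) reshapes the prediction before undoing the standardization:
#
#     num_rooms_std = sc_x.transform(np.array([[5.0]]))
#     price_std = lr.predict(num_rooms_std)
#     # reshape to a 2D column vector before inverting the scaling
#     price = sc_y.inverse_transform(price_std.reshape(-1, 1)).flatten()[0]
#     print("Price in $1000s: %.3f" % price)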
#
# ## Estimating the coefficient of a regression model via scikit-learn

# In[18]:


from sklearn.linear_model import LinearRegression


# In[19]:


slr = LinearRegression()
slr.fit(X, y)
y_pred = slr.predict(X)
print('Slope: %.3f' % slr.coef_[0])
print('Intercept: %.3f' % slr.intercept_)


# In[20]:


lin_regplot(X, y, slr)
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000s [MEDV]')

#plt.savefig('images/10_07.png', dpi=300)
plt.show()


# **Normal Equations** alternative:

# In[21]:


# adding a column vector of "ones"
Xb = np.hstack((np.ones((X.shape[0], 1)), X))
w = np.zeros(X.shape[1])
z = np.linalg.inv(np.dot(Xb.T, Xb))
w = np.dot(z, np.dot(Xb.T, y))

print('Slope: %.3f' % w[1])
print('Intercept: %.3f' % w[0])
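# Explicitly inverting Xb.T.dot(Xb) can become numerically unstable if that matrix is ill-conditioned. As an optional aside (not from the book), the same least-squares solution can be obtained with NumPy's solver, reusing the `Xb` and `y` defined above:
#
#     # solve min ||Xb w - y||^2 directly instead of forming the inverse
#     w_lstsq, *_ = np.linalg.lstsq(Xb, y, rcond=None)
#     print('Slope: %.3f' % w_lstsq[1])
#     print('Intercept: %.3f' % w_lstsq[0])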
#
# # Fitting a robust regression model using RANSAC

# In[22]:


from sklearn.linear_model import RANSACRegressor

ransac = RANSACRegressor(LinearRegression(),
                         max_trials=100,
                         min_samples=50,
                         loss='absolute_loss',
                         residual_threshold=5.0,
                         random_state=0)

ransac.fit(X, y)

inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)

line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],
            c='steelblue', edgecolor='white',
            marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],
            c='limegreen', edgecolor='white',
            marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='black', lw=2)
plt.xlabel('Average number of rooms [RM]')
plt.ylabel('Price in $1000s [MEDV]')
plt.legend(loc='upper left')

#plt.savefig('images/10_08.png', dpi=300)
plt.show()


# In[23]:


print('Slope: %.3f' % ransac.estimator_.coef_[0])
print('Intercept: %.3f' % ransac.estimator_.intercept_)
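# Note: newer scikit-learn releases renamed the `loss='absolute_loss'` option. If the RANSAC cell above raises an error about the `loss` value on your installation, the model can be constructed as follows (a sketch for newer scikit-learn versions):
#
#     ransac = RANSACRegressor(LinearRegression(),
#                              max_trials=100,
#                              min_samples=50,
#                              loss='absolute_error',  # renamed from 'absolute_loss'
#                              residual_threshold=5.0,
#                              random_state=0)
#     ransac.fit(X, y)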
#
# # Evaluating the performance of linear regression models

# In[24]:


from sklearn.model_selection import train_test_split

X = df.iloc[:, :-1].values
y = df['MEDV'].values

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)


# In[25]:


slr = LinearRegression()

slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)


# In[26]:


import numpy as np
import scipy as sp
import scipy.linalg  # make sure the linalg submodule is loaded so sp.linalg is available

ary = np.array(range(100000))


# In[27]:


get_ipython().run_line_magic('timeit', 'np.linalg.norm(ary)')


# In[28]:


get_ipython().run_line_magic('timeit', 'sp.linalg.norm(ary)')


# In[29]:


get_ipython().run_line_magic('timeit', 'np.sqrt(np.sum(ary**2))')


# In[30]:


plt.scatter(y_train_pred, y_train_pred - y_train,
            c='steelblue', marker='o', edgecolor='white',
            label='Training data')
plt.scatter(y_test_pred, y_test_pred - y_test,
            c='limegreen', marker='s', edgecolor='white',
            label='Test data')
plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, color='black', lw=2)
plt.xlim([-10, 50])
plt.tight_layout()

# plt.savefig('images/10_09.png', dpi=300)
plt.show()


# In[31]:


from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

print('MSE train: %.3f, test: %.3f' % (
        mean_squared_error(y_train, y_train_pred),
        mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
        r2_score(y_train, y_train_pred),
        r2_score(y_test, y_test_pred)))
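# As a small aside (not part of the original notebook), the MSE is reported in squared units of MEDV; taking its square root gives the RMSE in $1000s, which can be easier to interpret:
#
#     rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
#     rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))
#     print('RMSE train: %.3f, test: %.3f' % (rmse_train, rmse_test))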
#
# # Using regularized methods for regression

# In[32]:


from sklearn.linear_model import Lasso

lasso = Lasso(alpha=0.1)
lasso.fit(X_train, y_train)
y_train_pred = lasso.predict(X_train)
y_test_pred = lasso.predict(X_test)
print(lasso.coef_)


# In[33]:


print('MSE train: %.3f, test: %.3f' % (
        mean_squared_error(y_train, y_train_pred),
        mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
        r2_score(y_train, y_train_pred),
        r2_score(y_test, y_test_pred)))


# Ridge regression:

# In[34]:


from sklearn.linear_model import Ridge

ridge = Ridge(alpha=1.0)


# LASSO regression:

# In[35]:


from sklearn.linear_model import Lasso

lasso = Lasso(alpha=1.0)


# Elastic Net regression:

# In[36]:


from sklearn.linear_model import ElasticNet

elanet = ElasticNet(alpha=1.0, l1_ratio=0.5)
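# The Ridge and Elastic Net models above are only instantiated for reference. They can be fitted and evaluated exactly like the Lasso example; the following sketch (not from the book) reuses the train/test split from earlier, and the alpha values above are defaults rather than tuned hyperparameters:
#
#     for name, model in [('Ridge', ridge), ('ElasticNet', elanet)]:
#         model.fit(X_train, y_train)
#         print('%s R^2 train: %.3f, test: %.3f' % (
#               name,
#               r2_score(y_train, model.predict(X_train)),
#               r2_score(y_test, model.predict(X_test))))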
#
# # Turning a linear regression model into a curve - polynomial regression

# In[37]:


X = np.array([258.0, 270.0, 294.0,
              320.0, 342.0, 368.0,
              396.0, 446.0, 480.0, 586.0])\
             [:, np.newaxis]

y = np.array([236.4, 234.4, 252.8,
              298.6, 314.2, 342.2,
              360.8, 368.0, 391.2, 390.8])


# In[38]:


from sklearn.preprocessing import PolynomialFeatures

lr = LinearRegression()
pr = LinearRegression()
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)


# In[39]:


# fit linear features
lr.fit(X, y)
X_fit = np.arange(250, 600, 10)[:, np.newaxis]
y_lin_fit = lr.predict(X_fit)

# fit quadratic features
pr.fit(X_quad, y)
y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))

# plot results
plt.scatter(X, y, label='Training points')
plt.plot(X_fit, y_lin_fit, label='Linear fit', linestyle='--')
plt.plot(X_fit, y_quad_fit, label='Quadratic fit')
plt.xlabel('Explanatory variable')
plt.ylabel('Predicted or known target values')
plt.legend(loc='upper left')

plt.tight_layout()
#plt.savefig('images/10_11.png', dpi=300)
plt.show()


# In[40]:


y_lin_pred = lr.predict(X)
y_quad_pred = pr.predict(X_quad)


# In[41]:


print('Training MSE linear: %.3f, quadratic: %.3f' % (
        mean_squared_error(y, y_lin_pred),
        mean_squared_error(y, y_quad_pred)))
print('Training R^2 linear: %.3f, quadratic: %.3f' % (
        r2_score(y, y_lin_pred),
        r2_score(y, y_quad_pred)))
#
# ## Modeling nonlinear relationships in the Housing Dataset

# In[42]:


X = df[['LSTAT']].values
y = df['MEDV'].values

regr = LinearRegression()

# create quadratic and cubic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)

# fit features
X_fit = np.arange(X.min(), X.max(), 1)[:, np.newaxis]

regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))

regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))

regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))

# plot results
plt.scatter(X, y, label='Training points', color='lightgray')

plt.plot(X_fit, y_lin_fit,
         label='Linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue',
         lw=2,
         linestyle=':')

plt.plot(X_fit, y_quad_fit,
         label='Quadratic (d=2), $R^2=%.2f$' % quadratic_r2,
         color='red',
         lw=2,
         linestyle='-')

plt.plot(X_fit, y_cubic_fit,
         label='Cubic (d=3), $R^2=%.2f$' % cubic_r2,
         color='green',
         lw=2,
         linestyle='--')

plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000s [MEDV]')
plt.legend(loc='upper right')

#plt.savefig('images/10_12.png', dpi=300)
plt.show()


# Transforming the dataset:

# In[43]:


X = df[['LSTAT']].values
y = df['MEDV'].values

# transform features
X_log = np.log(X)
y_sqrt = np.sqrt(y)

# fit features
X_fit = np.arange(X_log.min()-1, X_log.max()+1, 1)[:, np.newaxis]

regr = regr.fit(X_log, y_sqrt)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y_sqrt, regr.predict(X_log))

# plot results
plt.scatter(X_log, y_sqrt, label='Training points', color='lightgray')

plt.plot(X_fit, y_lin_fit,
         label='Linear (d=1), $R^2=%.2f$' % linear_r2,
         color='blue',
         lw=2)

plt.xlabel('log(% lower status of the population [LSTAT])')
plt.ylabel(r'$\sqrt{Price \; in \; \$1000s \; [MEDV]}$')
plt.legend(loc='lower left')

plt.tight_layout()
#plt.savefig('images/10_13.png', dpi=300)
plt.show()
#
# # Dealing with nonlinear relationships using random forests

# ...

# ## Decision tree regression

# In[44]:


from sklearn.tree import DecisionTreeRegressor

X = df[['LSTAT']].values
y = df['MEDV'].values

tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)

sort_idx = X.flatten().argsort()

lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('% lower status of the population [LSTAT]')
plt.ylabel('Price in $1000s [MEDV]')
#plt.savefig('images/10_14.png', dpi=300)
plt.show()
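# As an optional aside (not part of the original notebook), the tree's `max_depth` controls how step-like the prediction becomes and how strongly the model can overfit. The depth values below are purely illustrative, and the R^2 scores are computed on the training data only:
#
#     for depth in (2, 3, 5):
#         tree = DecisionTreeRegressor(max_depth=depth)
#         tree.fit(X, y)
#         print('max_depth=%d, training R^2: %.3f'
#               % (depth, r2_score(y, tree.predict(X))))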
#
# ## Random forest regression

# In[45]:


X = df.iloc[:, :-1].values
y = df['MEDV'].values

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.4, random_state=1)


# In[46]:


from sklearn.ensemble import RandomForestRegressor

forest = RandomForestRegressor(n_estimators=1000,
                               criterion='mse',
                               random_state=1,
                               n_jobs=-1)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)

print('MSE train: %.3f, test: %.3f' % (
        mean_squared_error(y_train, y_train_pred),
        mean_squared_error(y_test, y_test_pred)))
print('R^2 train: %.3f, test: %.3f' % (
        r2_score(y_train, y_train_pred),
        r2_score(y_test, y_test_pred)))


# In[47]:


plt.scatter(y_train_pred,
            y_train_pred - y_train,
            c='steelblue',
            edgecolor='white',
            marker='o',
            s=35,
            alpha=0.9,
            label='Training data')
plt.scatter(y_test_pred,
            y_test_pred - y_test,
            c='limegreen',
            edgecolor='white',
            marker='s',
            s=35,
            alpha=0.9,
            label='Test data')

plt.xlabel('Predicted values')
plt.ylabel('Residuals')
plt.legend(loc='upper left')
plt.hlines(y=0, xmin=-10, xmax=50, lw=2, color='black')
plt.xlim([-10, 50])
plt.tight_layout()

#plt.savefig('images/10_15.png', dpi=300)
plt.show()
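# Note: newer scikit-learn releases renamed the `criterion='mse'` option. If the random forest cell above raises an error on your installation, the forest can be constructed as follows (a sketch for newer scikit-learn versions):
#
#     forest = RandomForestRegressor(n_estimators=1000,
#                                    criterion='squared_error',  # renamed from 'mse'
#                                    random_state=1,
#                                    n_jobs=-1)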
#
# # Summary

# ...

# ---
#
# Readers may ignore the next cell.

# In[48]:


get_ipython().system(' python ../.convert_notebook_to_script.py --input ch10.ipynb --output ch10.py')