#!/usr/bin/env python
# coding: utf-8

# # AddMissingIndicator
#
# AddMissingIndicator adds binary variables that indicate missing data (hence, missing indicators). The binary variables take the value 1 if the observation's value is missing, or 0 otherwise. AddMissingIndicator adds one binary variable per original variable.
#
# **For this demonstration, we use the Ames House Prices dataset produced by Professor Dean De Cock:**
#
# [Dean De Cock (2011) Ames, Iowa: Alternative to the Boston Housing
# Data as an End of Semester Regression Project, Journal of Statistics Education, Vol.19, No. 3](http://jse.amstat.org/v19n3/decock.pdf)
#
# The version of the dataset used in this notebook can be obtained from [Kaggle](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data)

# ## Version

# In[1]:


# Make sure you are using this
# Feature-engine version.
import feature_engine

feature_engine.__version__


# In[2]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

from feature_engine.imputation import (
    AddMissingIndicator,
    MeanMedianImputer,
    CategoricalImputer,
)


# ## Load data

# In[3]:


# Download the data from Kaggle and store it
# in the same folder as this notebook.
data = pd.read_csv('houseprice.csv')

data.head()


# In[4]:


# Separate the data into train and test sets.
X_train, X_test, y_train, y_test = train_test_split(
    data.drop(['Id', 'SalePrice'], axis=1),
    data['SalePrice'],
    test_size=0.3,
    random_state=0,
)

X_train.shape, X_test.shape


# ## Add indicators
#
# We will add indicators to 4 variables with missing data.

# In[5]:


# Check the proportion of missing data.
X_train[['Alley', 'MasVnrType', 'LotFrontage', 'MasVnrArea']].isnull().mean()


# In[6]:


# Set up the imputer with the variables for which
# we want indicators.
imputer = AddMissingIndicator(
    variables=['Alley', 'MasVnrType', 'LotFrontage', 'MasVnrArea'],
)

imputer.fit(X_train)


# In[7]:


# The variables for which missing
# indicators will be added.
imputer.variables_


# In[8]:


# Check the added indicators. They take the name of
# the original variable plus the suffix '_na'.
train_t = imputer.transform(X_train)
test_t = imputer.transform(X_test)

train_t[['Alley_na', 'MasVnrType_na', 'LotFrontage_na', 'MasVnrArea_na']].head()


# In[9]:


# Note that the original variables still have missing data.
train_t[['Alley', 'MasVnrType', 'LotFrontage', 'MasVnrArea']].isnull().mean()


# ## Indicators plus imputation
#
# We normally add missing indicators and also impute the original variables: with the mean or median if the variable is numerical, or with the mode if the variable is categorical. So let's do that.

# In[10]:


# Check the variable types.
X_train[['Alley', 'MasVnrType', 'LotFrontage', 'MasVnrArea']].dtypes


# The first 2 variables are categorical, so I will impute them with the most frequent category. The last 2 variables are numerical, so I will impute them with the median.

# In[11]:


# Create a pipeline with the imputation strategy.
pipe = Pipeline([
    ('indicators', AddMissingIndicator(
        variables=['Alley', 'MasVnrType', 'LotFrontage', 'MasVnrArea'],
    )),
    ('imputer_num', MeanMedianImputer(
        imputation_method='median',
        variables=['LotFrontage', 'MasVnrArea'],
    )),
    ('imputer_cat', CategoricalImputer(
        imputation_method='frequent',
        variables=['Alley', 'MasVnrType'],
    )),
])


# In[12]:


# With fit() the transformers learn the
# required parameters.
pipe.fit(X_train)
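
# As an aside, fit() and transform() can be combined in a single call with
# fit_transform(). A minimal sketch of the shorthand; it returns the same
# transformed train set that we obtain step by step below:
pipe.fit_transform(X_train).head()
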
# In[13]:


# We can look into the attributes of the
# different transformers.

# Check the variables that will take indicators.
pipe.named_steps['indicators'].variables_


# In[14]:


# Check the median values for the imputation.
pipe.named_steps['imputer_num'].imputer_dict_


# In[15]:


# Check the mode values for the imputation.
pipe.named_steps['imputer_cat'].imputer_dict_


# In[16]:


# Now, we transform the data.
train_t = pipe.transform(X_train)
test_t = pipe.transform(X_test)


# In[17]:


# Let's look at the transformed variables:
# the original variables plus the indicators.
vars_ = [
    'Alley', 'MasVnrType', 'LotFrontage', 'MasVnrArea',
    'Alley_na', 'MasVnrType_na', 'LotFrontage_na', 'MasVnrArea_na',
]

train_t[vars_].head()


# In[18]:


# After the transformation, the variables no longer
# show missing data.
train_t[vars_].isnull().sum()


# ## Automatically select the variables
#
# We have the option to add indicators to all variables in the dataset, or only to the variables with missing data. AddMissingIndicator can select which variables to transform automatically.
#
# When the parameter `variables` is left as None and the parameter `missing_only` is left as True, the imputer adds indicators to all variables with missing data.
#
# When the parameter `variables` is left as None and the parameter `missing_only` is switched to False, the imputer adds indicators to all variables.
#
# It is good practice to use `missing_only=True` when we set `variables=None`, so that indicators are added only to variables that actually show missing data.
#
# ### Automatically find variables with NA

# In[19]:


# With missing_only=True, missing indicators will only be added
# to the variables found to have missing data when fit() runs
# on the train set.
imputer = AddMissingIndicator(
    variables=None,
    missing_only=True,
)

# Find the variables with missing data.
imputer.fit(X_train)


# In[20]:


# The original variables argument was None.
imputer.variables


# In[21]:


# In variables_ we find the list of variables with NA
# in the train set.
imputer.variables_


# In[22]:


len(imputer.variables_)


# We've got 19 variables with NA in the train set.

# In[23]:


# After transforming the dataset, we see additional columns
# corresponding to the missing indicators.
train_t = imputer.transform(X_train)
test_t = imputer.transform(X_test)

X_train.shape, train_t.shape


# In[24]:


# Towards the right, we find the missing indicators.
train_t.head()


# ## Add indicators to all variables

# In[25]:


# We can also set up the transformer to add
# missing indicators to all variables.
imputer = AddMissingIndicator(
    variables=None,
    missing_only=False,
)

imputer.fit(X_train)


# In[26]:


# The attribute variables_ now shows all the variables
# in the train set.
len(imputer.variables_)


# In[27]:


# After transforming the dataset,
# we obtain double the number of columns.
train_t = imputer.transform(X_train)
test_t = imputer.transform(X_test)

X_train.shape, train_t.shape
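
# To make the effect of missing_only concrete, here is a minimal sketch on a
# toy DataFrame (the frame and its column names are made up for illustration):

toy = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [4.0, 5.0, 6.0]})

# Only 'a' has NA, so missing_only=True adds just 'a_na'.
AddMissingIndicator(missing_only=True).fit_transform(toy)

# With missing_only=False, both 'a_na' and 'b_na' are added.
AddMissingIndicator(missing_only=False).fit_transform(toy)
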
# ## Automatic imputation
#
# We can automatically impute missing data in numerical and categorical variables, letting the imputers find out which variables to impute.
#
# We need to set the parameter `variables` to None in all imputers. None is the default value, so we can simply omit the parameter when initialising the transformers.

# In[28]:


# Create a pipeline with the imputation strategy.
pipe = Pipeline([

    # add indicators to the variables with NA
    ('indicators', AddMissingIndicator(
        missing_only=True,
    )),

    # impute all numerical variables with the median
    ('imputer_num', MeanMedianImputer(
        imputation_method='median',
    )),

    # impute all categorical variables with the mode
    ('imputer_cat', CategoricalImputer(
        imputation_method='frequent',
    )),
])


# In[29]:


# With fit() the transformers learn the
# required parameters.
pipe.fit(X_train)


# In[30]:


# We can look into the attributes of the
# different transformers.

# Check the variables that will take indicators.
pipe.named_steps['indicators'].variables_


# In[31]:


# Check the median values for the imputation.
pipe.named_steps['imputer_num'].imputer_dict_


# In[32]:


# Check the mode values for the imputation.
pipe.named_steps['imputer_cat'].imputer_dict_


# In[33]:


# Now, we transform the data.
train_t = pipe.transform(X_train)
test_t = pipe.transform(X_test)


# In[34]:


# We should see a complete case dataset.
train_t.isnull().sum()


# In[35]:


# Sanity check: the list of variables with remaining
# missing values should be empty.
[v for v in train_t.columns if train_t[v].isnull().sum() > 0]
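
# In practice, the imputation pipeline is usually followed by categorical
# encoding and a model. Below is a minimal sketch of that, assuming we one-hot
# encode the categorical variables and fit a linear regression; the encoder
# and model choices here are illustrative, not prescribed by Feature-engine.

from sklearn.linear_model import LinearRegression

from feature_engine.encoding import OneHotEncoder

model_pipe = Pipeline([
    ('indicators', AddMissingIndicator(missing_only=True)),
    ('imputer_num', MeanMedianImputer(imputation_method='median')),
    ('imputer_cat', CategoricalImputer(imputation_method='frequent')),
    # encode all categorical variables so the model receives only numbers
    ('encoder', OneHotEncoder()),
    ('regressor', LinearRegression()),
])

model_pipe.fit(X_train, y_train)

# R^2 on the test set.
model_pipe.score(X_test, y_test)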