#!/usr/bin/env python
# coding: utf-8

# Notebook export: build bureau-level features for the Home Credit Default Risk dataset.
# This section: environment setup, data loading, and categorical cleanup of
# CREDIT_ACTIVE / CREDIT_CURRENCY in bureau.csv.

# In[1]:

# Render notebook output at full browser width.
from IPython.core.display import display, HTML
display(HTML(""))

# In[2]:

get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

# In[3]:

import os, math, subprocess
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display

# from lib_modeling import *
from lib_feature_engineering import *

# some settings for displaying Pandas results
pd.set_option('display.width', 2000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.precision', 4)
# FIX: -1 was deprecated in pandas 1.0 and removed in 2.0; None means "no truncation"
# and behaves identically on older versions.
pd.set_option('display.max_colwidth', None)

# # Load data

# In[4]:

# load train data
data_path = "home-credit-default-risk/application_train.csv"
pdf_train = pd.read_csv(data_path)

# filter by tvt (train/validation/test) code: keep only rows tagged 'train'
pdf_tvt_extend = pd.read_pickle("pdf_tvt_extend.pkl", compression="bz2")
pdf_train_filtered = (pdf_tvt_extend.query("tvt_code == 'train'")
                      .merge(pdf_train[["SK_ID_CURR"]], on="SK_ID_CURR")
                      .drop(columns=["tvt_code"]))
pdf_train_filtered.head()

# In[5]:

# load bureau
data_path = "home-credit-default-risk/bureau.csv"
pdf_data = pd.read_csv(data_path)

# # Preprocess data
# ## onehot and frequency
#
# - CREDIT_ACTIVE: status of the loan
# - CREDIT_CURRENCY

# In[6]:

# loans that were sold off ("Sold") are treated as bad debt
print("Before")
print(pdf_data["CREDIT_ACTIVE"].value_counts())
pdf_data.loc[pdf_data["CREDIT_ACTIVE"] == "Sold", "CREDIT_ACTIVE"] = "Bad debt"
print("After")
print(pdf_data["CREDIT_ACTIVE"].value_counts())

# In[7]:

# rare currencies are merged into a single bucket, "currency 2"
print("Before")
print(pdf_data["CREDIT_CURRENCY"].value_counts())
pdf_data.loc[pdf_data["CREDIT_CURRENCY"].isin(["currency 3", "currency 4"]), "CREDIT_CURRENCY"] = "currency 2"
print("After")
print(pdf_data["CREDIT_CURRENCY"].value_counts())

# In[8]:

pdf_distr_credit_type = pdf_data["CREDIT_TYPE"].value_counts().to_frame("freq")
pdf_distr_credit_type

# In[9]:

# Cross-tab CREDIT_TYPE vs TARGET on the train split to estimate the bad-credit
# rate per credit type.
pdf_check_credit_type = pdf_train_filtered.merge(pdf_data[["SK_ID_CURR", "CREDIT_TYPE"]]).groupby(["CREDIT_TYPE", "TARGET"]).size().to_frame("count")
pdf_check_credit_type = pdf_check_credit_type.reset_index().set_index("CREDIT_TYPE")

# percentage of TARGET == 1 per credit type
pdf_pct = pdf_check_credit_type.query("TARGET == 1").join(pdf_distr_credit_type)
pdf_pct["pct"] = pdf_pct["count"] * 100.0 / pdf_pct["freq"]
pdf_pct.sort_values("pct")

# In[10]:

# Types with bad-credit percentage above 5% and small support are bucketed into
# good/bad loan groups.
ls_good_loan = [
    "Mobile operator loan",
    "Interbank credit",
    "Loan for purchase of shares (margin lending)",
    # BUG FIX: a missing comma previously concatenated the next two literals into
    # "Real estate loanCash loan (non-earmarked)", so neither type was ever remapped.
    "Real estate loan",
    "Cash loan (non-earmarked)",
    "Another type of loan"
]
ls_bad_loan = [
    "Loan for the purchase of equipment",
    "Microloan",
    "Loan for working capital replenishment"
]
pdf_data.loc[pdf_data["CREDIT_TYPE"].isin(ls_good_loan), "CREDIT_TYPE"] = "Consumer credit"
pdf_data.loc[pdf_data["CREDIT_TYPE"].isin(ls_bad_loan), "CREDIT_TYPE"] = "Bad loan"
print("After")
print(pdf_data["CREDIT_TYPE"].value_counts())

# In[11]:

get_ipython().run_cell_magic('time', '', 'dict_feat = {\n "CREDIT_ACTIVE": [\'Closed\', \'Active\', \'Bad debt\'],\n "CREDIT_CURRENCY": [\'currency 1\', \'currency 2\'],\n "CREDIT_TYPE": ["Consumer credit", "Credit card", "Car loan", "Mortgage", "Loan for business development", "Unknown type of loan", "Bad loan"],\n}\npdf_onehot = gen_one_hot_feat(pdf_data, dict_feat)\n')

# In[12]:

pdf_agg01 = agg_common_data(pdf_onehot, ["max", "sum", "mean"])
eval_agg01 = feature_evaluate(pdf_train_filtered, pdf_agg01.reset_index())
display(eval_agg01)

# In[13]:

eval_agg01.query("auc <= 0.501").shape

# In[14]:

# keep only features with at least a slight signal (AUC > 0.501)
sel_feat = eval_agg01.query("auc > 0.501")["name"].tolist()
pdf_agg01 = pdf_agg01[sel_feat]
print(pdf_agg01.shape)

# ## days to years
#
# - DAYS_CREDIT: days since the loan was taken, relative to the application date
# - CREDIT_DAY_OVERDUE
# - DAYS_CREDIT_UPDATE
# - DAYS_CREDIT_ENDDATE
# - DAYS_ENDDATE_FACT

# In[15]:

def days_to_years(pdf_input, ls_cols):
    """Convert day-offset columns into year units.

    Parameters
    ----------
    pdf_input : pd.DataFrame
        Must contain "SK_ID_CURR" plus every column in ``ls_cols``.
    ls_cols : list of str
        Day-offset columns to convert.

    Returns
    -------
    pd.DataFrame
        "SK_ID_CURR" plus one "<col>_TO_YEARS" column per input column.
    """
    pdf_out = pdf_input[["SK_ID_CURR"] + ls_cols].copy()
    col_out = []
    for cname in ls_cols:
        new_name = "{}_TO_YEARS".format(cname)
        col_out.append(new_name)
        # BUG FIX: the original divided pdf_out["DAYS_CREDIT"] for every column,
        # so all *_TO_YEARS outputs were identical copies of DAYS_CREDIT / -365.
        # NOTE(review): DAYS_* columns are negative offsets so /-365 yields positive
        # years, but CREDIT_DAY_OVERDUE is a positive day count and comes out
        # negative here — confirm the intended sign for that column.
        pdf_out[new_name] = pdf_out[cname] / -365
    return pdf_out[["SK_ID_CURR"] + col_out]

# In[16]:

ls_cols = ["DAYS_CREDIT", "CREDIT_DAY_OVERDUE", "DAYS_CREDIT_UPDATE", "DAYS_CREDIT_ENDDATE", "DAYS_ENDDATE_FACT"]
pdf_years = days_to_years(pdf_data, ls_cols)
pdf_years.head()

# In[17]:

pdf_agg02 = agg_common_data(pdf_years, ["min", "max", "mean", "std", "median"])
pdf_agg02.head()

# In[18]:

eval_agg02 = feature_evaluate(pdf_train_filtered, pdf_agg02.reset_index())
display(eval_agg02)

# ## customers with credit taken between 1 and 2 years ago

# In[19]:

pdf_filtered021 = pdf_years[["SK_ID_CURR", "DAYS_CREDIT_TO_YEARS"]].copy()
pdf_filtered021["YEARS_CREDIT_1year"] = 0
idx_query = (pdf_years["DAYS_CREDIT_TO_YEARS"] >= 1) & (pdf_years["DAYS_CREDIT_TO_YEARS"] < 2)
pdf_filtered021.loc[idx_query, "YEARS_CREDIT_1year"] = 1
pdf_filtered021.drop(columns=["DAYS_CREDIT_TO_YEARS"], inplace=True)

# aggregate the binary flag per customer
pdf_agg021 = agg_common_data(pdf_filtered021, ["max", "sum"])
eval_agg021 = feature_evaluate(pdf_train_filtered, pdf_agg021.reset_index())
display(eval_agg021)

# ## customers with credit taken between 2 and 3 years ago

# In[20]:

pdf_filtered022 = pdf_years[["SK_ID_CURR", "DAYS_CREDIT_TO_YEARS"]].copy()
pdf_filtered022["YEARS_CREDIT_2year"] = 0
idx_query = (pdf_years["DAYS_CREDIT_TO_YEARS"] >= 2) & (pdf_years["DAYS_CREDIT_TO_YEARS"] < 3)
pdf_filtered022.loc[idx_query, "YEARS_CREDIT_2year"] = 1
pdf_filtered022.drop(columns=["DAYS_CREDIT_TO_YEARS"], inplace=True)

# aggregate the binary flag per customer
pdf_agg022 = agg_common_data(pdf_filtered022, ["max", "sum"])
eval_agg022 = feature_evaluate(pdf_train_filtered, pdf_agg022.reset_index())
display(eval_agg022)

# ## long-term customers (credit taken 3+ years ago)

# In[21]:

pdf_filtered023 = pdf_years[["SK_ID_CURR", "DAYS_CREDIT_TO_YEARS"]].copy()
pdf_filtered023["YEARS_CREDIT_3year"] = 0
idx_query = pdf_years["DAYS_CREDIT_TO_YEARS"] >= 3
pdf_filtered023.loc[idx_query, "YEARS_CREDIT_3year"] = 1
pdf_filtered023.drop(columns=["DAYS_CREDIT_TO_YEARS"], inplace=True)

# aggregate the binary flag per customer
pdf_agg023 = agg_common_data(pdf_filtered023, ["max", "sum"])
eval_agg023 = feature_evaluate(pdf_train_filtered, pdf_agg023.reset_index())
display(eval_agg023)

# ## keep columns

# In[22]:

# all monetary columns plus the prolongation counter
ls_cols = [cname for cname in pdf_data.columns if "AMT" in cname] + ["CNT_CREDIT_PROLONG"]
pdf_amt = pdf_data[["SK_ID_CURR"] + ls_cols]
pdf_amt.head()

# In[23]:

pdf_agg03 = agg_common_data(pdf_amt, ["min", "max", "mean", "std", "median"])

# In[24]:

eval_agg03 = feature_evaluate(pdf_train_filtered, pdf_agg03.reset_index())
display(eval_agg03)

# In[25]:

eval_agg03.query("auc <= 0.501").shape

# In[26]:

# keep only features with at least a slight signal (AUC > 0.501)
sel_feat = eval_agg03.query("auc > 0.501")["name"].tolist()
pdf_agg03 = pdf_agg03[sel_feat]
print(pdf_agg03.shape)

# # save features

# In[27]:

# join all aggregated feature groups on the SK_ID_CURR index
pdf_feat = pdf_agg01.join(pdf_agg02).join(pdf_agg03).join(pdf_agg021).join(pdf_agg022).join(pdf_agg023)
print(pdf_feat.shape)
pdf_feat.head()

# In[28]:

get_ipython().run_cell_magic('time', '', 'fname = "bureau"\nfname = os.path.join("features", "{}.pkl.bz2".format(fname))\npdf_feat.to_pickle(fname, compression="bz2")\nprint("Store features completed!")\n')

# In[29]:

def check_corr(pdf_input, attr=None, calculated=False):
    """Show an |correlation| heatmap and return the correlation matrix.

    Parameters
    ----------
    pdf_input : pd.DataFrame
        Feature frame, or an already-computed correlation matrix.
    attr : str, optional
        If given, only rows/columns whose name contains this substring are plotted.
    calculated : bool
        True when ``pdf_input`` is already a correlation matrix (skips .corr()).

    Returns
    -------
    pd.DataFrame
        The full correlation matrix (not the filtered view).
    """
    if calculated:
        pdf_corr = pdf_input
    else:
        pdf_corr = pdf_input.corr()

    # default to plotting the full matrix; narrow when attr is given
    toplot = pdf_corr
    fig, ax = plt.subplots(figsize=(15, 15))
    if attr is not None:
        idx_check = [name for name in pdf_corr.index if attr in name]
        toplot = pdf_corr.loc[idx_check, idx_check]
    ax.matshow(np.abs(toplot))
    plt.show()
    display(toplot)
    return pdf_corr

# visualize correlations of the final feature set
pdf_corr = check_corr(pdf_feat)