#!/usr/bin/env python
# coding: utf-8

# Now let's go on to our modeling step. As a reminder, our plan of action was:
#
# 1. Perform EDA on the dataset to extract valuable insight about the process
#    generating the time series (COMPLETED).
# 2. Build a baseline model (univariate model without exogenous variables) for
#    benchmarking purposes. (Covered in this notebook)
# 3. Build a univariate model with all exogenous variables to check best
#    possible performance. (Covered in this notebook)
# 4. Evaluate the model with exogenous variables and discuss any potential
#    issues. (Covered in this notebook)
# 5. Overcome issues identified above. (Covered in this notebook)
# 6. Make future predictions with the best model.
# 7. Replicate flow with Automated Time Series Modeling (AutoML)

# In[1]:

# Only enable critical logging (Optional)
import os

os.environ["PYCARET_CUSTOM_LOGGING_LEVEL"] = "CRITICAL"


# In[2]:

def what_is_installed():
    """Display the installed pycaret version and its dependency versions."""
    from pycaret import show_versions

    show_versions()


try:
    what_is_installed()
except ModuleNotFoundError:
    # pycaret is not installed yet: install it, then show versions again.
    get_ipython().system('pip install pycaret')
    what_is_installed()


# In[3]:

import numpy as np
import pandas as pd
from pycaret.datasets import get_data
from pycaret.time_series import TSForecastingExperiment

# In[4]:

# Global Figure Settings for notebook ----
# Depending on whether you are using jupyter notebook, jupyter lab, or Google
# Colab, you may have to set the renderer appropriately.
# NOTE: Setting to a static renderer here so that the notebook saved size is
# reduced.
global_fig_settings = {
    # "renderer": "notebook",
    "renderer": "png",
    "width": 1000,
    "height": 600,
}

# In[5]:

data = get_data("airquality")
# Limiting the data for demonstration purposes.
# Keep only the most recent 720 hourly observations for the demo.
data = data.iloc[-720:]

# Build a single datetime column from the separate Date and Time columns,
# then drop the originals.
data["index"] = pd.to_datetime(data["Date"] + " " + data["Time"])
data.drop(columns=["Date", "Time"], inplace=True)

# The dataset encodes missing readings as -200; convert them to NaN so the
# imputation step in setup() can handle them.
data.replace(-200, np.nan, inplace=True)

target = "CO(GT)"

# Exclude variables identified as unhelpful during EDA.
exclude = ['NMHC(GT)', 'AH']
data.drop(columns=exclude, inplace=True)
data.head()

# # Step 2: Baseline Model - Univariate forecasting without exogenous variables

# In[6]:

# For the univariate baseline, keep only the target as a datetime-indexed
# series (no exogenous columns).
data_uni = data.copy()
data_uni.set_index("index", inplace=True)
data_uni = data_uni[target]

exp_uni = TSForecastingExperiment()
exp_uni.setup(
    data=data_uni,
    fh=48,
    numeric_imputation_target="ffill",
    numeric_imputation_exogenous="ffill",
    fig_kwargs=global_fig_settings,
    session_id=42,
)

# In[7]:

# SARIMA with daily (24h) seasonal differencing as the benchmark model.
model = exp_uni.create_model("arima", order=(0, 1, 0), seasonal_order=(0, 1, 0, 24))

# In[8]:

exp_uni.plot_model(model)

# On zooming in to the forecasts, we can see that the model is able to capture
# some of the trends (spikes) in the dataset, but not all. The mean MASE across
# the CV folds is 1.52, which is not great: any value > 1 means the model is
# performing worse than a naive one-step-ahead forecast. Let's see if adding
# exogenous variables can help improve the model performance.

# # Step 3: Improved Model - Univariate forecasting with exogenous variables

# In[9]:

exp_exo = TSForecastingExperiment()
exp_exo.setup(
    data=data,
    target=target,
    index="index",
    fh=48,
    numeric_imputation_target="ffill",
    numeric_imputation_exogenous="ffill",
    fig_kwargs=global_fig_settings,
    session_id=42,
)

# In[10]:

model_exo = exp_exo.create_model("arima", order=(0, 1, 0), seasonal_order=(0, 1, 0, 24))

# In[11]:

exp_exo.plot_model(model_exo)

# # Step 4: Evaluate Model

# Not bad! We have managed to improve MASE significantly, which is much better
# than the univariate model and also a large improvement over a naive model.
# Let's finalize the model by training it on the entire dataset so we can make
# true future forecasts.

# In[12]:

final_model_exo = exp_exo.finalize_model(model_exo)


# In[13]:

def safe_predict(exp, model):
    """Prediction wrapper for demo purposes.

    Attempt to make out-of-sample predictions with ``model``. When the
    experiment was set up with exogenous variables, ``predict_model`` raises a
    ``ValueError`` if their future values are not supplied; in that case print
    the error plus the list of exogenous variables that would be needed,
    instead of letting the notebook fail.

    Parameters
    ----------
    exp : TSForecastingExperiment
        The experiment the model was trained in.
    model : object
        A (finalized) forecasting model from ``exp``.

    Returns
    -------
    The predictions from ``exp.predict_model`` on success, else ``None``.
    (The original version discarded the successful result; returning it lets
    callers actually use the forecasts.)
    """
    try:
        # Return the forecast so callers can use it (bug fix: it was dropped).
        return exp.predict_model(model)
    except ValueError as exception:
        print(exception)
        exo_vars = exp.exogenous_variables
        print(f"{len(exo_vars)} exogenous variables (X) needed in order to make future predictions:\n{exo_vars}")


safe_predict(exp_exo, final_model_exo)

# As we can see, this approach does not come without side effects. The problem
# is that we have 10 exogenous variables. Hence, in order to get any unknown
# future values for CO concentration, we will need the future values for all
# these exogenous variables. This is generally obtained through some
# forecasting process itself. But each forecast will have errors, and these
# errors can be compounded when there are a lot of exogenous variables.
#
# Let's see if we can trim down these exogenous variables to a handful of
# useful variables without compromising on forecasting performance.

# # Step 5: Parsimonious Model - Univariate forecasting with limited exogenous variables

# From the CCF Analysis, we found that many of the exogenous variables show a
# very similar correlation structure to the CO concentration. E.g.
# PT08.S1(CO), NOx(GT), C6H6(GT), PT08.S2(NMHC) values from 24 hours before
# (lag = 24) show a high positive correlation to CO concentration. Instead of
# keeping all of them, let's pick the one with the highest positive correlation
# at lag 24, which is NOx(GT).
#
# Similarly, PT08.S3(NOx) values from 24 hours ago show the highest negative
# correlation to CO concentration. Let's keep this variable as well.
#
# Finally, in daily cycles, what happens 12 hours back can also impact the
# current value (e.g. values last night can impact the next day and vice
# versa). The variable with the highest correlation to CO concentration at
# lag = 12 is RH.
# We will keep this as well.

# In[14]:

# Restrict the data to the target plus the three exogenous variables selected
# from the CCF analysis.
keep = [target, "index", 'NOx(GT)', "PT08.S3(NOx)", "RH"]
data_slim = data[keep]

exp_slim = TSForecastingExperiment()
exp_slim.setup(
    data=data_slim,
    target=target,
    index="index",
    fh=48,
    numeric_imputation_target="ffill",
    numeric_imputation_exogenous="ffill",
    fig_kwargs=global_fig_settings,
    session_id=42,
)

# In[15]:

model_slim = exp_slim.create_model("arima", order=(0, 1, 0), seasonal_order=(0, 1, 0, 24))

# In[16]:

exp_slim.plot_model(model_slim)

# Not bad. MASE has only increased slightly, but we have managed to cut our
# exogenous variables significantly. This will help us when we make "true"
# unknown future predictions, since we will need the "unknown" future values
# of these exogenous variables to make the forecast for the CO concentration.

# # Finalize the model
#
# - Train the slim model on the entire dataset so we can make true future
#   forecasts
# - Save the model as a pickle file for deployment

# In[17]:

final_slim_model = exp_slim.finalize_model(model_slim)

# In[18]:

_ = exp_slim.save_model(final_slim_model, "final_slim_model")

# In[19]:

safe_predict(exp_slim, final_slim_model)

# So we still need future values for 3 exogenous variables. We will get this in
# the next part using forecasting techniques.

# ### The next steps would typically be done in a new session/notebook

# Now that we have built our model, let's make future predictions. As a
# reminder, our plan of action was as follows:
#
# 1. Perform EDA on the dataset to extract valuable insight about the process
#    generating the time series (COMPLETED).
# 2. Build a baseline model (univariate model without exogenous variables) for
#    benchmarking purposes (COMPLETED).
# 3. Build a univariate model with all exogenous variables to check best
#    possible performance (COMPLETED).
# 4. Evaluate the model with exogenous variables and discuss any potential
#    issues (COMPLETED).
# 5. Overcome issues identified above (COMPLETED).
# 6.
# Make future predictions with the best model. (Covered in this notebook)
# 7. Replicate flow with Automated Time Series Modeling (AutoML)

# In[20]:

# Only the index and the three selected exogenous variables are needed from
# here on.
exog_vars = ['NOx(GT)', 'PT08.S3(NOx)', 'RH']
data = data[["index"] + exog_vars]
data.head()

# # Step 6: Making Future Predictions

# # Step 6A: Get future exogenous variable values using forecasting

# In[21]:

# Step 1: Fit one forecasting model per exogenous variable ----
exog_exps = []
exog_models = []
for exog_var in exog_vars:
    exog_exp = TSForecastingExperiment()
    exog_exp.setup(
        data=data[["index", exog_var]],
        target=exog_var,
        index="index",
        fh=48,
        numeric_imputation_target="ffill",
        numeric_imputation_exogenous="ffill",
        fig_kwargs=global_fig_settings,
        session_id=42,
    )

    # Users can customize how to model future exogenous variables, i.e. add
    # more steps and models to potentially get better models at the expense
    # of higher modeling time.
    best = exog_exp.compare_models(
        sort="mase",
        include=["arima", "ets", "exp_smooth", "theta", "lightgbm_cds_dt"],
    )
    final_exog_model = exog_exp.finalize_model(best)

    exog_exps.append(exog_exp)
    exog_models.append(final_exog_model)

# Step 2: Get future predictions for exog variables ----
future_exog = [
    exog_exp.predict_model(exog_model)
    for exog_exp, exog_model in zip(exog_exps, exog_models)
]
future_exog = pd.concat(future_exog, axis=1)
future_exog.columns = exog_vars

# In[22]:

future_exog

# # Step 6B: Load Model and make future predictions for the target variable

# In[23]:

exp_future = TSForecastingExperiment()

# In[24]:

final_slim_model = exp_future.load_model("final_slim_model")

# In[25]:

# Supply the forecasted exogenous values as X to get the target forecast.
future_preds = exp_future.predict_model(final_slim_model, X=future_exog)
future_preds.plot()