#!/usr/bin/env python # coding: utf-8 # # Forecasting strategies # # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/etna-team/etna/master?filepath=examples/208-forecasting_strategies.ipynb) # There are 5 possible forecasting strategies: # # * **Recursive**: sequentially forecasts `step` points and use them to forecast next points. # * **Direct**: uses separate model to forecast each time subsegment # * **DirRec**: uses a separate model to forecast each time subsegment, fitting the next model on the train set extended with the forecasts of previous models # * **MIMO**: uses a single multi-output model # * **DIRMO**: MIMO + DirREC # # # The first two of these strategies are available in ETNA, and we will take a closer look at them in this notebook. # # **Table of contents** # # * [Loading dataset](#chapter1) # * [Recursive strategy](#chapter2) # * [Direct strategy](#chapter3) # * [Pipeline](#section_3_1) # * [DirectEnsemble](#section_3_2) # * [Summary](#chapter4) # In[1]: import warnings warnings.filterwarnings("ignore") # In[2]: import pandas as pd from etna.analysis import plot_backtest from etna.datasets import TSDataset from etna.metrics import MAE from etna.metrics import MAPE from etna.metrics import SMAPE from etna.models import CatBoostPerSegmentModel from etna.transforms import LagTransform from etna.transforms import LinearTrendTransform # In[3]: HORIZON = 14 HISTORY_LEN = 5 * HORIZON NUMBER_OF_LAGS = 21 # ## 1. Loading dataset # # Let's load and plot the dataset: # In[4]: df = pd.read_csv("data/example_dataset.csv") df = TSDataset.to_dataset(df) ts = TSDataset(df, freq="D") ts.plot() # ## 2. Recursive strategy # # Recursive strategy in ETNA is implemented via `AutoregressivePipeline`. # `AutoRegressivePipeline` is pipeline, which iteratively forecasts `step` values ahead and after that uses forecasted values to build the features for the next steps. 
# Properties of the recursive strategy:
#
# * Could work slowly in case of a small `step`, since the method needs to recalculate the features $\lceil{\frac{horizon}{step}} \rceil$ times.
# * Allows using lags that are lower than `HORIZON`.
# * Could be imprecise on forecasting with large horizons: the errors of earlier forecasts accumulate into the forecasts for further points.
# * Stable for noise-free time series.
#
# **Note**:
#
# We will add a linear trend into the model (because we are working with tree-based models) and use the target's lags as features.

# In[5]:

from etna.pipeline import AutoRegressivePipeline

# In[6]:

model = CatBoostPerSegmentModel()
transforms = [
    # Tree-based models cannot extrapolate a trend, so remove it before fitting.
    LinearTrendTransform(in_column="target"),
    # The recursive strategy may use lags shorter than the horizon, starting from lag 1.
    LagTransform(in_column="target", lags=list(range(1, 1 + NUMBER_OF_LAGS)), out_column="target_lag"),
]

autoregressivepipeline = AutoRegressivePipeline(model=model, transforms=transforms, horizon=HORIZON, step=1)

metrics_recursive_df, forecast_recursive_df, _ = autoregressivepipeline.backtest(
    ts=ts, metrics=[SMAPE(), MAE(), MAPE()]
)
autoregressive_pipeline_metrics = metrics_recursive_df.mean()

# In[7]:

plot_backtest(forecast_recursive_df, ts, history_len=HISTORY_LEN)

# ## 3. Direct Strategy
#
# The direct strategy in ETNA is implemented via `Pipeline` and `DirectEnsemble`. This strategy assumes conditional independence of the forecasts.

# ### 3.1 `Pipeline`
#
# `Pipeline` implements the version of the direct strategy where only one model is fitted to forecast all the points in the future.
# This implies several things:
#
# * `Pipeline` doesn't accept lags less than `horizon`.
# * This is the most time-efficient method, both in training and in forecasting.
# * This method might lose quality with the growth of the horizon when using lags, as only horizon-far lags are available for all the points.
#
# **Note:**
#
# As mentioned above, we cannot use lags less than `horizon`, so now we will use lags from `horizon` to `horizon + number_of_lags`.

# In[8]:

from etna.pipeline import Pipeline

# In[9]:

model = CatBoostPerSegmentModel()
transforms = [
    # Remove the linear trend before fitting the tree-based model.
    LinearTrendTransform(in_column="target"),
    # Only lags of at least `HORIZON` are valid for the direct strategy.
    LagTransform(in_column="target", lags=list(range(HORIZON, HORIZON + NUMBER_OF_LAGS)), out_column="target_lag"),
]

pipeline = Pipeline(model=model, transforms=transforms, horizon=HORIZON)

metrics_pipeline_df, forecast_pipeline_df, _ = pipeline.backtest(ts=ts, metrics=[SMAPE(), MAE(), MAPE()])
pipeline_metrics = metrics_pipeline_df.mean()

# In[10]:

plot_backtest(forecast_pipeline_df, ts, history_len=HISTORY_LEN)

# ### 3.2 `DirectEnsemble`
#
# `DirectEnsemble` fits a separate pipeline to forecast each time subsegment. When forecasting the future, it selects the base pipeline with the shortest horizon that covers the timestamp of the currently forecasted point. Let's see an example of choosing a base pipeline for forecasting:
#
# ![title](./assets/forecasting_strategies/ensemle.png)

# * This method can be useful when we have different pipelines that are effective on different horizons.
# * The computational time grows with the number of base pipelines.
# * The forecasts from this strategy might look like a "broken curve"; this happens because they are obtained from independent models.
#
# #### Example
# Let's build a separate pipeline for each week of interest. The first week will be forecasted using the lags from `7` to `7 + number_of_lags` and the second one with lags from `horizon` to `horizon + number_of_lags`.
# We expect that using the near lags for the first week might improve the forecast quality.
#
# First, let's build our pipelines:

# In[11]:

horizons = [7, 14]

model_1 = CatBoostPerSegmentModel()
transforms_1 = [
    LinearTrendTransform(in_column="target"),
    # The one-week pipeline may use lags starting from 7 — closer to the forecasted points.
    LagTransform(
        in_column="target", lags=list(range(horizons[0], horizons[0] + NUMBER_OF_LAGS)), out_column="target_lag"
    ),
]
pipeline_1 = Pipeline(model=model_1, transforms=transforms_1, horizon=horizons[0])

model_2 = CatBoostPerSegmentModel()
transforms_2 = [
    LinearTrendTransform(in_column="target"),
    # The two-week pipeline must use lags of at least its horizon (14).
    LagTransform(
        in_column="target", lags=list(range(horizons[1], horizons[1] + NUMBER_OF_LAGS)), out_column="target_lag"
    ),
]
pipeline_2 = Pipeline(model=model_2, transforms=transforms_2, horizon=horizons[1])

# Secondly, we will create the ensemble and the forecasts:

# In[12]:

from etna.ensembles import DirectEnsemble

# In[13]:

ensemble = DirectEnsemble(pipelines=[pipeline_1, pipeline_2])

metrics_ensemble_df, forecast_ensemble_df, _ = ensemble.backtest(ts=ts, metrics=[SMAPE(), MAE(), MAPE()])
ensemble_metrics = metrics_ensemble_df.mean()

# In[14]:

plot_backtest(forecast_ensemble_df, ts, history_len=HISTORY_LEN)

# The `DirectEnsemble` described above requires building a separate pipeline for each of the time subsegments. These pipelines often have many common parts and differ only in a few places. To make the definition of the pipelines a little bit shorter, you can use `assemble_pipelines`. It generates the pipelines using the following rules:
#
# 1. Input models (horizons) can be specified as one model (horizon) or as a sequence of models (horizons). In the first case all generated pipelines will hold the input model (horizon), and in the second case the `i`-th pipeline will hold the `i`-th model (horizon).
# 2. Transforms can be specified as a sequence of transforms or as a sequence of sequences of transforms.
# Let's look at some examples to better understand how the transforms are distributed between the pipelines:
#
# Let's consider that `A`, `B`, `C`, `D`, `E` are different transforms.
#
# **Example 1**
#
# If the input transform sequence is `[A, B, C]`, the function will put `[A, B, C]` into each pipeline.
#
# **Example 2**
#
# If the input transform sequence is `[A, [B, C], D, E]`, the function will put `[A, B, D, E]` into the first generated pipeline and `[A, C, D, E]` into the second.
#
# **Example 3**
#
# If the input transform sequence is `[A, [B, C], [D, E]]`, the function will put `[A, B, D]` into the first generated pipeline and `[A, C, E]` into the second.
#
# **Example 4**
#
# If the input transform sequence is `[A, [B, None]]`, the function will put `[A, B]` into the first generated pipeline and `[A]` into the second.
#
#
# Let's build the ensemble from the previous section using `assemble_pipelines`:

# In[15]:

from etna.pipeline import assemble_pipelines

# In[16]:

models = [CatBoostPerSegmentModel(), CatBoostPerSegmentModel()]
transforms = [
    # A plain transform is shared by every generated pipeline (Example 1 rule).
    LinearTrendTransform(in_column="target"),
    # A nested sequence distributes its i-th element to the i-th pipeline (Example 3 rule).
    [
        LagTransform(
            in_column="target",
            lags=list(range(horizons[0], horizons[0] + NUMBER_OF_LAGS)),
            out_column="target_lag",
        ),
        LagTransform(
            in_column="target",
            lags=list(range(horizons[1], horizons[1] + NUMBER_OF_LAGS)),
            out_column="target_lag",
        ),
    ],
]

pipelines = assemble_pipelines(models=models, transforms=transforms, horizons=horizons)
pipelines

# The pipeline generation process looks a bit simpler now, doesn't it? Now it's time to create a `DirectEnsemble` out of them:

# In[17]:

ensemble = DirectEnsemble(pipelines=pipelines)

metrics_ensemble_df_2, forecast_ensemble_df_2, _ = ensemble.backtest(ts=ts, metrics=[SMAPE(), MAE(), MAPE()])

# Let's check that the forecasts have not changed:

# In[18]:

pd.testing.assert_frame_equal(metrics_ensemble_df_2, metrics_ensemble_df)

# ## 4. Summary
#
# In this notebook, we discussed the forecasting strategies available in ETNA and looked at examples of their usage.
# In conclusion, let's compare their quality on the considered dataset:

# In[19]:

# Collect the per-strategy mean backtest metrics into one table; the averaged
# `fold_number` column is an artifact of `.mean()` and carries no meaning, so
# it is dropped. Sorting by SMAPE puts the best-performing strategy first.
df_res = pd.DataFrame(
    data=[ensemble_metrics, pipeline_metrics, autoregressive_pipeline_metrics],
    index=["direct_ensemble", "pipeline", "autoregressive_pipeline"],
).drop("fold_number", axis=1)
df_res = df_res.sort_values(by="SMAPE")
df_res