#!/usr/bin/env python
# coding: utf-8

# # AutoML
# 
# [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/etna-team/etna/master?filepath=examples/205-automl.ipynb)

# This notebook covers the AutoML utilities of the ETNA library.
# 
# **Table of contents**
# 
# - [Hyperparameter tuning](#chapter_1)
#     - [How Tune works](#section_1_1)
#     - [Example](#section_1_2)
# - [General AutoML](#chapter_2)
#     - [How Auto works](#section_2_1)
#     - [Example](#section_2_2)
# - [Summary](#chapter_3)

# In[1]:


get_ipython().system('pip install "etna[auto, prophet]" -q')


# In[2]:


import warnings

warnings.filterwarnings("ignore")


# In[3]:


import pandas as pd

from etna.datasets import TSDataset
from etna.metrics import SMAPE
from etna.models import LinearPerSegmentModel
from etna.pipeline import Pipeline
from etna.transforms import DateFlagsTransform
from etna.transforms import LagTransform


# In[4]:


HORIZON = 14


# ## 1. Hyperparameter tuning

# It is a common task to tune the hyperparameters of an existing pipeline to improve its quality. For this purpose there is the `etna.auto.Tune` class, which is responsible for creating an [optuna](https://github.com/optuna/optuna) study to solve this problem.
# 
# In the next sections we will see how it works and how to use it for your particular problems.

# ### 1.1 How `Tune` works

# During init `Tune` accepts a `pipeline`, its tuning parameters (`params_to_tune`), an optimization metric (`target_metric`), parameters of the backtest and parameters of the optuna study.
# 
# In `fit` the optuna study is created. During each trial a sample of parameters is generated from `params_to_tune` and applied to the `pipeline`. After that, the new pipeline is evaluated in backtest and the target metric is returned to the optuna framework.

# Let's look closer at the `params_to_tune` parameter. It expects a dictionary with parameter names and their distributions. But how should these parameter names be chosen?

# #### 1.1.1 `set_params`

# We are going to make a little detour to explain the `set_params` method, which is supported by ETNA pipelines, models and transforms. Given a dictionary with parameters, it creates from an existing object a new one with the parameters changed.

# First, we define some objects for our future examples.

# In[5]:


model = LinearPerSegmentModel()
transforms = [
    LagTransform(in_column="target", lags=list(range(HORIZON, HORIZON + 10)), out_column="target_lag"),
    DateFlagsTransform(out_column="date_flags"),
]
pipeline = Pipeline(model=model, transforms=transforms, horizon=HORIZON)


# Let's look at a simple example, where we want to change the `fit_intercept` parameter of the `model`.

# In[6]:


model.to_dict()


# In[7]:


new_model_params = {"fit_intercept": False}
new_model = model.set_params(**new_model_params)
new_model.to_dict()


# Great! On the next step we want to change the `fit_intercept` of the `model` inside the `pipeline`.

# In[8]:


pipeline.to_dict()


# In[9]:


new_pipeline_params = {"model.fit_intercept": False}
new_pipeline = pipeline.set_params(**new_pipeline_params)
new_pipeline.to_dict()


# Ok, it looks like we managed to do this. On the last step we are going to change the `is_weekend` flag of the `DateFlagsTransform` inside our `pipeline`.

# In[10]:


new_pipeline_params = {"transforms.1.is_weekend": False}
new_pipeline = pipeline.set_params(**new_pipeline_params)
new_pipeline.to_dict()


# As we can see, we managed to do this.
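# To tie `set_params` to the tuning process: conceptually, each `Tune` trial samples one value per distribution, applies the sample via `set_params` and scores the resulting pipeline in backtest. The helper below is only an illustrative sketch of that idea (the names `run_single_trial` and `sampled_params` are made up for this notebook), not ETNA's actual implementation.

# In[ ]:


def run_single_trial(pipeline, sampled_params, ts):
    """Illustrative sketch of one tuning trial: apply sampled params, backtest, return the metric."""
    # Build a new pipeline with the sampled hyperparameters applied.
    candidate = pipeline.set_params(**sampled_params)
    # Evaluate the candidate in backtest and aggregate the target metric over the folds.
    metrics_df, _, _ = candidate.backtest(ts=ts, metrics=[SMAPE()], n_folds=5)
    return metrics_df["SMAPE"].mean()


# For example, `run_single_trial(pipeline, {"transforms.1.is_weekend": False}, ts)` would score the pipeline variant we built above, once a dataset `ts` is available (we load one in section 1.2.1).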
# #### 1.1.2 `params_to_tune`

# Let's get back to our initial question about `params_to_tune`. In our optuna study we are going to sample each parameter value from its distribution and pass it into the `pipeline.set_params` method. So, the keys of `params_to_tune` should be valid keys for the `set_params` method.
# 
# Distributions are taken from `etna.distributions` and they match the `optuna.Trial.suggest_` methods.

# For example, something like this will be valid for our `pipeline` defined above:

# In[11]:


from etna.distributions import CategoricalDistribution

example_params_to_tune = {
    "model.fit_intercept": CategoricalDistribution([False, True]),
    "transforms.1.is_weekend": CategoricalDistribution([False, True]),
}


# There is some good news: our users don't have to define `params_to_tune` themselves, because we have a default grid for many of our classes. The default grid is available by calling the `params_to_tune` method on a pipeline, model or transform. Let's check our `pipeline`:

# In[12]:


pipeline.params_to_tune()


# Now we are ready to use it in practice.

# ### 1.2 Example

# #### 1.2.1 Loading data

# Let's start by loading the example data.

# In[13]:


df = pd.read_csv("data/example_dataset.csv")
df.head()


# In[14]:


df = TSDataset.to_dataset(df)
full_ts = TSDataset(df, freq="D")
full_ts.plot()


# Let's divide the current dataset into train and validation parts. We will use the validation part later to check the final results.

# In[15]:


ts, _ = full_ts.train_test_split(test_size=HORIZON * 5)


# #### 1.2.2 Running `Tune`

# We are going to define our `Tune` object:

# In[16]:


from etna.auto import Tune

tune = Tune(pipeline=pipeline, target_metric=SMAPE(), horizon=HORIZON, backtest_params=dict(n_folds=5))


# We used mostly default parameters for this example, but for your own experiments you might want to set up other parameters as well.
# 
# For example, the `runner` parameter allows you to run tuning in parallel on a local machine, and the `storage` parameter makes it possible to store optuna results on a dedicated remote server.
# 
# For a full list of parameters we advise you to check our documentation.

# Let's hide the logs of optuna: there are too many of them for a notebook.

# In[17]:


import optuna

optuna.logging.set_verbosity(optuna.logging.CRITICAL)


# Let's run the tuning.

# In[18]:


get_ipython().run_cell_magic('capture', '', 'best_pipeline = tune.fit(ts=ts, n_trials=20)\n')


# The `%%capture` command just hides the output.

# #### 1.2.3 Analysis

# In the last section dedicated to `Tune` we will look at methods for analyzing the results.

# First of all, there is the `summary` method that shows us the results of the optuna trials.

# In[19]:


tune.summary()


# Let's show only the columns we are interested in.

# In[20]:


tune.summary()[["hash", "pipeline", "SMAPE_mean", "state"]].sort_values("SMAPE_mean")


# As we can see, there are duplicate lines according to the `hash` column. Some trials sampled the same hyperparameters and therefore got the same results. We have a special handling for such duplicates: they are skipped during optimization and the previously computed metric values are returned.
# 
# Duplicates in the summary can be eliminated using the `hash` column.

# In[21]:


tune.summary()[["hash", "pipeline", "SMAPE_mean", "state"]].sort_values("SMAPE_mean").drop_duplicates(subset="hash")


# The second method, `top_k`, is useful when you want to look at the best tried pipelines without duplicates.

# In[22]:


top_3_pipelines = tune.top_k(k=3)


# In[23]:


top_3_pipelines
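# As a quick sanity check of the tuning result, we can refit the best pipeline on the training part and produce a forecast for the next `HORIZON` points. This is just a usage sketch; `tune.fit` already returned the best pipeline as `best_pipeline` above.

# In[ ]:


# Refit the tuned pipeline on the training data and forecast HORIZON points ahead.
best_pipeline.fit(ts=ts)
tuned_forecast = best_pipeline.forecast()
tuned_forecast.plot()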
# ## 2. General AutoML

# Hyperparameter tuning is useful, but can be too narrow. In this section we move our attention to the general AutoML pipeline.

# In ETNA we have the `etna.auto.Auto` class for automatic pipeline selection. It can be useful to quickly create a good baseline for your forecasting task.

# ### 2.1 How `Auto` works

# The `Auto` init has parameters similar to `Tune`, but instead of a `pipeline` it works with a `pool`. A pool, in general, is just a list of pipelines.
# 
# During `fit` there are two stages:
# 
# - pool stage,
# - tuning stage.
# 
# The pool stage is responsible for checking every pipeline suggested in the given `pool`. For each pipeline we run a backtest and compute the `target_metric`. Results are saved in an optuna study.
# 
# The tuning stage takes the `tune_size` best pipelines according to the results of the pool stage and then runs `Tune` with the default `params_to_tune` for them sequentially, from the best to the worst.
# 
# The limit parameters `n_trials` and `timeout` are shared between the pool and tuning stages. First, we run the pool stage with the given `n_trials` and `timeout`. After that, the remaining values are divided equally among the `tune_size` tuning steps.

# ### 2.2 Example

# We will move straight to the example.

# In[24]:


from etna.auto import Auto

auto = Auto(target_metric=SMAPE(), horizon=HORIZON, backtest_params=dict(n_folds=5))


# We used mostly default parameters, even the pool. There is also a default `sampler`; to make the results more reproducible you can fix its seed.

# Let's start the fitting. We can start by running only the pool stage.

# In[25]:


get_ipython().run_cell_magic('capture', '', 'best_pool_pipeline = auto.fit(ts=ts, tune_size=0)\n')


# In[26]:


auto.summary()[["hash", "pipeline", "SMAPE_mean", "state", "study"]].sort_values("SMAPE_mean")


# We can continue our training. The pool stage is over, so only the tuning stage will run. If we don't want to wait forever, we should limit the tuning by fixing `n_trials` or `timeout`.
# 
# We also set some parameters for `optuna.Study.optimize`:
# 
# - `gc_after_trial=True`: to prevent `fit` from increasing memory consumption;
# - `catch=(Exception,)`: to prevent failing if some trials are erroneous.

# In[27]:


get_ipython().run_cell_magic('capture', '', 'best_tuning_pipeline = auto.fit(ts=ts, tune_size=3, n_trials=100, gc_after_trial=True, catch=(Exception,))\n')


# Let's look at the results.

# In[28]:


auto.summary()[["hash", "pipeline", "SMAPE_mean", "state", "study"]].sort_values("SMAPE_mean").drop_duplicates(
    subset=("hash", "study")
).head(10)


# In[29]:


auto.top_k(k=5)


# If we look at the `study` column we will see that the best trial from the tuning stage is better than the best trial from the pool stage. It means that the tuning stage was successful and improved the final result.
# 
# Let's compare the best pipelines of the pool and tuning stages on the hold-out part of the initial `ts`.

# In[30]:


get_ipython().run_cell_magic('capture', '', 'best_pool_metrics, _, _ = best_pool_pipeline.backtest(ts=full_ts, metrics=[SMAPE()], n_folds=5)\nbest_tuning_metrics, _, _ = best_tuning_pipeline.backtest(ts=full_ts, metrics=[SMAPE()], n_folds=5)\n')


# In[31]:


best_pool_smape = best_pool_metrics["SMAPE"].mean()
best_tuning_smape = best_tuning_metrics["SMAPE"].mean()
print(f"Best pool SMAPE: {best_pool_smape:.3f}")
print(f"Best tuning SMAPE: {best_tuning_smape:.3f}")


# As we can see, the results are slightly better after the tuning stage, but the difference might not be statistically significant. For your datasets the results could be different.
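# `Auto` is not limited to the default pool. As described in section 2.1, a pool is essentially a list of pipelines, so you can pass your own candidates. Below is a hedged sketch of that idea, assuming `Auto` accepts a plain list of pipelines via its `pool` argument; the pool itself (our chapter 1 pipeline plus one variation) is chosen purely for illustration.

# In[ ]:


# A tiny custom pool: the pipeline from chapter 1 and a variant without intercept.
custom_pool = [
    pipeline,
    pipeline.set_params(**{"model.fit_intercept": False}),
]
custom_auto = Auto(target_metric=SMAPE(), horizon=HORIZON, pool=custom_pool, backtest_params=dict(n_folds=5))
# custom_auto.fit(ts=ts, tune_size=1, n_trials=10)  # uncomment to run both stages on this small pool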
# ## 3. Summary

# In this notebook we discussed how AutoML works in the ETNA library and how to use it. There are two supported scenarios:
# 
# - Tuning your existing pipeline;
# - Automatic search of a pipeline for your forecasting task.