# pip install pycaret-nightly
from pycaret.utils import version
version()
pycaret-nightly-0.26
from pycaret.datasets import get_data
data = get_data('juice')
 | Id | Purchase | WeekofPurchase | StoreID | PriceCH | PriceMM | DiscCH | DiscMM | SpecialCH | SpecialMM | LoyalCH | SalePriceMM | SalePriceCH | PriceDiff | Store7 | PctDiscMM | PctDiscCH | ListPriceDiff | STORE |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1 | CH | 237 | 1 | 1.75 | 1.99 | 0.00 | 0.0 | 0 | 0 | 0.500000 | 1.99 | 1.75 | 0.24 | No | 0.000000 | 0.000000 | 0.24 | 1 |
1 | 2 | CH | 239 | 1 | 1.75 | 1.99 | 0.00 | 0.3 | 0 | 1 | 0.600000 | 1.69 | 1.75 | -0.06 | No | 0.150754 | 0.000000 | 0.24 | 1 |
2 | 3 | CH | 245 | 1 | 1.86 | 2.09 | 0.17 | 0.0 | 0 | 0 | 0.680000 | 2.09 | 1.69 | 0.40 | No | 0.000000 | 0.091398 | 0.23 | 1 |
3 | 4 | MM | 227 | 1 | 1.69 | 1.69 | 0.00 | 0.0 | 0 | 0 | 0.400000 | 1.69 | 1.69 | 0.00 | No | 0.000000 | 0.000000 | 0.00 | 1 |
4 | 5 | CH | 228 | 7 | 1.69 | 1.69 | 0.00 | 0.0 | 0 | 0 | 0.956535 | 1.69 | 1.69 | 0.00 | Yes | 0.000000 | 0.000000 | 0.00 | 0 |
from pycaret.classification import *
clf1 = setup(data, target = 'Purchase', session_id=7267, log_experiment=True, experiment_name='pycaret2-juice')
Setup Successfully Completed!
 | Description | Value |
---|---|---|
0 | session_id | 7267 |
1 | Target Type | Binary |
2 | Label Encoded | CH: 0, MM: 1 |
3 | Original Data | (1070, 19) |
4 | Missing Values | False |
5 | Numeric Features | 13 |
6 | Categorical Features | 5 |
7 | Ordinal Features | False |
8 | High Cardinality Features | False |
9 | High Cardinality Method | None |
10 | Sampled Data | (1070, 19) |
11 | Transformed Train Set | (748, 16) |
12 | Transformed Test Set | (322, 16) |
13 | Numeric Imputer | mean |
14 | Categorical Imputer | constant |
15 | Normalize | False |
16 | Normalize Method | None |
17 | Transformation | False |
18 | Transformation Method | None |
19 | PCA | False |
20 | PCA Method | None |
21 | PCA Components | None |
22 | Ignore Low Variance | False |
23 | Combine Rare Levels | False |
24 | Rare Level Threshold | None |
25 | Numeric Binning | False |
26 | Remove Outliers | False |
27 | Outliers Threshold | None |
28 | Remove Multicollinearity | False |
29 | Multicollinearity Threshold | None |
30 | Clustering | False |
31 | Clustering Iteration | None |
32 | Polynomial Features | False |
33 | Polynomial Degree | None |
34 | Trigonometry Features | False |
35 | Polynomial Threshold | None |
36 | Group Features | False |
37 | Feature Selection | False |
38 | Features Selection Threshold | None |
39 | Feature Interaction | False |
40 | Feature Ratio | False |
41 | Interaction Threshold | None |
42 | Fix Imbalance | False |
43 | Fix Imbalance Method | SMOTE |
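Every row in the grid above maps to a setup argument. As a hedged sketch (not executed in this run), a few of those switches turned on would look like this:

# A sketch only, not part of the run above; parameter names follow the
# grid labels (normalize, remove_multicollinearity, ...).
clf_alt = setup(data, target='Purchase', session_id=7267,
                normalize=True, remove_multicollinearity=True,
                multicollinearity_threshold=0.95)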
best_model = compare_models()
 | Model | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC | TT (Sec) |
---|---|---|---|---|---|---|---|---|---|
0 | Linear Discriminant Analysis | 0.8383 | 0.9083 | 0.7978 | 0.7933 | 0.7934 | 0.6607 | 0.6632 | 0.0161 |
1 | Ridge Classifier | 0.8356 | 0.0000 | 0.7979 | 0.7873 | 0.7907 | 0.6555 | 0.6578 | 0.0295 |
2 | Ada Boost Classifier | 0.8276 | 0.8913 | 0.7637 | 0.7922 | 0.7760 | 0.6360 | 0.6380 | 0.2087 |
3 | CatBoost Classifier | 0.8223 | 0.9026 | 0.7502 | 0.7884 | 0.7672 | 0.6238 | 0.6261 | 5.0027 |
4 | Logistic Regression | 0.8222 | 0.9071 | 0.7430 | 0.7927 | 0.7643 | 0.6221 | 0.6255 | 0.1138 |
5 | Gradient Boosting Classifier | 0.8222 | 0.8967 | 0.7637 | 0.7777 | 0.7697 | 0.6251 | 0.6261 | 0.2643 |
6 | Light Gradient Boosting Machine | 0.8089 | 0.8865 | 0.7469 | 0.7609 | 0.7529 | 0.5973 | 0.5982 | 0.3430 |
7 | Random Forest Classifier | 0.8035 | 0.8672 | 0.7163 | 0.7672 | 0.7393 | 0.5824 | 0.5847 | 0.1235 |
8 | Extreme Gradient Boosting | 0.8035 | 0.8776 | 0.7464 | 0.7533 | 0.7480 | 0.5872 | 0.5892 | 0.2332 |
9 | Extra Trees Classifier | 0.7981 | 0.8468 | 0.7159 | 0.7558 | 0.7343 | 0.5718 | 0.5734 | 0.3468 |
10 | Naive Bayes | 0.7859 | 0.8488 | 0.8185 | 0.6920 | 0.7493 | 0.5649 | 0.5718 | 0.0068 |
11 | Decision Tree Classifier | 0.7566 | 0.7429 | 0.6780 | 0.6942 | 0.6847 | 0.4869 | 0.4881 | 0.0181 |
12 | K Neighbors Classifier | 0.7393 | 0.7890 | 0.6098 | 0.6910 | 0.6447 | 0.4409 | 0.4454 | 0.0138 |
13 | Quadratic Discriminant Analysis | 0.7286 | 0.7917 | 0.6844 | 0.6699 | 0.6460 | 0.4340 | 0.4504 | 0.0121 |
14 | SVM - Linear Kernel | 0.5643 | 0.0000 | 0.2000 | 0.0773 | 0.1115 | 0.0000 | 0.0000 | 0.0319 |
best_model
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None, solver='svd', store_covariance=False, tol=0.0001)
top5 = compare_models(n_select=5)
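With n_select set, compare_models returns a list of the top fitted models rather than a single estimator, so the result can be indexed directly, as in this quick sketch:

print(type(top5), len(top5))   # list with the five top-ranked models
print(top5[0])                 # the best of the five (LDA in this run)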
w = compare_models(whitelist=['dt','rf','xgboost','lightgbm'])
 | Model | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC | TT (Sec) |
---|---|---|---|---|---|---|---|---|---|
0 | Light Gradient Boosting Machine | 0.8089 | 0.8865 | 0.7469 | 0.7609 | 0.7529 | 0.5973 | 0.5982 | 0.3285 |
1 | Random Forest Classifier | 0.8035 | 0.8672 | 0.7163 | 0.7672 | 0.7393 | 0.5824 | 0.5847 | 0.1270 |
2 | Extreme Gradient Boosting | 0.8035 | 0.8776 | 0.7464 | 0.7533 | 0.7480 | 0.5872 | 0.5892 | 0.2405 |
3 | Decision Tree Classifier | 0.7566 | 0.7429 | 0.6780 | 0.6942 | 0.6847 | 0.4869 | 0.4881 | 0.0081 |
w
LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0, importance_type='split', learning_rate=0.1, max_depth=-1, min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0, n_estimators=100, n_jobs=-1, num_leaves=31, objective=None, random_state=7267, reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
tuned_best_model = tune_model(best_model, optimize='Precision')
 | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC |
---|---|---|---|---|---|---|---|
0 | 0.8400 | 0.9445 | 0.7241 | 0.8400 | 0.7778 | 0.6538 | 0.6582 |
1 | 0.7867 | 0.8621 | 0.6552 | 0.7600 | 0.7037 | 0.5385 | 0.5421 |
2 | 0.8267 | 0.8808 | 0.7241 | 0.8077 | 0.7636 | 0.6274 | 0.6298 |
3 | 0.8533 | 0.9453 | 0.8621 | 0.7812 | 0.8197 | 0.6966 | 0.6990 |
4 | 0.8000 | 0.8808 | 0.6552 | 0.7917 | 0.7170 | 0.5645 | 0.5705 |
5 | 0.8133 | 0.9220 | 0.7241 | 0.7778 | 0.7500 | 0.6014 | 0.6023 |
6 | 0.8267 | 0.8830 | 0.7000 | 0.8400 | 0.7636 | 0.6286 | 0.6351 |
7 | 0.8133 | 0.9178 | 0.7000 | 0.8077 | 0.7500 | 0.6023 | 0.6062 |
8 | 0.8378 | 0.8720 | 0.7586 | 0.8148 | 0.7857 | 0.6555 | 0.6566 |
9 | 0.8108 | 0.9069 | 0.6897 | 0.8000 | 0.7407 | 0.5931 | 0.5971 |
Mean | 0.8209 | 0.9015 | 0.7193 | 0.8021 | 0.7572 | 0.6162 | 0.6197 |
SD | 0.0190 | 0.0285 | 0.0564 | 0.0245 | 0.0317 | 0.0440 | 0.0435 |
tuned_best_model
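tune_model runs a randomized search over a predefined grid and sorts folds by the optimize metric. A hedged sketch of a larger search budget (n_iter defaults to 10; a custom_grid dict can also be passed to override the search space):

# A sketch, not executed above: same model, bigger random-search budget.
tuned_longer = tune_model(best_model, optimize='Precision', n_iter=25)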
from gplearn.genetic import SymbolicClassifier
sc = SymbolicClassifier()
sc = create_model(sc, fold=5)
 | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC |
---|---|---|---|---|---|---|---|
0 | 0.8267 | 0.9213 | 0.7241 | 0.8077 | 0.7636 | 0.6274 | 0.6298 |
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-10-ffd762ec6369> in <module>
----> 1 sc = create_model(sc, fold=5)
(... stack frames through pycaret, joblib, and gplearn elided ...)
KeyboardInterrupt:
The fit above was interrupted by hand mid-evolution; the predict_model and plot_model calls below assume a create_model run that was allowed to finish.
predict_model(sc);
plot_model(sc, plot = 'confusion_matrix')
plot_model(sc, plot = 'auc')
plot_model(sc, plot = 'threshold')
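plot_model can also write the figure to disk rather than rendering it inline, which is handy on headless runs. A hedged sketch:

# A sketch: save=True writes the plot to the working directory.
plot_model(sc, plot='confusion_matrix', save=True)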
lightgbm = create_model('lightgbm')
 | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC |
---|---|---|---|---|---|---|---|
0 | 0.7867 | 0.9078 | 0.6552 | 0.7600 | 0.7037 | 0.5385 | 0.5421 |
1 | 0.7867 | 0.8741 | 0.7586 | 0.7097 | 0.7333 | 0.5559 | 0.5567 |
2 | 0.7600 | 0.8501 | 0.7241 | 0.6774 | 0.7000 | 0.5004 | 0.5011 |
3 | 0.8800 | 0.9175 | 0.8621 | 0.8333 | 0.8475 | 0.7486 | 0.7489 |
4 | 0.8267 | 0.8703 | 0.7931 | 0.7667 | 0.7797 | 0.6369 | 0.6371 |
5 | 0.8400 | 0.9025 | 0.7931 | 0.7931 | 0.7931 | 0.6627 | 0.6627 |
6 | 0.7467 | 0.8541 | 0.6667 | 0.6897 | 0.6780 | 0.4693 | 0.4695 |
7 | 0.8133 | 0.9244 | 0.7333 | 0.7857 | 0.7586 | 0.6067 | 0.6077 |
8 | 0.8243 | 0.8506 | 0.7241 | 0.8077 | 0.7636 | 0.6245 | 0.6269 |
9 | 0.8243 | 0.9138 | 0.7586 | 0.7857 | 0.7719 | 0.6291 | 0.6294 |
Mean | 0.8089 | 0.8865 | 0.7469 | 0.7609 | 0.7529 | 0.5973 | 0.5982 |
SD | 0.0375 | 0.0282 | 0.0582 | 0.0494 | 0.0480 | 0.0784 | 0.0782 |
lightgbm
LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0, importance_type='split', learning_rate=0.1, max_depth=-1, min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0, n_estimators=100, n_jobs=-1, num_leaves=31, objective=None, random_state=7267, reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
import numpy as np

lgbms = []
for i in np.arange(0.1, 1, 0.1):
    m = create_model('lightgbm', learning_rate=i, verbose=False)
    lgbms.append(m)

print(len(lgbms))
9
lgbms
[LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0, importance_type='split', learning_rate=0.1, max_depth=-1, min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0, n_estimators=100, n_jobs=-1, num_leaves=31, objective=None, random_state=7267, reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0, subsample_for_bin=200000, subsample_freq=0),
 LGBMClassifier(..., learning_rate=0.2, ...),
 LGBMClassifier(..., learning_rate=0.30000000000000004, ...),
 LGBMClassifier(..., learning_rate=0.4, ...),
 LGBMClassifier(..., learning_rate=0.5, ...),
 LGBMClassifier(..., learning_rate=0.6, ...),
 LGBMClassifier(..., learning_rate=0.7000000000000001, ...),
 LGBMClassifier(..., learning_rate=0.8, ...),
 LGBMClassifier(..., learning_rate=0.9, ...)]
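Only learning_rate differs across the nine reprs above. A small bookkeeping sketch (plain Python, nothing PyCaret-specific, with hypothetical names) to retrieve a specific variant later:

# Map each rounded learning rate to its fitted model.
rates = [round(r, 1) for r in np.arange(0.1, 1, 0.1)]
lgbm_by_rate = dict(zip(rates, lgbms))
print(lgbm_by_rate[0.5])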
best_model_cv = automl()
print(best_model_cv)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None, solver='svd', store_covariance=False, tol=0.0001)
best_model_holdout = automl(use_holdout=True)
print(best_model_holdout)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None, solver='svd', store_covariance=False, tol=0.0001)
!mlflow ui
^C
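The tracking store behind the MLflow UI can also be queried programmatically. A hedged sketch using the mlflow client API (which metric columns appear depends on what PyCaret logged):

import mlflow

# Look up the experiment created by setup() and pull its runs as a DataFrame.
exp = mlflow.get_experiment_by_name('pycaret2-juice')
runs = mlflow.search_runs(experiment_ids=[exp.experiment_id])
print(runs.filter(like='metrics').head())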
from pycaret.classification import *
clf1 = setup(data, target = 'Purchase', session_id=7267, log_experiment=True,
             experiment_name='pycaret2-juice', log_plots = True)
Setup Successfully Completed!
(The configuration grid printed here is identical to the first setup call above.)
lr = create_model('lr')
 | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC |
---|---|---|---|---|---|---|---|
0 | 0.8267 | 0.9468 | 0.6552 | 0.8636 | 0.7451 | 0.6175 | 0.6310 |
1 | 0.7600 | 0.8778 | 0.7241 | 0.6774 | 0.7000 | 0.5004 | 0.5011 |
2 | 0.8267 | 0.8771 | 0.6897 | 0.8333 | 0.7547 | 0.6225 | 0.6292 |
3 | 0.8800 | 0.9400 | 0.8966 | 0.8125 | 0.8525 | 0.7517 | 0.7543 |
4 | 0.8133 | 0.8808 | 0.6552 | 0.8261 | 0.7308 | 0.5908 | 0.6001 |
5 | 0.8000 | 0.9138 | 0.7241 | 0.7500 | 0.7368 | 0.5756 | 0.5759 |
6 | 0.7867 | 0.8889 | 0.7000 | 0.7500 | 0.7241 | 0.5506 | 0.5514 |
7 | 0.8533 | 0.9296 | 0.8333 | 0.8065 | 0.8197 | 0.6961 | 0.6964 |
8 | 0.8243 | 0.8904 | 0.7586 | 0.7857 | 0.7719 | 0.6291 | 0.6294 |
9 | 0.8514 | 0.9261 | 0.7931 | 0.8214 | 0.8070 | 0.6862 | 0.6865 |
Mean | 0.8222 | 0.9071 | 0.7430 | 0.7927 | 0.7643 | 0.6221 | 0.6255 |
SD | 0.0330 | 0.0258 | 0.0744 | 0.0512 | 0.0456 | 0.0704 | 0.0703 |
# Useful for remote runs such as Kaggle kernels or GitHub Actions
xl_logs = get_logs(save=True)
# See the example on Databricks
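get_logs also returns the runs as a pandas DataFrame, so the saved file is optional; the frame can be inspected in place, as in this sketch:

print(xl_logs.shape)
print(xl_logs.columns.tolist())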