from fastai.gen_doc.nbdoc import *
from fastai.basics import *

show_doc(accuracy)

jekyll_warn("This metric is intended for classification of objects belonging to a single class.")

preds = tensor([0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1])  # bs = 5, n = 2
ys = tensor([1], [0], [1], [0], [1])
accuracy(preds, ys)

show_doc(accuracy_thresh)

jekyll_note("This function is intended for one-hot-encoded targets (often in a multiclassification problem).")

preds = tensor([0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1])
ys = tensor([0, 1], [1, 0], [0, 1], [1, 0], [0, 1])
accuracy_thresh(preds, ys, thresh=0.65, sigmoid=False)

show_doc(top_k_accuracy)

show_doc(dice)

preds = tensor([0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1])
ys = tensor([1], [0], [1], [0], [1])
dice(preds, ys)  # TP = 2, FP = 1, FN = 1

show_doc(error_rate)

show_doc(mean_squared_error)

show_doc(mean_absolute_error)

show_doc(mean_squared_logarithmic_error)

show_doc(exp_rmspe)

show_doc(root_mean_squared_error)

show_doc(fbeta)

jekyll_note("This function is intended for one-hot-encoded targets (often in a multiclassification problem).")

preds = tensor([0.6, 0.8, 0.2, 0.4, 0.9]).view(1, 5)  # TP = 2, FP = 1, FN = 1
ys = tensor([1, 0, 0, 1, 1]).view(1, 5)
fbeta(preds, ys, thresh=0.5, sigmoid=False)

show_doc(explained_variance)

preds = tensor([0.10, .20, .30, .40, .50])
ys = tensor([0.12, .17, .25, .44, .56])  # predictions are close to the truth
explained_variance(preds, ys)

show_doc(r2_score)

r2_score(preds, ys)

show_doc(RMSE, title_level=3)
show_doc(ExpRMSPE, title_level=3)
show_doc(Precision, title_level=3)
show_doc(Recall, title_level=3)
show_doc(FBeta, title_level=3)
show_doc(R2Score, title_level=3)
show_doc(ExplainedVariance, title_level=3)
show_doc(MatthewsCorreff, title_level=3)
show_doc(KappaScore, title_level=3)
show_doc(ConfusionMatrix, title_level=3)
show_doc(MultiLabelFbeta, title_level=3)

show_doc(auc_roc_score, title_level=3)

jekyll_note("Instead of passing this method to the learner's metrics directly, make use of the AUROC() class.")

show_doc(roc_curve, title_level=3)

jekyll_note("Instead of passing this method to the learner's metrics directly, make use of the AUROC() class.")

show_doc(AUROC, title_level=3)
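# A minimal usage sketch (not part of the library reference above): metrics are passed to a
# `Learner` through its `metrics` argument. Plain functions such as `accuracy` or `error_rate`
# are averaged over batches automatically, while callback-based metrics such as `AUROC` are
# passed as instances. `data` (a DataBunch for a binary classification task) and `model`
# (an nn.Module) are assumed placeholders, not defined in this notebook.
learn = Learner(data, model, metrics=[accuracy, error_rate, AUROC()])
learn.fit_one_cycle(1)  # metric columns are reported at the end of each epoch

# The `AverageMetric` callback below is what fastai uses internally to do this
# batch-by-batch averaging for metrics given as plain functions.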
class AverageMetric(Callback):
    "Wrap a `func` in a callback for metrics computation."
    def __init__(self, func):
        # If it's a partial, use func.func
        name = getattr(func, 'func', func).__name__
        self.func, self.name = func, name

    def on_epoch_begin(self, **kwargs):
        "Set the inner value to 0."
        self.val, self.count = 0., 0

    def on_batch_end(self, last_output, last_target, **kwargs):
        "Update metric computation with `last_output` and `last_target`."
        if not is_listy(last_target): last_target = [last_target]
        self.count += last_target[0].size(0)
        val = self.func(last_output, *last_target)
        self.val += last_target[0].size(0) * val.detach().cpu()

    def on_epoch_end(self, last_metrics, **kwargs):
        "Set the final result in `last_metrics`."
        return add_metrics(last_metrics, self.val/self.count)

class Precision(Callback):
    def on_epoch_begin(self, **kwargs):
        self.correct, self.total = 0, 0

    def on_batch_end(self, last_output, last_target, **kwargs):
        preds = last_output.argmax(1)
        self.correct += ((preds == 0) * (last_target == 0)).float().sum()
        self.total += (preds == 0).float().sum()

    def on_epoch_end(self, last_metrics, **kwargs):
        return add_metrics(last_metrics, self.correct/self.total)

import tracemalloc
class TraceMallocMetric(Callback):
    def __init__(self):
        super().__init__()
        self.name = "peak RAM"

    def on_epoch_begin(self, **kwargs):
        tracemalloc.start()

    def on_epoch_end(self, last_metrics, **kwargs):
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        return add_metrics(last_metrics, torch.tensor(peak))

import tracemalloc
class TraceMallocMultiColMetric(LearnerCallback):
    _order = -20  # Needs to run before the recorder
    def __init__(self, learn):
        super().__init__(learn)
        self.train_max = 0

    def on_train_begin(self, **kwargs):
        self.learn.recorder.add_metric_names(['used', 'max_used', 'peak'])

    def on_batch_end(self, train, **kwargs):
        # Track max memory usage during the train phase
        if train:
            current, peak = tracemalloc.get_traced_memory()
            self.train_max = max(self.train_max, current)

    def on_epoch_begin(self, **kwargs):
        tracemalloc.start()

    def on_epoch_end(self, last_metrics, **kwargs):
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        return add_metrics(last_metrics, [current, self.train_max, peak])

show_doc(FBeta.on_batch_end)
show_doc(FBeta.on_epoch_begin)
show_doc(FBeta.on_epoch_end)
show_doc(mae)
show_doc(msle)
show_doc(mse)
show_doc(rmse)
show_doc(Precision.on_epoch_end)
show_doc(FBeta.on_train_end)
show_doc(KappaScore.on_epoch_end)
show_doc(MatthewsCorreff.on_epoch_end)
show_doc(FBeta.on_train_begin)
show_doc(RMSE.on_epoch_end)
show_doc(ConfusionMatrix.on_train_begin)
show_doc(ConfusionMatrix.on_batch_end)
show_doc(ConfusionMatrix.on_epoch_end)
show_doc(Recall.on_epoch_end)
show_doc(ExplainedVariance.on_epoch_end)
show_doc(ExpRMSPE.on_epoch_end)
show_doc(ConfusionMatrix.on_epoch_begin)
show_doc(R2Score.on_epoch_end)
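# A minimal sketch of wiring up the custom metrics defined above (again, `data` and `model`
# are assumed placeholders): callback-based metrics such as the simplified `Precision` or
# `TraceMallocMetric` go into `metrics` as instances, while a `LearnerCallback` such as
# `TraceMallocMultiColMetric` is registered through `callback_fns`.
learn = Learner(data, model,
                metrics=[accuracy, Precision(), TraceMallocMetric()],
                callback_fns=[TraceMallocMultiColMetric])
learn.fit_one_cycle(1)  # the extra metric columns appear next to the losses for each epoch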