from collections.abc import Mapping

import torch
import torch.nn.functional as F
from torch import nn
import torchvision.transforms.functional as TF
from datasets import load_dataset, load_dataset_builder

from miniai.datasets import *
from miniai.learner import *

x, y = 'image', 'label'
name = "fashion_mnist"
dsd = load_dataset(name)

@inplace
def transformi(b): b[x] = [torch.flatten(TF.to_tensor(o)) for o in b[x]]

bs = 1024
tds = dsd.with_transform(transformi)
dls = DataLoaders.from_dd(tds, bs)
dt = dls.train
xb, yb = next(iter(dt))
xb.shape, yb[:10]

m, nh = 28*28, 50
model = nn.Sequential(nn.Linear(m, nh), nn.ReLU(), nn.Linear(nh, 10))

cbs = [TrainCB(), CudaCB(), MetricsCB(Accuracy()), ProgressCB(plot=True)]
learn = Learner(model, dls, F.cross_entropy, lr=0.2, cbs=cbs)
learn.fit(1)

import inspect
import types

class CallbackWrapper(Callback):
    def __init__(self, wrapped):
        self.wrapped = wrapped
        # Route the wrapped callback's `before_*`/`after_*` events to this wrapper.
        self.wrapped.callback = self.callback
        # Wrap every public method of the wrapped callback with `with_cbs`, so that
        # calling e.g. `predict` fires `before_predict`/`after_predict` on us.
        for name, fn in inspect.getmembers(wrapped.__class__, predicate=inspect.isfunction):
            if name.startswith("_"): continue
            # TODO: maybe ignore already wrapped?
            # TODO: provide name mapping so `after_get_loss` -> `after_loss` if we want
            setattr(wrapped, name, types.MethodType(with_cbs(name)(fn), wrapped))

    def __getattr__(self, name):
        if name[0] == '_': raise AttributeError(name)
        return getattr(self.wrapped, name)

    def __setattr__(self, name, value):
        # Magic name: `wrapped` lives on the wrapper itself; every other
        # attribute is forwarded to the wrapped callback.
        if name == "wrapped": super().__setattr__(name, value)
        else: setattr(self.wrapped, name, value)

    def callback(self, method_nm): getattr(self, method_nm, identity)()

class Tracer(CallbackWrapper):
    def after_predict(self): print("after_predict")
    def after_get_loss(self): print("after_get_loss")

cbs = [Tracer(TrainCB()), CudaCB(), MetricsCB(Accuracy()), ProgressCB()]
learn = Learner(model, dls, F.cross_entropy, lr=0.001, cbs=cbs)
learn.fit(1)

from itertools import chain

def flatten(lists): return chain.from_iterable(lists)
# TODO: copy from fastai implementation to deal with lists, dicts, etc.
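# For reference: a minimal sketch of what the `with_cbs` decorator (imported
# from miniai.learner above) does. This is an approximation, not the exact
# miniai implementation -- the real one also handles Cancel*Exception control
# flow. Each wrapped method fires `before_<name>`/`after_<name>` through
# `o.callback`, which is why CallbackWrapper rebinds `wrapped.callback` to
# itself above.
class _with_cbs_sketch:
    def __init__(self, nm): self.nm = nm
    def __call__(self, f):
        def _f(o, *args, **kwargs):
            o.callback(f'before_{self.nm}')  # e.g. before_predict
            f(o, *args, **kwargs)
            o.callback(f'after_{self.nm}')   # e.g. after_predict -> Tracer.after_predict
        return _f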
def to_float(x): return x.float() if torch.is_floating_point(x) else x

class MixedPrecision(CallbackWrapper):
    # Run the forward pass and loss under autocast, casting the preds back to
    # float32 afterwards. (No gradient scaling here -- see the GradScaler
    # sketch at the end.)
    def before_fit(self): self.autocast = torch.autocast("cuda")
    def before_batch(self): self.autocast.__enter__()
    def after_predict(self): self.learn.preds = to_float(self.learn.preds)
    def after_get_loss(self): self.autocast.__exit__(None, None, None)

model = nn.Sequential(nn.Linear(m, nh), nn.ReLU(), nn.Linear(nh, 10))
cbs = [MixedPrecision(TrainCB()), CudaCB(), MetricsCB(Accuracy()), ProgressCB(plot=True)]
learn = Learner(model, dls, F.cross_entropy, lr=0.2, cbs=cbs)
learn.fit(1)

from accelerate import Accelerator

model = nn.Sequential(nn.Linear(m, nh), nn.ReLU(), nn.Linear(nh, 10))

class Distributed(CallbackWrapper):
    def __init__(self, wrapped):
        super().__init__(wrapped)
        self.accelerator = Accelerator()

    def before_fit(self):
        self.learn.model, self.learn.opt, self.learn.dls = self.accelerator.prepare(
            self.learn.model, self.learn.opt, self.learn.dls
        )

    # Replaces TrainCB.backward
    def backward(self): self.accelerator.backward(self.learn.loss)

cbs = [Distributed(TrainCB()), CudaCB(), MetricsCB(Accuracy()), ProgressCB(plot=True)]
learn = Learner(model, dls, F.cross_entropy, lr=0.2, cbs=cbs)
learn.fit(1)

# Wrappers compose: they can be nested in either order.
model = nn.Sequential(nn.Linear(m, nh), nn.ReLU(), nn.Linear(nh, 10))
cbs = [MixedPrecision(Distributed(TrainCB())), CudaCB(), MetricsCB(Accuracy()), ProgressCB(plot=True)]
learn = Learner(model, dls, F.cross_entropy, lr=0.2, cbs=cbs)
learn.fit(1)

cbs = [Distributed(MixedPrecision(TrainCB())), CudaCB(), MetricsCB(Accuracy()), ProgressCB(plot=True)]
learn = Learner(model, dls, F.cross_entropy, lr=0.2, cbs=cbs)
learn.fit(1)
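# MixedPrecision above skips loss scaling for brevity, which fp16 training
# usually needs. A sketch of a fuller wrapper using torch.cuda.amp.GradScaler;
# `ScaledMixedPrecision` is a hypothetical name, not part of miniai, and it
# assumes `step` is dispatched to callbacks the same way `backward` is in the
# Distributed example above.
class ScaledMixedPrecision(CallbackWrapper):
    def before_fit(self):
        self.autocast = torch.autocast("cuda")
        self.scaler = torch.cuda.amp.GradScaler()
    def before_batch(self): self.autocast.__enter__()
    def after_predict(self): self.learn.preds = to_float(self.learn.preds)
    def after_get_loss(self): self.autocast.__exit__(None, None, None)
    # Replaces TrainCB.backward/step: scale the loss before backward, then let
    # the scaler unscale gradients and skip the update if any overflowed.
    def backward(self): self.scaler.scale(self.learn.loss).backward()
    def step(self):
        self.scaler.step(self.learn.opt)
        self.scaler.update()

# Used exactly like MixedPrecision:
# cbs = [ScaledMixedPrecision(TrainCB()), CudaCB(), MetricsCB(Accuracy()), ProgressCB(plot=True)]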