In this notebook we investigate a range of different techniques for the Kaggle fisheries competition. In this competition, The Nature Conservancy asks you to help them detect which species of fish appears on a fishing boat, based on images captured by boat cameras at various angles. Your goal is to predict the likelihood of each fish species in each picture. There are eight target categories in this dataset: Albacore tuna, Bigeye tuna, Yellowfin tuna, Mahi Mahi, Opah, Sharks, Other (fish present but not in the categories above), and No Fish.
You can use the Kaggle command-line API to download the competition data.
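For example, a minimal sketch using the Kaggle CLI (assuming it is installed and configured with an API token; the competition slug and downloaded file names below are assumptions, so check the competition page):
# assumption: the competition slug is the-nature-conservancy-fisheries-monitoring
!kaggle competitions download -c the-nature-conservancy-fisheries-monitoring -p data/fish/
# then unzip whatever archives were downloaded into data/fish/
!unzip -q 'data/fish/*.zip' -d data/fish/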
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.plots import *
from fastai.io import get_data
PATH = "data/fish/"
!ls {PATH}
!ls {PATH}train
files = !ls {PATH}train/ALB | head
files
img = plt.imread(f'{PATH}train/ALB/{files[0]}')
plt.imshow(img);
Here we change the structure of the training data to make it more convenient: all images go into a common images directory, and a single file train.csv holds all the labels.
from os import listdir
from os.path import join
train_path = f'{PATH}/train'
dirs = [d for d in listdir(train_path) if os.path.isdir(join(train_path,d))]
print(dirs)
train_dict = {d: listdir(join(train_path, d)) for d in dirs}
train_dict["LAG"][:10]
sum(len(v) for v in train_dict.values())
with open(f"{PATH}train.csv", "w") as csv:
csv.write("img,label\n")
for d in dirs:
for f in train_dict[d]: csv.write(f'{f},{d}\n')
img_path = f'{PATH}images'
os.makedirs(img_path, exist_ok=True)
!cp {PATH}train/*/*.jpg {PATH}images/
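As an optional sanity check (not in the original notebook), the number of files copied into the images directory should match the total image count we computed above:
# optional check: every labelled image should now be in the common images/ directory
len(os.listdir(img_path)) == sum(len(v) for v in train_dict.values())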
Here we import the libraries we need. We'll learn about what each does during the course.
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
sz=350
bs=64
csv_fname = os.path.join(PATH, "train.csv")
train_labels = list(open(csv_fname))
n = len(list(open(csv_fname)))-1
val_idxs = get_cv_idxs(n)
tfms = tfms_from_model(resnet34, sz)
data = ImageClassifierData.from_csv(PATH, "images", csv_fname, bs, tfms, val_idxs)
learn = ConvLearner.pretrained(resnet34, data, precompute=True, opt_fn=optim.Adam, ps=0.5)
RuntimeError: cuda runtime error (2): out of memory — the GPU ran out of memory while precomputing activations for batches of shape [64, 3, 350, 350].
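If you hit this out-of-memory error, a common workaround is to reduce the batch size (or the image size) and rebuild the data object and learner before retrying; a minimal sketch, assuming a batch size of 32 fits on your GPU:
# assumption: bs=32 fits in GPU memory; reduce further if the error persists
bs = 32
tfms = tfms_from_model(resnet34, sz)
data = ImageClassifierData.from_csv(PATH, "images", csv_fname, bs, tfms, val_idxs)
learn = ConvLearner.pretrained(resnet34, data, precompute=True, opt_fn=optim.Adam, ps=0.5)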
lrf=learn.lr_find()
learn.sched.plot()
learn.fit(0.01, 4, cycle_len=1, cycle_mult=2)
[ 0. 0.6912 0.617 0.7949]
[ 1. 0.6748 0.3572 0.8928]
[ 2. 0.5156 0.3283 0.8974]
[ 3. 0.4837 0.3402 0.8873]
[ 4. 0.4303 0.2782 0.9179]
[ 5. 0.3515 0.2209 0.9267]
[ 6. 0.2841 0.2082 0.9349]
[ 7. 0.318 0.2482 0.9287]
[ 8. 0.3518 0.2316 0.9323]
[ 9. 0.3343 0.2133 0.9323]
[ 10. 0.2857 0.2128 0.9293]
[ 11. 0.2414 0.2148 0.9349]
[ 12. 0.2077 0.189 0.9287]
[ 13. 0.1834 0.1826 0.9352]
[ 14. 0.162 0.179 0.9391]
lrs=np.array([1e-4,1e-3,1e-2])
learn.precompute=False
learn.freeze_to(6)
lrf=learn.lr_find(lrs/1e3)
learn.sched.plot()
NOTE: Before running this, remove the tmp folder under data/fish so that the precomputed activations are regenerated for the new settings.
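In fastai 0.7 the precomputed activations are cached in a tmp folder inside PATH, so deleting it forces them to be recomputed; a minimal sketch, assuming that location:
# assumption: the cached activations live under data/fish/tmp (fastai 0.7 convention)
import shutil
shutil.rmtree(f'{PATH}tmp', ignore_errors=True)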
sz = 350
tfms = tfms_from_model(resnet34, sz, crop_type=CropType.NO)
data = ImageClassifierData.from_csv(PATH, "images", csv_fname, bs, tfms, val_idxs)
learn = ConvLearner.pretrained(resnet34, data, precompute=True, opt_fn=optim.Adam, ps=0.5)
RuntimeError: cuda runtime error (2): out of memory — the GPU ran out of memory while moving the new fully connected head of the network onto the GPU.
lrf=learn.lr_find()
learn.sched.plot()
learn.fit(0.01, 4, cycle_len=1, cycle_mult=2)
[ 0. 0.682 0.5775 0.8251]
[ 1. 0.6791 0.3741 0.8824]
[ 2. 0.5242 0.3172 0.9056]
[ 3. 0.4669 0.2973 0.8942]
[ 4. 0.4197 0.2335 0.9238]
[ 5. 0.3512 0.2161 0.9277]
[ 6. 0.2884 0.181 0.9306]
[ 7. 0.3255 0.2539 0.9176]
[ 8. 0.3452 0.2123 0.9284]
[ 9. 0.317 0.2007 0.9371]
[ 10. 0.2644 0.1644 0.9557]
[ 11. 0.2244 0.1454 0.9528]
[ 12. 0.1818 0.1343 0.9665]
[ 13. 0.1599 0.1404 0.9599]
[ 14. 0.1546 0.1408 0.9586]
lrs=np.array([1e-4,1e-3,1e-2])
learn.precompute=False
learn.unfreeze()
lrf=learn.lr_find(lrs/1e3)
learn.sched.plot()
lrs=np.array([1e-5,1e-4,1e-3])
learn.fit(lrs, 5, cycle_len=1, cycle_mult=2)
[ 0. 0.2649 0.1643 0.9404]
[ 1. 0.1998 0.1073 0.9681]
[ 2. 0.1288 0.1007 0.9694]
[ 3. 0.0923 0.1089 0.9736]
[ 4. 0.0843 0.1173 0.971 ]
[ 5. 0.0549 0.1002 0.9694]
[ 6. 0.0396 0.0964 0.9733]
[ 7. 0.0315 0.1249 0.9762]
[ 8. 0.0382 0.1132 0.9746]
[ 9. 0.0406 0.1013 0.973 ]
[ 10. 0.0285 0.1085 0.9749]
[ 11. 0.0202 0.1131 0.9762]
[ 12. 0.0154 0.1048 0.9772]
[ 13. 0.0308 0.0956 0.9785]
[ 14. 0.0166 0.1028 0.9788]
[ 1.5000e+01 1.1636e-02 1.0597e-01 9.7753e-01]
[ 1.6000e+01 1.1961e-02 1.0537e-01 9.7623e-01]
[ 1.7000e+01 1.2807e-02 1.0589e-01 9.7753e-01]
[ 1.8000e+01 1.4130e-02 1.3264e-01 9.7493e-01]
[ 1.9000e+01 1.6299e-02 1.1089e-01 9.8014e-01]
[ 2.0000e+01 1.6554e-02 1.3690e-01 9.7493e-01]
[ 2.1000e+01 1.4021e-02 1.5582e-01 9.6809e-01]
[ 2.2000e+01 9.8160e-03 1.3994e-01 9.7363e-01]
[ 2.3000e+01 7.9080e-03 1.3732e-01 9.7590e-01]
[ 2.4000e+01 1.7213e-02 1.5735e-01 9.7330e-01]
[ 2.5000e+01 2.0878e-02 1.5379e-01 9.7363e-01]
[ 2.6000e+01 2.0992e-02 1.5257e-01 9.7363e-01]
[ 2.7000e+01 1.8790e-02 1.5161e-01 9.7330e-01]
[ 2.8000e+01 1.1324e-02 1.4440e-01 9.7199e-01]
[ 2.9000e+01 7.9810e-03 1.4998e-01 9.7363e-01]
[ 3.0000e+01 6.6660e-03 1.4507e-01 9.7493e-01]
This part only needs to be run the first time, to generate the file trn_bb_labels.
import json
anno_classes = ['alb', 'bet', 'dol', 'lag', 'other', 'shark', 'yft']
def get_annotations():
    annot_urls = [
        '5458/bet_labels.json', '5459/shark_labels.json', '5460/dol_labels.json',
        '5461/yft_labels.json', '5462/alb_labels.json', '5463/lag_labels.json'
    ]
    cache_subdir = os.path.abspath(os.path.join(PATH, 'annos'))
    url_prefix = 'https://kaggle2.blob.core.windows.net/forum-message-attachments/147157/'
    os.makedirs(cache_subdir, exist_ok=True)
    for url_suffix in annot_urls:
        fname = url_suffix.rsplit('/', 1)[-1]
        get_data(url_prefix + url_suffix, f'{cache_subdir}/{fname}')
# run this code to get annotation files
get_annotations()
# create a dictionary mapping each image file to its largest bounding-box annotation (by area)
bb_json = {}
for c in anno_classes:
    if c == 'other': continue  # no annotation file for the "other" class
    j = json.load(open(f'{PATH}annos/{c}_labels.json', 'r'))
    for l in j:
        if 'annotations' in l.keys() and len(l['annotations']) > 0:
            bb_json[l['filename'].split('/')[-1]] = sorted(
                l['annotations'], key=lambda x: x['height']*x['width'])[-1]
bb_json['img_04908.jpg']
raw_filenames = pd.read_csv(csv_fname)["img"].values
file2idx = {o:i for i,o in enumerate(raw_filenames)}
empty_bbox = {'height': 0., 'width': 0., 'x': 0., 'y': 0.}
for f in raw_filenames:
    if f not in bb_json: bb_json[f] = empty_bbox
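As an optional check (not in the original notebook), we can count how many images fall back to the empty box; these are the NoF and OTHER images plus any files missing an annotation:
# images whose entry is still the all-zero placeholder box
len([f for f in raw_filenames if bb_json[f] == empty_bbox])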
bb_params = ['height', 'width', 'x', 'y']
def convert_bb(bb):
    # reorder the annotation dict into [height, width, x, y] and clip negative x/y to zero
    bb = [bb[p] for p in bb_params]
    bb[2] = max(bb[2], 0)
    bb[3] = max(bb[3], 0)
    return bb
trn_bbox = np.stack([convert_bb(bb_json[f]) for f in raw_filenames]).astype(np.float32)
trn_bb_labels = [f + ',' + ' '.join(map(str,o))+'\n' for f,o in zip(raw_filenames,trn_bbox)]
open(f'{PATH}trn_bb_labels', 'w').writelines(trn_bb_labels)
fnames,csv_labels,_,_ = parse_csv_labels(f'{PATH}trn_bb_labels', skip_header=False)
def bb_corners(bb):
    # convert a [height, width, x, y] box (values may be strings) into corner form [row1, col1, row2, col2]
    bb = np.array(bb, dtype=np.float32)
    row1 = bb[3]
    col1 = bb[2]
    row2 = row1 + bb[0]
    col2 = col1 + bb[1]
    return [row1, col1, row2, col2]
f = 'img_02642.jpg'
bb = csv_labels[f]
print(bb)
bb_corners(bb)
new_labels = [f + "," + " ".join(map(str, bb_corners(csv_labels[f]))) + "\n" for f in raw_filenames]
open(f'{PATH}trn_bb_corners_labels', 'w').writelines(new_labels)
# reading bb file
bbox = {}
bb_data = pd.read_csv(f'{PATH}trn_bb_labels', header=None)
fnames,csv_labels,_,_ = parse_csv_labels(f'{PATH}trn_bb_labels', skip_header=False)
fnames,corner_labels,_,_ = parse_csv_labels(f'{PATH}trn_bb_corners_labels', skip_header=False)
corner_labels["img_06297.jpg"]
['396.21', '404.67', '504.78', '623.22']
csv_labels["img_06297.jpg"]
['108.57', '218.55', '404.67', '396.21']
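These two outputs line up as expected: the corner form is simply [y, x, y + height, x + width]. A quick check on the numbers above:
# csv_labels holds [height, width, x, y]; corner_labels holds [row1, col1, row2, col2]
h, w, x, y = map(float, csv_labels["img_06297.jpg"])
print([y, x, y + h, x + w])  # should match corner_labels["img_06297.jpg"] up to float rounding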
def create_rect(bb, color='red'):
    # bb is [height, width, x, y]; cast to float since the parsed labels are strings
    bb = np.array(bb, dtype=np.float32)
    return plt.Rectangle((bb[2], bb[3]), bb[1], bb[0], color=color, fill=False, lw=3)
def show_bb(path, f='img_04908.jpg'):
    file_path = f'{path}images/{f}'
    bb = csv_labels[f]
    plots_from_files([file_path])
    plt.gca().add_patch(create_rect(bb))
def create_corner_rect(bb, color='red'):
    bb = np.array(bb, dtype=np.float32)
    return plt.Rectangle((bb[1], bb[0]), bb[3]-bb[1], bb[2]-bb[0], color=color, fill=False, lw=3)
def show_corner_bb(path, f='img_04908.jpg'):
    file_path = f'{path}images/{f}'
    bb = corner_labels[f]
    plots_from_files([file_path])
    plt.gca().add_patch(create_corner_rect(bb))
show_corner_bb(PATH, f = 'img_02642.jpg')
sz=299
bs=64
label_csv=f'{PATH}trn_bb_corners_labels'
n = len(list(open(label_csv)))-1
val_idxs = get_cv_idxs(n)
tfms = tfms_from_model(resnet34, sz, crop_type=CropType.NO, tfm_y=TfmType.COORD)
data = ImageClassifierData.from_csv(PATH, 'images', label_csv, tfms=tfms, val_idxs=val_idxs,
                                    continuous=True, skip_header=False)
trn_ds = data.trn_dl.dataset
x, y = trn_ds[0]
print(x.shape, y)
(3, 299, 299) [ 56 62 163 150]
learn = ConvLearner.pretrained(resnet34, data, precompute=True, opt_fn=optim.Adam, ps=0.5)
KeyboardInterrupt — the activation precompute pass was interrupted manually.
lrf=learn.lr_find()
learn.sched.plot()
[ 0. 234.0974 4450.0396]
learn.fit(0.01, 5, cycle_len=1, cycle_mult=2)
[ 0. 238.681 364.7433]
[ 1. 263.2261 186.7536]
[ 2. 210.889 168.2116]
[ 3. 176.0693 144.2513]
[ 4. 151.4565 129.4032]
[ 5. 138.8294 123.4013]
[ 6. 131.2182 122.3169]
[ 7. 125.9923 114.4574]
[ 8. 121.7743 109.9652]
[ 9. 117.3897 104.9769]
[ 10. 112.0477 100.5813]
[ 11. 109.0689 98.3695]
[ 12. 106.7465 98.7911]
[ 13. 105.5374 97.5796]
[ 14. 105.4487 96.7256]
[ 15. 104.3599 98.1003]
[ 16. 106.321 95.0515]
[ 17. 104.1545 94.0836]
[ 18. 102.1962 92.3605]
[ 19. 100.5235 91.6409]
[ 20. 99.355 89.3562]
[ 21. 98.3139 88.7288]
[ 22. 97.8664 87.1307]
[ 23. 95.7317 85.4799]
[ 24. 93.4525 86.0595]
[ 25. 93.5365 83.7847]
[ 26. 93.2249 84.8397]
[ 27. 93.2065 85.1662]
[ 28. 91.5837 83.8048]
[ 29. 90.2605 83.596 ]
[ 30. 92.2839 82.4151]
lrs=np.array([1e-4,1e-3,1e-2])
learn.precompute=False
learn.unfreeze()
lrf=learn.lr_find(lrs/1e3)
learn.sched.plot()
[ 0.0000e+00 1.3241e+02 3.5670e+13]
lrs=np.array([1e-5,1e-4,1e-3])
learn.fit(lrs, 5, cycle_len=1, cycle_mult=2)
[ 0. 71.1009 94.999 ]
[ 1. 88.0754 82.1335]
[ 2. 87.4359 77.1687]
[ 3. 86.5974 80.8263]
[ 4. 83.9792 71.4977]
[ 5. 79.1773 69.3897]
[ 6. 77.2359 68.047 ]
[ 7. 77.6487 77.4456]
[ 8. 78.9041 73.7645]
[ 9. 75.8422 68.0503]
[ 10. 73.4623 64.6083]
[ 11. 72.2943 63.5834]
[ 12. 70.4287 62.1409]
[ 13. 66.5874 59.9783]
[ 14. 66.0414 60.785 ]
[ 15. 66.3314 65.7976]
[ 16. 69.3549 65.7114]
[ 17. 67.7767 64.7094]
[ 18. 67.158 61.4241]
[ 19. 65.6192 58.7482]
[ 20. 62.9917 60.5301]
[ 21. 64.713 61.0712]
[ 22. 61.309 56.2067]
[ 23. 58.6235 55.2833]
[ 24. 57.5971 55.6252]
[ 25. 58.6757 55.0441]
[ 26. 57.1917 52.6833]
[ 27. 56.4466 53.1598]
[ 28. 56. 51.4715]
[ 29. 55.0628 52.217 ]
[ 30. 55.1955 51.3541]
f="img_06297.jpg"
PIL.Image.open(PATH+"images/" + f).size
sizes = [PIL.Image.open(PATH+f).size for f in data.trn_ds.fnames]
raw_val_sizes = [PIL.Image.open(PATH+f).size for f in data.val_ds.fnames]
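Optionally (not part of the original notebook), a quick look at how the raw training image sizes are distributed:
# count how many training images there are of each raw (width, height) size
pd.Series(sizes).value_counts()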