In case of problems or questions, please first check the list of Frequently Asked Questions (FAQ).
Please shut down all other training/prediction notebooks before running this notebook (as they might otherwise occupy the GPU memory).
If you have not looked at the regular example notebooks, please do so first.
The notebooks in this folder provide further details about the inner workings of StarDist and might be useful if you want to apply it in a slightly different context.
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
import matplotlib
# Use the string 'none' to disable image interpolation: recent matplotlib
# versions validate rcParams and raise a ValueError when this key is set to
# the Python object None, which is not an accepted value.
matplotlib.rcParams["image.interpolation"] = 'none'
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
from glob import glob
from tqdm import tqdm
from tifffile import imread
from csbdeep.utils import Path, normalize
from csbdeep.io import save_tiff_imagej_compatible
from stardist import export_imagej_rois
from stardist.models import StarDist2D
# Collect and load all test images (sorted for a deterministic order).
X = sorted(glob('data/dsb2018/test/images/*.tif'))
X = [imread(fname) for fname in X]

# A 2-D first image means single-channel data; otherwise channels are last.
n_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]
axis_norm = (0,1) # normalize channels independently
# axis_norm = (0,1,2) # normalize channels jointly
if n_channel > 1:
    print("Normalizing image channels %s." % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))
# show all test images (flip the guard to True to display the grid)
if False:
    fig, ax = plt.subplots(7, 8, figsize=(16,16))
    for idx, (panel, img) in enumerate(zip(ax.flat, X)):
        panel.imshow(img, cmap='gray')
        panel.set_title(idx)
    for panel in ax.flat:
        panel.axis('off')
    plt.tight_layout()
None;
# Load the pretrained 2D fluorescence model shipped with StarDist
# (network weights and probability/NMS thresholds come with the model).
model = StarDist2D.from_pretrained('2D_versatile_fluo')
Found model '2D_versatile_fluo' for 'StarDist2D'. Loading network weights from 'weights_best.h5'. Loading thresholds from 'thresholds.json'. Using default values: prob_thresh=0.479071, nms_thresh=0.3.
# Normalize one example image to its 1–99.8 intensity percentile range.
img = normalize(X[29], 1, 99.8, axis=axis_norm) # example image
# Save the normalized image so the exported ROIs can be overlaid in ImageJ.
save_tiff_imagej_compatible('img.tif', img, axes='YX')
# Detect instances and export the polygon outlines as ImageJ ROIs.
labels, polygons = model.predict_instances(img)
export_imagej_rois('img_rois.zip', polygons['coord'])
Make a fake "time lapse" by stacking all images of size 256 x 256. Replace this with your actual movie.
# Stack every 256x256 test image into a fake movie with axes (T, Y, X).
frames = [frame for frame in X if frame.shape == (256,256)]
timelapse = np.stack(frames)
# Shift the per-image normalization axes by one to skip the new time axis.
norm_axes = (0,) + tuple(1 + np.array(axis_norm))
timelapse = normalize(timelapse, 1, 99.8, axis=norm_axes)
timelapse.shape
(26, 256, 256)
# loop over all frames and collect polygons from each
polygons = []
for frame in tqdm(timelapse):
    _, details = model.predict_instances(frame)
    polygons.append(details['coord'])
100%|██████████| 26/26 [00:03<00:00, 9.01it/s]
# Export the per-frame polygon ROIs for ImageJ, then the movie itself
# in an ImageJ-compatible TIFF with time as the leading axis.
export_imagej_rois('timelapse_rois.zip', polygons)
save_tiff_imagej_compatible('timelapse.tif', timelapse, axes='TYX')