#!/usr/bin/env python
# coding: utf-8

# # Custom Feature Detection and Velocity Fields: Bubble tracking in 2D foams

# As illustrated in the [walkthrough example](http://nbviewer.ipython.org/github/soft-matter/trackpy-examples/blob/master/notebooks/walkthrough.ipynb), the tracking relies on two steps:
#
# * the detection of features
# * the tracking of these features
#
# In many situations, we track point-like particles. However, it is also possible to track extended features such as bubbles. Trackpy provides a `locate` function to detect particles, seen as spots. But in the case of a foam, the bubbles are in contact with their neighbors and `locate` cannot detect their positions.
#
# In this notebook, we show that we can apply a custom method to detect the positions of the bubbles and then pass the resulting list to the tracking algorithm.

# ## Scientific libraries

# In[1]:

# change the following to %matplotlib notebook for interactive plotting
get_ipython().run_line_magic('matplotlib', 'inline')

import numpy as np
import pandas as pd
import pims
import trackpy as tp
import os

import matplotlib as mpl
import matplotlib.pyplot as plt

# Optionally, tweak styles.
mpl.rc('figure', figsize=(10, 6))
mpl.rc('image', cmap='gray')


# In[2]:

datapath = '../sample_data/foam'
prefix = 'V1.75f3.125000'


# ## Image preprocessing

# First, we load the batch of pictures and display one image as an example.

# In[3]:

id_example = 4
rawframes = pims.open(os.path.join(datapath, prefix + '*.tif'))
plt.imshow(rawframes[id_example]);


# Because we will need to apply several image processing steps to detect the positions of the bubbles, we crop the pictures to keep only the region of interest. With PIMS, this is done by defining a pipeline function that processes the frames.

# In[4]:

@pims.pipeline
def crop(img):
    """
    Crop the image to select the region of interest.
    """
    x_min = 45
    x_max = -35
    y_min = 100
    y_max = -300
    return img[y_min:y_max, x_min:x_max]

rawframes = crop(pims.open(os.path.join(datapath, prefix + '*.tif')))
plt.imshow(rawframes[id_example]);


# The next step is to use [scikit-image](http://scikit-image.org/) [1] to make a binary image of the bubble edges. It usually requires several trials before finding a successful procedure for a particular dataset.

# In[5]:

from scipy import ndimage
from skimage import morphology, util, filters

@pims.pipeline
def preprocess_foam(img):
    """
    Apply image processing functions to return a binary image.
    """
    # Crop the pictures as for raw images.
    img = crop(img)
    # Apply an adaptive threshold
    adaptive_thresh = filters.threshold_local(img, block_size=301)
    img = img < adaptive_thresh
    # Apply dilation twice to fill up small voids
    for _ in range(2):
        img = ndimage.binary_dilation(img)
    return util.img_as_int(img)

frames = preprocess_foam(pims.open(os.path.join(datapath, prefix + '*.tif')))
plt.imshow(frames[id_example]);


# ## Custom feature detection

# Our features are the bubbles, which appear as small black areas surrounded by white edges. Note that the black area at the bottom is the liquid and should not be considered as a feature. We again use scikit-image, this time with its labeling functions, to detect the bubbles. `regionprops` returns several quantities for each label, such as the position, the mean intensity, the area and the eccentricity, which can be used to remove false-positive labels. In our example, we must choose the criteria carefully because the bubble size is clearly different at the top and at the bottom of the image.
#
# We first test the criteria on a single picture to validate them. We use matplotlib to draw a rectangle around each retained feature.
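# Before settling on thresholds, it can help to look at the distribution of region properties on the example frame. The cell below is only a quick diagnostic sketch and is not part of the original analysis; it assumes `frames` and `id_example` from the cells above (the names `img_diag` and `props` are arbitrary) and simply tabulates the area and centroid returned by scikit-image's `regionprops` to guide the choice of the area cut-offs used below.

# In[ ]:

from skimage import util
from skimage.measure import label, regionprops

# Diagnostic sketch only: summarize the area and centroid of every labeled region
img_diag = frames[id_example]
white = util.dtype_limits(img_diag)[1]  # background is white
regions = regionprops(label(img_diag, background=white), intensity_image=img_diag)
props = pd.DataFrame({'area': [r.area for r in regions],
                      'y': [r.centroid[0] for r in regions],
                      'x': [r.centroid[1] for r in regions]})
print(props.describe())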
# In[6]:

from skimage.measure import label, regionprops
import matplotlib.patches as mpatches

img_example = frames[id_example]

# Label elements on the picture
# Background is white
white = util.dtype_limits(img_example)[1]
label_image, number_of_labels = label(img_example, background=white, return_num=True)
print(f"Found {number_of_labels} features")

fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
ax.imshow(img_example)

for region in regionprops(label_image, intensity_image=img_example):
    # Everywhere, skip small and large areas
    if region.area < 5 or region.area > 800:
        continue
    # On the top of the image,
    # skip small areas with a second threshold
    if region.centroid[0] < 260 and region.area < 80:
        continue
    # Draw a rectangle around the remaining regions
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=1)
    ax.add_patch(rect)


# Now, we apply this algorithm to each picture of the stack. The features are stored in a pandas DataFrame [2].

# In[7]:

features = pd.DataFrame()
for num, img in enumerate(frames):
    # Background is white
    white = util.dtype_limits(img)[1]
    label_image = label(img, background=white)
    for region in regionprops(label_image, intensity_image=img):
        # Everywhere, skip small and large areas
        if region.area < 5 or region.area > 800:
            continue
        # On the top of the image,
        # skip small areas with a second threshold
        if region.centroid[0] < 260 and region.area < 80:
            continue
        # Store the features that survived the criteria
        row = [{'y': region.centroid[0],
                'x': region.centroid[1],
                'frame': num,
                },]
        features = pd.concat([features, pd.DataFrame(row)], ignore_index=True)


# We can also use `annotate`, provided with trackpy, to display the features that will be tracked in the next step.

# In[8]:

tp.annotate(features[features.frame==id_example], img_example);


# ## Bubble tracking

# As for particle tracking, this step links the successive positions of each feature. We superimpose the trajectories on one of the processed pictures to check their consistency.

# In[9]:

search_range = 11
t = tp.link_df(features, search_range, memory=5)
tp.plot_traj(t, superimpose=img);


# In[10]:

# Optional: rearrange the trajectories so that each particle becomes a column (one row per frame)
unstacked = t.set_index(['frame', 'particle']).unstack()


# ## Velocity field

# ### Step 1: calculation

# With the position of the center of mass of each bubble, we can calculate the velocity vectors.
#
# Here is a simple method that is fast enough for these data:

# In[11]:

data = pd.DataFrame()
for item in set(t.particle):
    sub = t[t.particle==item]
    dvx = np.diff(sub.x)
    dvy = np.diff(sub.y)
    for x, y, dx, dy, frame in zip(sub.x[:-1], sub.y[:-1], dvx, dvy, sub.frame[:-1]):
        row = [{'dx': dx,
                'dy': dy,
                'x': x,
                'y': y,
                'frame': frame,
                'particle': item,
                }]
        data = pd.concat([data, pd.DataFrame(row)], ignore_index=True)
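# For comparison, the same forward differences can be obtained without an explicit Python loop by using a pandas groupby. This is only a minimal sketch, not part of the original notebook: `diff(-1)` gives x[i] - x[i+1] within each trajectory, so its negation attaches each displacement to the starting position, exactly as in the loop above. The names `t_sorted`, `disp` and `data_alt` are arbitrary.

# In[ ]:

# Sketch: per-particle forward differences with groupby (equivalent to the loop above)
t_sorted = t.sort_values(['particle', 'frame'])
disp = -t_sorted.groupby('particle')[['x', 'y']].diff(-1)
data_alt = (t_sorted.assign(dx=disp['x'], dy=disp['y'])
                    .dropna(subset=['dx', 'dy'])
                    .reset_index(drop=True))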
# Here is a more efficient method, [contributed separately](https://github.com/soft-matter/trackpy/issues/524) by trackpy user [Thefalas](https://github.com/Thefalas), for doing the same thing. It is a little faster for these data, but is a much better choice if you have a larger data set:

# In[12]:

col_names = ['dx', 'dy', 'x', 'y', 'frame', 'particle']

# Create an empty dataframe to store the results
data = pd.DataFrame(np.zeros(shape=(1, 6), dtype=np.int64), columns=col_names)

for item in set(t.particle):
    sub = t[t.particle==item]
    if sub.shape[0] <= 2:
        # Cases in which the particle only has 1 or 2 rows of data
        pass
    else:
        #print('Deriving velocities for particle:', str(item))
        dvx = pd.DataFrame(np.gradient(sub.x), columns=['dx',])
        dvy = pd.DataFrame(np.gradient(sub.y), columns=['dy',])
        new_df = pd.concat((dvx, dvy,
                            sub.x.reset_index(drop=True),
                            sub.y.reset_index(drop=True),
                            sub.frame.reset_index(drop=True),
                            sub.particle.reset_index(drop=True)),
                           axis=1, names=col_names, sort=False)
        data = pd.concat((data, new_df), axis=0)

# Get rid of the first 'np.zeros' row and reset the index
data = data.reset_index(drop=True)
data = data.drop(0)
data = data.reset_index(drop=True)


# ### Step 2: rendering

# We now display the picture and plot the velocity field on top with matplotlib [3]. Here, we plot only the first picture for illustration purposes.

# In[13]:

from matplotlib.pyplot import quiver

i = 0
d = data[data.frame==i]
plt.imshow(rawframes[i])
plt.quiver(d.x, d.y, d.dx, -d.dy, pivot='middle', headwidth=4, headlength=6, color='red')
plt.axis('off');


# ## About this work

# This presentation is based on a scientific work that aims to understand the damping of a thin layer of foam placed on top of an oscillating liquid bath. The main idea of this work is presented in reference [4] and the scientific results are in reference [5]. This work was performed at Princeton University (USA) by all the co-authors, and this presentation was written by F. Boulogne.

# ## References

# [1] Stéfan van der Walt, Johannes L. Schönberger, Juan Nunez-Iglesias, François Boulogne, Joshua D. Warner, Neil Yager, Emmanuelle Gouillart, Tony Yu and the scikit-image contributors. scikit-image: Image processing in Python. PeerJ 2:e453, 2014. http://dx.doi.org/10.7717/peerj.453
#
# [2] Wes McKinney. Data structures for statistical computing in Python. In: Proceedings of the 9th Python in Science Conference, 2010, p. 51-56.
#
# [3] John D. Hunter. Matplotlib: A 2D graphics environment. Computing in Science & Engineering, 2007, vol. 9, no. 3, p. 90-95.
#
# [4] J. Cappello, A. Sauret, F. Boulogne, E. Dressaire and H. A. Stone, Journal of Visualization, 18, 269-271 (2015). https://doi.org/10.1007/s12650-014-0250-1
#
# [5] A. Sauret, F. Boulogne, J. Cappello, E. Dressaire and H. A. Stone, Physics of Fluids, 27 (2015). https://doi.org/10.1063/1.4907048