#!/usr/bin/env python
# coding: utf-8

# # EDS-TEM quantification of core shell nanoparticles
#
# Using machine learning methods, such as independent component analysis (ICA),
# the composition of embedded nanostructures, such as core-shell nanoparticles,
# can be accurately measured as demonstrated by D. Roussow et al., Nano Letters,
# 2015 (see the [full article](https://www.repository.cam.ac.uk/bitstream/handle/1810/248102/Roussouw%20et%20al%202015%20Nano%20Letters.pdf?sequence=1)).
# Using the same data, this notebook reproduces the main results of this article.
#
# ## Author
#
# * 13/04/2015 Pierre Burdet - Developed for HyperSpy workshop at University of Cambridge
#
# ## Changes
#
# * 29/05/2016 Duncan Johnstone. Update the syntax for HyperSpy 0.8.5 (Python 3 compatibility)
# * 03/08/2016 Francisco de la Peña. Update the syntax for HyperSpy 1.1
# * 06/08/2016 Francisco de la Peña. Update the syntax for HyperSpy 0.8.1
# * 27/08/2016 Pierre Burdet. Update for workshop at EMC Lyon
# * 04/04/2018 Joshua Taillon. Bugfix and update for workshop at NIST
# * 18/07/2019 Katherine MacArthur. Update to include basic quantification, for M&M 2019 Portland
#
# ## Requirements
#
# * HyperSpy 1.5.1
#
# ## Contents
#
# 1. Specimen & Data
# 2. Loading and viewing data
# 3. Extracting Counts
# 4. Basic Quantification
# 5. Blind source separation of core/shell nanoparticles
# 6. Representative spectrum from bare cores
# 7. Comparison and quantification
# 8. Going further: Isolating the nanoparticles
#
# # 1. Specimen & Data
#
# The sample and the data used in this tutorial are described in
# D. Roussow, et al., Nano Letters, In Press (2015) (see the [full article](https://www.repository.cam.ac.uk/bitstream/handle/1810/248102/Roussouw%20et%20al%202015%20Nano%20Letters.pdf?sequence=1)).
#
# FePt@Fe$_3$O$_4$ core-shell nanoparticles are investigated with an EDS/TEM
# experiment (FEI Osiris TEM, 4 EDS detectors). The composition of the core can
# be measured with ICA (see figure 1c). To prove the accuracy of the results,
# measurements on bare FePt bimetallic nanoparticles from a synthesis prior to
# the shell addition step are used.
#
# Figure 1: (a) A spectrum image obtained from a cluster of core-shell
# nanoparticles. (b) The nanoparticles are comprised of a bi-metallic Pt/Fe core
# surrounded by an iron oxide shell on a carbon support. (c) ICA decomposes the
# mixed EDX signals into components representing the core (IC#0), shell (IC#1)
# and support (IC#2).

# In[3]:

# Download the data (1MB)
from urllib.request import urlretrieve, urlopen
from zipfile import ZipFile

files = urlretrieve(
    "https://www.dropbox.com/s/ecdlgwxjq04m5mx/HyperSpy_demos_EDS_TEM_files.zip?raw=1",
    "./HyperSpy_demos_EDX_TEM_files.zip")
with ZipFile("HyperSpy_demos_EDX_TEM_files.zip") as z:
    z.extractall()

# # 2. Loading and viewing data
#
# Table of contents
#
# Import HyperSpy, numpy and matplotlib libraries.
#
# *Remember, if at any point you do not understand how a function operates, its
# help file can be loaded by typing the name of the command followed by a '?'
# into a cell and then running that cell.*

# In[1]:

get_ipython().run_line_magic('matplotlib', 'widget')
import hyperspy.api as hs

# Load the spectrum images of the bare nanoparticles and those with a
# core-shell structure.

# In[4]:

c = hs.load("bare_core.hdf5", reader="hspy")
cs = hs.load("core_shell.hdf5", reader="hspy")

# Check the metadata has imported correctly. In particular whether the list of
# elements you wish to analyse is correct.

# In[5]:

cs.metadata

# Plot the core-shell data to inspect the signal level:

# In[6]:

cs.plot()

# Plotting the integrated counts for the whole spectrum image is a good way to
# check what elements exist in the sample. Adding 'True' to the function also
# labels any elements from the metadata onto the spectrum.

# In[7]:

cs.sum().plot(True)

# ## 3. Extracting count maps of elements
#
# Table of contents
#
# If they're not already added it is important to make sure all the elements
# you want to extract the intensities for are in the metadata of the sample.

# In[8]:

cs.set_elements(['Fe', 'Pt'])
cs.set_lines(['Fe_Ka', 'Pt_La'])
c.set_elements(['Fe', 'Pt'])
c.set_lines(['Fe_Ka', 'Pt_La'])
# cs.add_elements and cs.add_lines also work if you don't want to override what
# is already in the metadata.

# Extracting lines can be done without any background or integration window
# parameters. However if none are specified the default integration window is
# 1 FWHM and no background subtraction is carried out.
#
# line_width is the distance from the x-ray line (in FWHM) to the background
# window, taken as [left, right], allowing different distances for the two
# directions. An asymmetric value is used here because otherwise the Pt
# background windows overlap with the Cu K$_β$ line from the sample grid.

# In[10]:

bw = cs.estimate_background_windows(line_width=[5.0, 2.0])
iw = cs.estimate_integration_windows(windows_width=3)

# It is important to plot the windows to check that they are selecting the data
# correctly, otherwise errors, particularly in background subtraction, arise.
#
# The integration windows are represented by dashed lines and background
# windows by solid lines. The estimated background is then plotted by the
# close-to-horizontal black lines.

# In[11]:

cs.sum().plot(True, background_windows=bw, integration_windows=iw)

# *Try running the previous two cells of code above with line_width=[3.0,3.0]
# and see how this results in an erroneous background subtraction by plotting
# the background lines. (You might need to zoom in to see it)*
#
# How accurate background subtraction will be on a pixel-by-pixel basis can be
# seen with this plot.
#
# The x and y sliders select a pixel in the particle images we plotted earlier.
#
# You should be able to find some examples (e.g. the Fe K$_α$ line at X=39,
# Y=44) of where the background subtraction still fails due to a poor
# signal-to-noise ratio in the data.

# In[12]:

cs.plot(True, background_windows=bw, navigator='slider')

# Another way to adjust the location of the background windows is by changing
# specific numbers in the background window array individually.
#
# Running the 'bw' command will output the array, which contains keV
# coordinates corresponding to the position of the background windows. Each row
# corresponds to a different element in the list given in the metadata.
# Remember arrays in Python start at (0,0).
#
# These two commands therefore alter the position of the start and end points
# of the left-hand background window for Pt.

# In[13]:

bw[1, 0] = 8.44
bw[1, 1] = 8.65
bw

# Often it is prudent to rebin the data such that counts per pixel are
# increased and a more reliable background subtraction can be carried out. This
# can be easily done with the rebin function to any new scale.
#
# These functions will perform rebinning on both the core-shell ('cs') data and
# the core-only ('c') data. We define using the 'scale' parameter that we want
# 2x binning in X, 2x binning in Y, and 1x binning in Z (our counts).
#
# *Note, as we are re-defining 'cs' or 'c', this overwrites our
# previously-imported data. This means running this command multiple times will
# re-bin the data multiple times. If you accidentally run this command too many
# times, simply re-import the data by running the 'hs.load' commands at the top
# of this workbook.*

# In[14]:

cs = cs.rebin(scale=(2, 2, 1))
c = c.rebin(scale=(2, 2, 1))

# Finally, once the background subtraction windows have been selected to be in
# careful positions it is possible to extract the intensities.
#
# Note that exactly the same windows have been used for analysis of both the
# 'core' and 'core-shell' data sets. This is critical here as we are comparing
# the two datasets.

# In[15]:

cs_intensities = cs.get_lines_intensity(background_windows=bw,
                                        integration_windows=iw)
c_intensities = c.get_lines_intensity(background_windows=bw,
                                      integration_windows=iw)

# Each 'get_lines_intensity' command will create a list of images, again in
# the same order of the list of elements in the metadata. If the element is not
# in the metadata its intensity map will not be extracted.
#
# We can then run 'cs_intensities' to confirm that we have extracted intensity
# maps for all our elements of interest.

# In[16]:

cs_intensities

# In[17]:

# Plotting one particular image (in this case, the first, Fe_Ka map) can be
# done with:
cs_intensities[0].plot()

# In[18]:

# All the intensity maps can be plotted using:
hs.plot.plot_images(cs_intensities, cmap='viridis', axes_decor=None,
                    scalebar='all')

# Plotting and extracting intensity for both data sets can be condensed into
# one line.

# In[19]:

axes = hs.plot.plot_images(
    (c.get_lines_intensity(background_windows=bw, integration_windows=iw)
     + cs.get_lines_intensity(background_windows=bw, integration_windows=iw)),
    scalebar='all', axes_decor=None, per_row=2, cmap='viridis')

# ## 4. Quantification of count maps
#
# Table of contents
#
# HyperSpy is able to carry out EDX quantification using k-factors 'CL',
# zeta-factors 'zeta', or cross sections 'cross_section'.
#
# All these methods are applied in the same way using the combination of the
# stack of intensities and the original data. For 'zeta' or 'cross_section'
# quantification both a 'live_time' and a 'beam_current' should be in the
# metadata.

# In[20]:

# Setting these parameters in the metadata.
cs.set_microscope_parameters(live_time=6.15)     # in seconds
cs.set_microscope_parameters(beam_current=0.5)   # in nA

# In[21]:

# From Bruker software (Esprit) k-factors.
# NOTE(review): the Pt value here (5.75602) differs from the kfactors list used
# later in section 7 (5.075602) — one of the two is almost certainly a typo;
# confirm against the Esprit-exported factors before trusting either result.
factors = [1.450226, 5.75602]

# In[22]:

quant = cs.quantification(cs_intensities, 'CL', factors=factors)

# In[23]:

quant

# Again as with the intensities the quantification function result produces a
# list of images with atomic percent of each element (at least in the 'CL'
# case).
#
# In the 'zeta' and 'cross_section' methods more information is output from
# quantification. See the [EDS quantification](http://hyperspy.org/hyperspy-doc/current/user_guide/eds.html#eds-quantification)
# section of the documentation for more details.
#
# Alternatively, if the factors are treated as cross sections then the output
# result contains two lists of images: the first is a list of atomic *percent*
# maps (Index [0]), the second is a list of atomic *number* maps (Index [1]).
# This allows us to 'zero-out' regions of the image with too few counts.
#
# *Please note these values aren't accurate cross-sections but can be used as
# such for the purpose of this demo.*
#
# Ignore the warning produced; in this case we want to use a 1nm$^2$ probe
# size. As long as the pixel scale is calibrated in your spectrum image, probe
# size is taken as the pixel unless otherwise specified using
# s.set_microscope_parameters(probe_area = ?).

# In[24]:

quant = cs.quantification(cs_intensities, 'cross_section', factors=factors)

# In[25]:

quant

# Summing all the images containing numbers of atoms (quant[1]) gives us an
# image mapping out the total number of estimated atoms in the sample.

# In[26]:

No_of_atoms = quant[1][0] + quant[1][1]
No_of_atoms.plot()

# This 'total number of atoms' image can be used to make a mask and 'zero-out'
# any region of the image where the total counts equate to less than 1 atom
# count. This could also be done on an element-by-element basis instead.

# In[27]:

Mask = No_of_atoms > 1.
hs.plot.plot_images([Mask * quant[0][0], Mask * quant[0][1]],
                    scalebar='all', cmap='viridis', label=['Fe', 'Pt'],
                    axes_decor='off', vmin=0, vmax=100)

# ## 5. Blind source separation of core/shell nanoparticles
#
# Table of contents
#
# Apply blind source separation (ICA) to obtain a factor (spectrum)
# corresponding to the core.

# In[28]:

cs.change_dtype('float')
cs.decomposition()

# In[29]:

ax = cs.plot_explained_variance_ratio()

# ICA on the three first components.

# In[30]:

cs.blind_source_separation(3)

# In[31]:

axes = cs.plot_bss_loadings()

# In[32]:

axes = cs.plot_bss_factors()

# The first component corresponds to the core.

# In[33]:

s_bss = cs.get_bss_factors().inav[0]

# ## 6. Representative spectrum from bare cores
#
# Table of contents
#
# To obtain an integrated representative spectrum of the bare nanoparticles,
# the low intensity of Pt L$_{\alpha}$ is masked.

# In[39]:

pt_la = c.get_lines_intensity(['Pt_La'])[0]
mask = pt_la > 12

# In[63]:

axes = hs.plot.plot_images((mask, pt_la * mask), axes_decor=None,
                           colorbar=None, label=['Mask', 'Pt Lα intensity'],
                           cmap='viridis')

# To apply the mask, the navigation dimensions of the mask must be manipulated
# to match the navigation dimensions of the EDS spectrum image. This is
# achieved crudely via first generating a mask using the built in
# vacuum_mask() method and then overwriting the data with the mask generated
# above.

# In[41]:

c_mask = c.sum(-1)
c_mask.data = mask.data

# The sum over the particles is used as a bare core spectrum.

# In[42]:

s_bare = (c * c_mask).sum()

# ## 7. Model fitting and quantification
#
# Table of contents
#
# With a greater signal-to-noise ratio from integrating the spectrum, curve
# fitting now becomes possible as a method of intensity extraction.
#
# First we stack together the spectrum of bare particles and the first ICA
# component.

# In[43]:

s_bare.change_dtype('float')
s = hs.stack([s_bare, s_bss], new_axis_name='Bare or BSS')
s.metadata.General.title = 'Bare or BSS'

# In[44]:

axes = hs.plot.plot_spectra(s, style='mosaic',
                            legend=['Bare particles', 'BSS #0'])

# ### Method 1 - Window extraction
#
# X-ray intensity measurement with background subtraction, using the windows
# created earlier.

# In[45]:

s.plot(background_windows=bw, integration_windows=iw)

# In[46]:

sI = s.get_lines_intensity(background_windows=bw, integration_windows=iw)

# In[47]:

print('Bare core Fe_Kα/Pt_Lα ratio: \t{:.2f}'.format(
    list(sI[0].inav[0].data / sI[1].inav[0].data)[0]))
print('BSS Fe_Kα/Pt_Lα ratio: \t\t{:.2f}'.format(
    list(sI[0].inav[1].data / sI[1].inav[1].data)[0]))

# ### Method 2 - Model fitting
#
# Measure X-ray intensity by fitting a Gaussian model.

# In[48]:

# Removing the low energy part of the spectrum as this is not a region we're
# interested in.
m = s.isig[5.:15.].create_model()

# In[49]:

# These lines needed to be added to the model because they are not in the
# metadata. In this way they are included in the curve fitting but not in the
# final quantification.
m.add_family_lines(['Cu_Ka', 'Co_Ka'])

# In[50]:

m.components

# In[51]:

m.plot()

# In[52]:

m.multifit()

# In[53]:

m.fit_background()

# In[54]:

m.calibrate_energy_axis()

# In[55]:

m.plot()

# In[56]:

sI = m.get_lines_intensity()[:2]
sI

# Set up the kfactors for Fe K$_{\alpha}$ and Pt L$_{\alpha}$.

# In[57]:

# From Bruker software (Esprit).
# NOTE(review): see the matching note at cell In[21] — the Pt value here
# (5.075602) disagrees with the 5.75602 used earlier; confirm which is correct.
kfactors = [1.450226, 5.075602]

# Quantify with Cliff-Lorimer.

# In[58]:

composition = s.quantification(method="CL", intensities=sI, factors=kfactors)

# In[59]:

print(' |-----------------------------|')
print(' |     Atomic compositions     |')
print(' |-----------------------------|')
print(' \t | Bare core | BSS Signal |')
print('|------------|-------------|---------------|')
print('| Fe (at. %) | {:.2f}     | {:.2f}      |'.format(
    composition[0].data[0], composition[0].data[1]))
print('| Pt (at. %) | {:.2f}     | {:.2f}      |'.format(
    composition[1].data[0], composition[1].data[1]))
print('|------------|-------------|---------------|')

# ## 8. Going further
#
# Table of contents
#
# Further image processing with [scikit-image](http://scikit-image.org/) and
# [scipy](http://www.scipy.org/). Apply a watershed transformation to isolate
# the nanoparticles.
#
# - Transform the mask into a distance map.
# - Find local maxima.
# - Apply the watershed to the distance map using the local maxima as seeds
#   (markers).
#
# Adapted from this scikit-image [example](http://scikit-image.org/docs/dev/auto_examples/plot_watershed.html).

# In[60]:

from scipy.ndimage import distance_transform_edt, label
try:
    # skimage >= 0.19: watershed lives in skimage.segmentation.
    from skimage.segmentation import watershed
except ImportError:
    # Older scikit-image (contemporary with HyperSpy 1.5.1).
    from skimage.morphology import watershed
from skimage.feature import peak_local_max

# In[61]:

distance = distance_transform_edt(mask.data)
# NOTE(review): the 'indices=False' parameter of peak_local_max was deprecated
# and later removed in scikit-image; on recent versions use the returned
# coordinates to build the boolean seed image instead.
local_maxi = peak_local_max(distance, indices=False, min_distance=2,
                            labels=mask.data)
labels = watershed(-distance, markers=label(local_maxi)[0], mask=mask.data)

# In[62]:

axes = hs.plot.plot_images(
    [pt_la.T, mask.T, hs.signals.Signal2D(distance),
     hs.signals.Signal2D(labels)],
    axes_decor='off', per_row=2, colorbar=None, cmap=['viridis', 'tab20'],
    label=['Pt Lα intensity', 'Mask', 'Distances', 'Separated particles'])

# In[ ]: