#!/usr/bin/env python
# coding: utf-8

# # Lucid Modelzoo
#
# If you want to study techniques for visualizing and understanding neural
# networks, it's important to be able to try your experiments on multiple models.
#
# [Lucid](https://github.com/tensorflow/lucid) is a library for visualizing
# neural networks. As of lucid v0.3, we provide a consistent API for
# interacting with 27 different vision models.

# ## General Setup

# In[ ]:

# Expanded modelzoo is only available as of lucid v0.3
get_ipython().system('pip install --quiet lucid==0.3')

import numpy as np
import tensorflow as tf

from lucid.misc.io import show, load
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

# ## Import Modelzoo

# In[ ]:

# Lucid's modelzoo can be accessed as classes in vision_models
import lucid.modelzoo.vision_models as models
# ... or through a more systematic factory API
import lucid.modelzoo.nets_factory as nets

# ## List Models
#
# As of lucid v0.3

# In[3]:

# NOTE: converted from Python 2 `print` statements / `iteritems()` to the
# Python 3 print() function and dict.items().
print("")
print("Model".ljust(27), " ", "Dataset")
print("")
for name, Model in nets.models_map.items():
    print(name.ljust(27), " ", Model.dataset)

# ## List Model Layers

# In[4]:

models.InceptionV4_slim.layers

# ## Show Model Graph

# In[5]:

model = models.InceptionV4_slim()
model.load_graphdef()
model.show_graph()

# ## Visualize Neuron
#
# See the [lucid tutorial](https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/tutorial.ipynb) to learn more.
#
# We pick `InceptionV4/InceptionV4/Mixed_6b/concat` from above, and chose to focus on unit 0.

# In[9]:

model = models.InceptionV4_slim()
model.load_graphdef()
_ = render.render_vis(model, "InceptionV4/InceptionV4/Mixed_6b/concat:0")

# ## Caricature
#
# See the [inversion and caricature notebook](https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/misc/feature_inversion_caricatures.ipynb) to learn more.
# In[8]:

from lucid.recipes.caricature import feature_inversion

# Prepare the model first, then fetch the example image.
inception = models.InceptionV4_slim()
inception.load_graphdef()

dog_cat_img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png")

# Invert features at the chosen layer and display the caricature.
caricature = feature_inversion(
    dog_cat_img,
    inception,
    "InceptionV4/InceptionV4/Mixed_6b/concat",
    n_steps=512,
    cossim_pow=0.0,
)
show(caricature)