# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Collect spatial activations and logit attributions from InceptionV1.

Notebook-style script: for each of several layers, it samples one random
spatial position per ImageNet image, records the activation vector at that
position, and computes the forward-mode attribution of that position to the
final class logits.  Results are written out in pages of .npy files.
"""

# Installations (notebook shell magic in the original; run manually when
# executing this as a plain script):
#   pip -q install "lucid>=0.3.6"

# General support
import math
import json

import numpy as np
import tensorflow as tf

# General lucid code
import lucid.modelzoo.vision_models as models
from lucid.optvis import render  # required below by render.import_model

options = {
    'model': 'inceptionv1',
    'split': 'train',
}

# Import a model from the modelzoo.
model = models.InceptionV1()
model.load_graphdef()

# The classification labels (displayed once as notebook output originally).
model.labels

# Set up the data provider for ImageNet.
# NOTE(review): you must download ImageNet yourself and build a provider
# (http://image-net.org/download).  The commented lines below are what the
# original notebook used; until a provider like this is wired up, the names
# `image_tensor`, `t_label`, `t_record_key` and `t_label_text` referenced
# later are UNDEFINED and the script will not run.
# data_split = imagenet.get_split(options['split'])
# provider = tf.contrib.slim.dataset_data_provider.DatasetDataProvider(data_split, seed=7)
# image_tensor, t_label, t_record_key, t_label_text = provider.get(
#     ["image", "label", "record_key", "label_text"])

sess = tf.InteractiveSession()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

# Resize to the model's 224x224 input and scale pixel values to [0, 1].
image_tensor_ = tf.image.resize_images(image_tensor[None], [224, 224]) / 255
T = render.import_model(model, image_tensor_, image_tensor_)


def save_data(base_dir, options, activations, attributions):
    """Save one page of spatial activations and attributions as .npy files.

    Args:
        base_dir: URL/path prefix the two files are written under.
        options: dict of run metadata encoded into each filename by
            `optionsToURL` (defined elsewhere — not visible in this file).
        activations: list of per-position activation vectors.
        attributions: list of per-position logit-attribution vectors.
    """
    # Spatial activations.  "wb": np.save emits binary data; a text-mode
    # handle breaks under Python 3.
    activations_path = optionsToURL(base_dir + "activations", "npy", options)
    with gfile.GFile(activations_path, "wb") as f:
        np.save(f, activations)
    # Spatial attributions to the final classes.
    attributions_path = optionsToURL(base_dir + "attribution", "npy", options)
    with gfile.GFile(attributions_path, "wb") as f:
        np.save(f, attributions)


def fwd_gradients(ys, xs, d_xs):
    """Forward-mode pushforward analogous to the pullback defined by
    tf.gradients.

    With tf.gradients, grad_ys is the vector being pulled back, and here
    d_xs is the vector being pushed forward.
    By mattjj@google.com from
    https://github.com/renmengye/tensorflow-forward-ad/issues/2
    """
    # Double-gradient trick: differentiating the pullback w.r.t. the dummy
    # cotangent v yields the forward-mode Jacobian-vector product.
    v = tf.zeros_like(ys)
    g = tf.gradients(ys, xs, grad_ys=v)
    return tf.gradients(g, v, grad_ys=d_xs)


number_of_images = int(1e6)
options['sample_images'] = number_of_images
number_of_pages = 500
# Integer division: this value is used as a range() bound below (plain `/`
# would produce a float under Python 3 and make range() raise).
number_of_images_per_page = number_of_images // number_of_pages

layers = [
    "conv2d2",
    "mixed3a",
    "mixed3b",
    "mixed4a",
    "mixed4b",
    "mixed4c",
    "mixed4d",
    "mixed4e",
    "mixed5a",
    "mixed5b",
]

for layer_name in reversed(layers):
    print()
    print(layer_name)
    options['layer'] = layer_name

    # Forward-mode attribution of the chosen layer position to the logits.
    d_previous = tf.placeholder("float32")
    d_logit = fwd_gradients(
        T("softmax2_pre_activation"), T(layer_name), d_previous)[0]

    zeros = None
    print(number_of_pages)
    for p in range(number_of_pages):
        activations = []
        attributions = []
        for n in range(number_of_images_per_page):
            # Evaluate one image through the network.
            vec, label_index, record_key, label_text, image = sess.run(
                [T(layer_name), t_label, t_record_key, t_label_text,
                 image_tensor_])

            # Sample one random position in the image, minus the edges.
            options['sample_type'] = 'random'
            # Upper bound shape-1 excludes BOTH the first and last spatial
            # index, matching the "minus the edges" intent (the original
            # randint(1, shape) still allowed the far edge index shape-1).
            n_x = np.random.randint(1, vec.shape[1] - 1)
            n_y = np.random.randint(1, vec.shape[2] - 1)

            # Compute logit attribution: push forward a tensor that is the
            # activation at (n_x, n_y) and zero everywhere else.  The zeros
            # buffer is reused across iterations to avoid reallocation.
            if zeros is None:
                zeros = np.zeros(vec.shape)
            else:
                zeros[:] = 0
            zeros[0, n_x, n_y] = vec[0, n_x, n_y]
            logit_attr = d_logit.eval({T(layer_name): vec, d_previous: zeros})

            # Top attribution class for this spatial activation
            # (argsort(-x)[0] is the argmax).
            top_attribution_class_index = int(np.argsort(-logit_attr[0])[0])
            top_attribution_class_label = model.labels[
                top_attribution_class_index]

            activations.append(vec[0, n_x, n_y])
            attributions.append(logit_attr[0])

        # Progress indicator (Python 2's trailing-comma print).
        print(p + 1, end=' ')

        # Save this page of files to bigstore.
        # NOTE(review): `base_dir_gcs` is defined elsewhere — not visible
        # in this file.
        options['page'] = '{}_of_{}'.format(p + 1, number_of_pages)
        save_data(base_dir_gcs + layer_name + "/", options,
                  activations, attributions)