# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

!pip -q install "lucid>=0.3.1"

# Install TensorFlow 1.x.
# The %tensorflow_version magic works only in Colab.
%tensorflow_version 1.x

import numpy as np
import tensorflow as tf

assert tf.__version__.startswith("1")

import lucid.modelzoo.vision_models as models
from lucid.misc.io import show, load, save

# For resizing images
from PIL import Image

# For the image uploader
from google.colab import files

# For plots
import matplotlib.pyplot as plt

model = models.InceptionV1()
model.load_graphdef()


def classify(image, n=10, show_output=True):
  """Resizes an image to 224x224, runs it through InceptionV1, and returns the top-n labels and probabilities."""
  im = Image.fromarray(np.uint8(image * 255.))
  im_resize = im.resize((224, 224), Image.LANCZOS)
  im_array = np.asarray(im_resize) / 255.
  test_image = im_array[:, :, :3]

  tf.reset_default_graph()
  with tf.Session().as_default() as sess:
    input_t = tf.placeholder(tf.float32, shape=(224, 224, 3))
    model.import_graph(input_t)
    softmax2_t = sess.graph.get_tensor_by_name('import/softmax2:0')
    tf.global_variables_initializer().run()
    logits, = sess.run([softmax2_t], {input_t: test_image})

  BATCH = 0
  top_n_label_indices = np.argsort(logits)[BATCH][-n:][::-1]
  safe_top_n_label_indices = np.extract(top_n_label_indices < 1001, top_n_label_indices)
  top_n_labels = np.array(model.labels)[safe_top_n_label_indices]
  top_n_probs = logits[BATCH][safe_top_n_label_indices]

  max_length = max(len(label) for label in top_n_labels)
  predictions = ["{}: {: >6.2%}".format(label.rjust(max_length), prob)
                 for (label, prob) in zip(top_n_labels, top_n_probs)]

  if show_output:
    show(test_image)
    print("\n".join(predictions))

  return (top_n_labels, top_n_probs)


# Takes two images and composites them: the background is resized to 224x224 and the
# patch is pasted onto it as a square thumbnail. patch_width_ratio is the patch width
# relative to the background; patch_position is the (x, y) placement in [0, 1].
def composite_img(background_img, patch_img, patch_width_ratio=0.1, patch_position=(0.0, 0.0)):
  composite = background_img.copy()
  composite_img = Image.fromarray(np.uint8(composite * 255.))
  composite_img = composite_img.resize((224, 224), Image.LANCZOS)
  composite = (np.asarray(composite_img) / 255.)[:, :, :3]

  if patch_width_ratio > 0:
    patch_img_width = int(patch_width_ratio * 224)
    patch_img_ = Image.fromarray(np.uint8(patch_img[:, :, :3] * 255.))
    patch_img_thumb = patch_img_.resize((patch_img_width, patch_img_width), Image.LANCZOS)
    patch_img_thumb_array = np.asarray(patch_img_thumb) / 255.
    # The thumbnail is square, so shape[0] == shape[1].
    patch_img_thumb_size = patch_img_thumb_array.shape
    patch_y = int(patch_position[1] * (224 - patch_img_thumb_size[1]))
    patch_x = int(patch_position[0] * (224 - patch_img_thumb_size[0]))
    composite[patch_y:patch_y + patch_img_thumb_size[1],
              patch_x:patch_x + patch_img_thumb_size[0], 0:3] = patch_img_thumb_array

  composite_img = Image.fromarray(np.uint8(composite * 255.))
  composite_img_resize = composite_img.resize((224, 224), Image.LANCZOS)
  composite_img_resize_array = np.asarray(composite_img_resize) / 255.
  return composite_img_resize_array
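# A quick sanity check (not in the original notebook, purely illustrative): run
# classify() on a fixed random-noise image to confirm the graph import and label
# lookup work end to end before downloading the example images. The seed and n=5
# are arbitrary choices.
noise_img = np.random.RandomState(0).uniform(size=(224, 224, 3))
_ = classify(noise_img, n=5)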
## If you want to upload an image from your desktop, uncomment and change this code,
## or use the left-side panel in Colab to upload to the filesystem.
# uploaded = files.upload()
# f = list(uploaded.keys())[0]
# user_img = load(f)
# classify(user_img)

snorkel_url = "https://storage.googleapis.com/lucid-static/activation-atlas/adversarial-examples/examples/snorkel.png"
grey_whale_url = "https://storage.googleapis.com/lucid-static/activation-atlas/adversarial-examples/examples/grey-whale.jpg"
frying_pan_url = "https://storage.googleapis.com/lucid-static/activation-atlas/adversarial-examples/examples/frying-pan.png"

background_img = load(snorkel_url)
_ = classify(background_img, n=10)

# Ten different patches are available, 01 - 10
patch_img = load("https://storage.googleapis.com/lucid-static/activation-atlas/adversarial-examples/patches/train-10.png")
_ = classify(patch_img)

# A small patch doesn't affect the classification much
small = composite_img(background_img, patch_img, patch_width_ratio=0.2, patch_position=(0.0, 0.0))
_ = classify(small)

# Slightly bigger, and it flips the top classification
medium = composite_img(background_img, patch_img, patch_width_ratio=0.4, patch_position=(0.0, 0.0))
_ = classify(medium)

# A large patch overpowers the classification entirely
large = composite_img(background_img, patch_img, patch_width_ratio=0.7, patch_position=(0.0, 0.0))
_ = classify(large)

# Plot how the classifications shift as the patch grows
values = []
for i in range(80):
  size = i / 100.
  composite = composite_img(background_img, patch_img, size, (0, 0))
  labels, probs = classify(composite, n=1008, show_output=False)
  labels_list = labels.tolist()
  snorkel_index = labels_list.index("snorkel")
  scuba_index = labels_list.index("scuba diver")
  target_index = labels_list.index("steam locomotive")
  values.append([size, probs[snorkel_index], probs[scuba_index], probs[target_index]])

values = np.array(values)
plt.figure(figsize=(10, 5))
plt.plot(values[:, 0], values[:, 1], c="b", label="snorkel")
plt.plot(values[:, 0], values[:, 2], c="y", label="scuba diver")
plt.plot(values[:, 0], values[:, 3], c="r", label="steam locomotive")
plt.legend()
plt.show()

grey_whale_img = load(grey_whale_url)
_ = classify(grey_whale_img, n=10)

# Ten different patches are available, 01 - 10
baseball_patch = load("https://storage.googleapis.com/lucid-static/activation-atlas/adversarial-examples/patches/baseball-01.png")
_ = classify(baseball_patch, n=10)

# Position also has an effect: try the same patch in each corner
top_left = composite_img(grey_whale_img, baseball_patch, patch_width_ratio=0.4, patch_position=(0.0, 0.0))
_ = classify(top_left)

bottom_left = composite_img(grey_whale_img, baseball_patch, patch_width_ratio=0.4, patch_position=(0.0, 1.0))
_ = classify(bottom_left)

bottom_right = composite_img(grey_whale_img, baseball_patch, patch_width_ratio=0.4, patch_position=(1.0, 1.0))
_ = classify(bottom_right)

top_right = composite_img(grey_whale_img, baseball_patch, patch_width_ratio=0.4, patch_position=(1.0, 0.0))
_ = classify(top_right)

frying_pan_img = load(frying_pan_url)
_ = classify(frying_pan_img, n=10)

# Ten different patches are available, 01 - 10
noodle_patch = load("https://storage.googleapis.com/lucid-static/activation-atlas/adversarial-examples/patches/noodles-03.png")
_ = classify(noodle_patch, n=10)

# Position does have an effect here too; a full position sweep is sketched below
with_noodles = composite_img(frying_pan_img, noodle_patch, patch_width_ratio=0.4, patch_position=(0.0, 0.0))
_ = classify(with_noodles)
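# A hedged sketch (not part of the original notebook): sweep the noodle patch over a
# coarse grid of positions on the frying-pan image and plot how much probability the
# patch's own top-1 class gets at each position. Only composite_img() and classify()
# from above are used; the 5x5 grid and 0.4 width ratio are arbitrary choices.
patch_labels, _ = classify(noodle_patch, n=1, show_output=False)
patch_class = patch_labels[0]  # whatever the patch is classified as on its own

grid = 5
heatmap = np.zeros((grid, grid))
for iy in range(grid):
  for ix in range(grid):
    pos = (ix / (grid - 1.), iy / (grid - 1.))  # relative (x, y) position in [0, 1]
    comp = composite_img(frying_pan_img, noodle_patch, patch_width_ratio=0.4, patch_position=pos)
    labels, probs = classify(comp, n=1008, show_output=False)
    labels_list = labels.tolist()
    # Probability of the patch's class at this position (0 if it isn't among the returned labels)
    heatmap[iy, ix] = probs[labels_list.index(patch_class)] if patch_class in labels_list else 0.

plt.figure(figsize=(5, 5))
plt.imshow(heatmap, vmin=0., vmax=1.)
plt.colorbar()
plt.title("P({}) as the patch position varies".format(patch_class))
plt.show()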