# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

!pip install --quiet lucid

import numpy as np
import scipy.ndimage as nd
import tensorflow as tf

import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform

# Let's import a model from the Lucid modelzoo!
model = models.InceptionV1()
model.load_graphdef()

# Experiment 1: naive optimization. A raw pixel-space parameterization
# (no FFT, no channel decorrelation), no transforms, and no regularization.
LEARNING_RATE = 0.05

optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

imgs = render.render_vis(model, "mixed4b_pre_relu:452",
                         optimizer=optimizer,
                         transforms=[],
                         param_f=lambda: param.image(64, fft=False, decorrelate=False),
                         thresholds=(1, 32, 128, 256, 2048), verbose=False)

# Note that we're doubling the image scale to make artifacts more obvious
show([nd.zoom(img[0], [2, 2, 1], order=0) for img in imgs])

# Experiment 2: explicit regularization. Penalize high-frequency artifacts
# with an L1 penalty, a total-variation penalty, and by blurring the input
# at each optimization step. The weights below are negative because they
# subtract from the objective being maximized.
L1 = -0.05
TV = -0.25
BLUR = -1.0

obj = objectives.channel("mixed4b_pre_relu", 452)
obj += L1 * objectives.L1(constant=.5)
obj += TV * objectives.total_variation()
obj += BLUR * objectives.blur_input_each_step()

imgs = render.render_vis(model, obj,
                         transforms=[],
                         param_f=lambda: param.image(64, fft=False, decorrelate=False),
                         thresholds=(1, 32, 128, 256, 2048), verbose=False)

# Note that we're doubling the image scale to make artifacts more obvious
show([nd.zoom(img[0], [2, 2, 1], order=0) for img in imgs])

# Experiment 3: transformation robustness. Randomly jitter, scale, and
# rotate the image a small amount before each gradient step.
JITTER = 1
ROTATE = 5
SCALE = 1.1

transforms = [
    transform.pad(2 * JITTER),
    transform.jitter(JITTER),
    transform.random_scale([SCALE ** (n / 10.) for n in range(-10, 11)]),
    transform.random_rotate(range(-ROTATE, ROTATE + 1)),
]

imgs = render.render_vis(model, "mixed4b_pre_relu:452",
                         transforms=transforms,
                         param_f=lambda: param.image(64),
                         thresholds=(1, 32, 128, 256, 2048), verbose=False)

# Note that we're doubling the image scale to make artifacts more obvious
show([nd.zoom(img[0], [2, 2, 1], order=0) for img in imgs])

# Experiment 4: combine a decorrelated parameterization with transformation
# robustness. Flip the two flags below to compare the approaches.
LEARNING_RATE = 0.05
DECORRELATE = True
ROBUSTNESS = True

# `fft` parameter controls spatial decorrelation
# `decorrelate` parameter controls channel decorrelation
param_f = lambda: param.image(64, fft=DECORRELATE, decorrelate=DECORRELATE)

if ROBUSTNESS:
    transforms = transform.standard_transforms
else:
    transforms = []

optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

imgs = render.render_vis(model, "mixed4b_pre_relu:452",
                         optimizer=optimizer,
                         transforms=transforms,
                         param_f=param_f,
                         thresholds=(1, 32, 128, 256, 2048), verbose=False)

# Note that we're doubling the image scale to make artifacts more obvious
show([nd.zoom(img[0], [2, 2, 1], order=0) for img in imgs])
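
# --- Follow-up sketch (not part of the original notebook) ---
# The DECORRELATE / ROBUSTNESS flags above suggest a 2x2 comparison. The loop
# below is a minimal sketch of that sweep, assuming only the lucid calls
# already used in this notebook; it renders one image per combination at the
# final threshold. The loop structure and labels are illustrative additions.
for decorrelate in (False, True):
    for robustness in (False, True):
        sweep_transforms = transform.standard_transforms if robustness else []
        sweep_imgs = render.render_vis(
            model, "mixed4b_pre_relu:452",
            optimizer=tf.train.AdamOptimizer(LEARNING_RATE),
            transforms=sweep_transforms,
            param_f=lambda: param.image(64, fft=decorrelate,
                                        decorrelate=decorrelate),
            thresholds=(2048,), verbose=False)
        print("decorrelate=%s, robustness=%s" % (decorrelate, robustness))
        show(nd.zoom(sweep_imgs[-1][0], [2, 2, 1], order=0))
# In practice, the decorrelated, transform-robust combination tends to give
# the cleanest visualizations, which is why it is the notebook's final setup.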