#@title Generated Images
#@title Display utility functions
from IPython import display
def make_iframe(src):
    """Return an HTML <iframe> snippet embedding *src* (a Comet embedded-panel URL).

    Bug fix: the original body returned an empty f-string (``f''''''`` — the
    markup was evidently lost when the notebook was exported), so
    ``display.HTML(iframe)`` rendered nothing. Emit a real iframe tag.
    """
    return f'<iframe src="{src}" width="100%" height="800px" frameborder="0"></iframe>'
# Embed a Comet panel in the notebook output — presumably the project's
# generated-images view (read-only share link); verify in the Comet UI.
iframe = make_iframe('https://www.comet.ml/embedded/?instanceId=PSMnawkGLivaljLupbmXYeptA&projectId=1dc7a6ddf9a444178f408f49a530035f&templateId=TrYNymVHvwNPgS84AzLhvtPEl&viewId=5IbNaVEfZFWBjdPQTHJkqpHXZ')
display.HTML(iframe)
#@title Generate Videos
# Second embedded panel — same project, different template (videos view).
iframe = make_iframe("https://www.comet.ml/embedded/?instanceId=qvKKiuyRQLXAkgwnwJOu4R5g5&projectId=1dc7a6ddf9a444178f408f49a530035f&templateId=xDJ93UCEHhXQ82bzdJnPjidQH&viewId=5IbNaVEfZFWBjdPQTHJkqpHXZ")
display.HTML(iframe)
#@title Install StyleGAN3 dependencies
from IPython.display import clear_output
# Clone NVIDIA's official StyleGAN3 repo and work from inside it.
!git clone https://github.com/NVlabs/stylegan3.git
%cd stylegan3
# Install Miniconda (Python 3.8) into /usr/local so conda-managed packages
# are importable from this Colab runtime.
!wget -O mini.sh https://repo.anaconda.com/miniconda/Miniconda3-py38_4.8.2-Linux-x86_64.sh
!chmod +x mini.sh
!bash ./mini.sh -b -f -p /usr/local
!conda install -q -y --prefix /usr/local jupyter
# Register the py38 interpreter as a Jupyter kernel.
!python -m ipykernel install --name "py38" --user
# Python packages required by the repo's gen_images.py / gen_video.py scripts.
!pip install click -q
!pip install numpy -q
!pip install pillow -q
!pip install torch -q
!pip install scipy -q
!pip install Ninja -q
!pip install imageio -q
!pip install imageio-ffmpeg -q
# Hide the (very long) install logs once everything is in place.
clear_output()
#@title Install / Initialize Comet
#@markdown Running this cell will install comet and run `comet_ml.init()`, which will prompt you to log in and copy/paste your API key, or to create an account if you do not already have one.
#@markdown If you have ever synced Google Drive to a Colab notebook, this process should feel familiar.
#@markdown **Note:** You can use this notebook without installing and signing up for Comet. You simply won't be able to log your images/videos to view later through the Comet UI. If you want to proceed without Comet, uncheck the checkbox below before running this cell.
use_comet = True #@param {type:"boolean"}
# NOTE(review): the next three lines belong inside the `if` body — their
# indentation appears to have been lost when the notebook was exported flat.
if use_comet:
!pip3 install --quiet comet_ml
import comet_ml
comet_ml.init()  # interactively prompts for the Comet API key
# Comet project all experiments below are logged under.
comet_project_name = "stylegan3-colab-demo" #@param {type:"string"}
#@title Generate an image
#@markdown StyleGAN3 pre-trained models for config T (translation equiv.) and config R (translation and rotation equiv.)
seed = 3454 #@param {type:"slider", min:0, max:9999, step:1}
# Base URL of NVIDIA's hosted pre-trained StyleGAN3 checkpoints (NGC).
baselink ='https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/'
model = "stylegan3-t-metfaces-1024x1024.pkl" #@param ["stylegan3-r-afhqv2-512x512.pkl", "stylegan3-r-ffhq-1024x1024.pkl", "stylegan3-r-ffhqu-1024x1024.pkl","stylegan3-r-ffhqu-256x256.pkl","stylegan3-r-metfaces-1024x1024.pkl","stylegan3-r-metfacesu-1024x1024.pkl","stylegan3-t-afhqv2-512x512.pkl","stylegan3-t-ffhq-1024x1024.pkl","stylegan3-t-ffhqu-1024x1024.pkl","stylegan3-t-ffhqu-256x256.pkl","stylegan3-t-metfaces-1024x1024.pkl","stylegan3-t-metfacesu-1024x1024.pkl"]
# Start a Comet experiment and log the run's hyperparameters (if enabled).
# NOTE(review): the indented bodies below were flattened on export.
if use_comet:
experiment = comet_ml.Experiment(project_name=comet_project_name)
params = {
'model':model,
'seed':seed,
'task':'image'
}
experiment.log_parameters(params)
# Generate one image with the selected pre-trained checkpoint; the repo's
# gen_images.py writes it to out/seed<NNNN>.png.
!python gen_images.py --outdir=/content/stylegan3/out --trunc=1 \
--seeds=$seed --network=$baselink$model
if use_comet:
# Upload the generated PNG under a model-qualified name, then close the run.
image_path = f'{model.replace(".pkl", "")}__seed{seed:04d}.png'
experiment.log_image(image_data='/content/stylegan3/out/seed%04d.png' % seed, name=image_path.split('/')[-1])
experiment.end()
# Show the generated image inline.
import matplotlib.pyplot as plt
from PIL import Image
plt.figure(figsize=(10,10))
img = Image.open('/content/stylegan3/out/seed%04d.png' % seed);
plt.imshow(img);
plt.axis('off');
#@title Generate an interpolation video
%cd /content/stylegan3
# Seed range to interpolate across, and the grid layout of the output video.
start_seed = 4#@param {type:"number"}
stop_seed = 2235 #@param {type:"number"}
n_cols = 2#@param {type:"number"}
n_rows = 1#@param {type:"number"}
#@markdown How many key frames to have?
num_keyframes = 3#@param {type:"number"}
#@markdown How many frames for interpolation?
w_frames = 20#@param {type:"number"}
#@markdown Total length in frames is `num_keyframes`*`w_frames`
# Validate the seed range before launching the (slow) render.
# Fix: corrected "larger then" -> "larger than" in the user-facing message.
assert stop_seed > start_seed, 'stop_seed should be larger than start_seed'
# Base URL of NVIDIA's hosted pre-trained StyleGAN3 checkpoints (NGC).
baselink ='https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/'
model = "stylegan3-r-metfacesu-1024x1024.pkl" #@param ["stylegan3-r-afhqv2-512x512.pkl", "stylegan3-r-ffhq-1024x1024.pkl", "stylegan3-r-ffhqu-1024x1024.pkl","stylegan3-r-ffhqu-256x256.pkl","stylegan3-r-metfaces-1024x1024.pkl","stylegan3-r-metfacesu-1024x1024.pkl","stylegan3-t-afhqv2-512x512.pkl","stylegan3-t-ffhq-1024x1024.pkl","stylegan3-t-ffhqu-1024x1024.pkl","stylegan3-t-ffhqu-256x256.pkl","stylegan3-t-metfaces-1024x1024.pkl","stylegan3-t-metfacesu-1024x1024.pkl"]
# Start a Comet experiment and log the video run's hyperparameters (if enabled).
# NOTE(review): the indented bodies below were flattened on export.
if use_comet:
experiment = comet_ml.Experiment(project_name=comet_project_name)
params = {
'start_seed':start_seed,
'stop_seed':stop_seed,
'n_cols':n_cols,
'n_rows':n_rows,
'num_keyframes':num_keyframes,
'w_frames':w_frames,
'baselink':baselink,
'model':model,
'task':'video'
}
experiment.log_parameters(params)
# Output filename encodes the seed range.
output_video = f'start_{start_seed}_stop_{stop_seed}.mp4'
# Render a grid of interpolations for seeds start_seed through stop_seed
# using the repo's gen_video.py.
!python gen_video.py --output=$output_video --trunc=1 --seeds=$start_seed-$stop_seed --grid={n_cols}x{n_rows} \
--network=$baselink$model --num-keyframes=$num_keyframes \
--w-frames=$w_frames
if use_comet:
# Upload the rendered MP4 under a model-qualified name, then close the run.
experiment.log_asset(output_video, file_name=f"{model.replace('.pkl', '')}_{output_video}")
experiment.end()
# Play the rendered interpolation video inline in the notebook.
from IPython.display import HTML, display
from base64 import b64encode

# Read the MP4 and embed it as a base64 data URL so it plays without a server.
# Fix: use a context manager instead of a leaked open(...).read() handle.
with open(output_video, 'rb') as f:
    mp4 = f.read()
data_url = "data:video/mp4;base64," + b64encode(mp4).decode()

# Bug fix: the original template string contained no %s placeholder, so
# `""" """ % data_url` raised TypeError and the video was never shown.
# Embed an actual <video> tag and display it explicitly.
display(HTML("""
<video width="512" controls autoplay loop>
  <source src="%s" type="video/mp4">
</video>
""" % data_url))

# Bug fix: `experiment` only exists when use_comet is True; guard the call so
# this cell does not raise NameError when Comet logging is disabled.
if use_comet:
    experiment.display_project()